mistralai 1.10.0__py3-none-any.whl → 1.11.1__py3-none-any.whl
This diff compares publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
- mistralai/_hooks/tracing.py +28 -3
- mistralai/_version.py +3 -3
- mistralai/accesses.py +22 -12
- mistralai/agents.py +88 -44
- mistralai/basesdk.py +6 -0
- mistralai/chat.py +96 -40
- mistralai/classifiers.py +48 -23
- mistralai/conversations.py +186 -64
- mistralai/documents.py +72 -26
- mistralai/embeddings.py +24 -9
- mistralai/extra/README.md +1 -1
- mistralai/extra/mcp/auth.py +10 -11
- mistralai/extra/mcp/base.py +17 -16
- mistralai/extra/mcp/sse.py +13 -15
- mistralai/extra/mcp/stdio.py +5 -6
- mistralai/extra/observability/otel.py +47 -68
- mistralai/extra/run/context.py +33 -43
- mistralai/extra/run/result.py +29 -30
- mistralai/extra/run/tools.py +8 -9
- mistralai/extra/struct_chat.py +15 -8
- mistralai/extra/utils/response_format.py +5 -3
- mistralai/files.py +58 -24
- mistralai/fim.py +20 -12
- mistralai/httpclient.py +0 -1
- mistralai/jobs.py +65 -26
- mistralai/libraries.py +20 -10
- mistralai/mistral_agents.py +438 -30
- mistralai/mistral_jobs.py +62 -17
- mistralai/models/__init__.py +46 -1
- mistralai/models/agent.py +1 -1
- mistralai/models/agentconversation.py +1 -1
- mistralai/models/agenthandoffdoneevent.py +1 -1
- mistralai/models/agenthandoffentry.py +3 -2
- mistralai/models/agenthandoffstartedevent.py +1 -1
- mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
- mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
- mistralai/models/agents_api_v1_agents_listop.py +5 -1
- mistralai/models/agents_api_v1_conversations_listop.py +1 -1
- mistralai/models/agentscompletionrequest.py +2 -5
- mistralai/models/agentscompletionstreamrequest.py +2 -5
- mistralai/models/archiveftmodelout.py +1 -1
- mistralai/models/assistantmessage.py +1 -1
- mistralai/models/audiochunk.py +1 -1
- mistralai/models/audioencoding.py +18 -0
- mistralai/models/audioformat.py +17 -0
- mistralai/models/basemodelcard.py +1 -1
- mistralai/models/batchjobin.py +18 -9
- mistralai/models/batchjobout.py +6 -1
- mistralai/models/batchjobsout.py +1 -1
- mistralai/models/batchrequest.py +48 -0
- mistralai/models/chatcompletionchoice.py +10 -5
- mistralai/models/chatcompletionrequest.py +2 -5
- mistralai/models/chatcompletionstreamrequest.py +2 -5
- mistralai/models/classificationrequest.py +37 -3
- mistralai/models/classifierdetailedjobout.py +4 -2
- mistralai/models/classifierftmodelout.py +3 -2
- mistralai/models/classifierjobout.py +4 -2
- mistralai/models/codeinterpretertool.py +1 -1
- mistralai/models/completiondetailedjobout.py +5 -2
- mistralai/models/completionftmodelout.py +3 -2
- mistralai/models/completionjobout.py +5 -2
- mistralai/models/completionresponsestreamchoice.py +9 -8
- mistralai/models/conversationappendrequest.py +4 -1
- mistralai/models/conversationappendstreamrequest.py +4 -1
- mistralai/models/conversationhistory.py +2 -1
- mistralai/models/conversationmessages.py +1 -1
- mistralai/models/conversationrequest.py +5 -1
- mistralai/models/conversationresponse.py +2 -1
- mistralai/models/conversationrestartrequest.py +4 -1
- mistralai/models/conversationrestartstreamrequest.py +4 -1
- mistralai/models/conversationstreamrequest.py +5 -1
- mistralai/models/documentlibrarytool.py +1 -1
- mistralai/models/documenturlchunk.py +1 -1
- mistralai/models/embeddingdtype.py +7 -1
- mistralai/models/embeddingrequest.py +11 -3
- mistralai/models/encodingformat.py +4 -1
- mistralai/models/entitytype.py +8 -1
- mistralai/models/filepurpose.py +8 -1
- mistralai/models/files_api_routes_list_filesop.py +4 -11
- mistralai/models/files_api_routes_upload_fileop.py +2 -6
- mistralai/models/fileschema.py +3 -5
- mistralai/models/finetuneablemodeltype.py +4 -1
- mistralai/models/ftclassifierlossfunction.py +4 -1
- mistralai/models/ftmodelcard.py +1 -1
- mistralai/models/functioncallentry.py +3 -2
- mistralai/models/functioncallevent.py +1 -1
- mistralai/models/functionresultentry.py +3 -2
- mistralai/models/functiontool.py +1 -1
- mistralai/models/githubrepositoryin.py +1 -1
- mistralai/models/githubrepositoryout.py +1 -1
- mistralai/models/httpvalidationerror.py +4 -2
- mistralai/models/imagegenerationtool.py +1 -1
- mistralai/models/imageurlchunk.py +1 -1
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
- mistralai/models/jobsout.py +1 -1
- mistralai/models/legacyjobmetadataout.py +1 -1
- mistralai/models/messageinputentry.py +9 -3
- mistralai/models/messageoutputentry.py +6 -3
- mistralai/models/messageoutputevent.py +4 -2
- mistralai/models/mistralerror.py +11 -7
- mistralai/models/mistralpromptmode.py +1 -1
- mistralai/models/modelconversation.py +1 -1
- mistralai/models/no_response_error.py +5 -1
- mistralai/models/ocrrequest.py +11 -1
- mistralai/models/ocrtableobject.py +4 -1
- mistralai/models/referencechunk.py +1 -1
- mistralai/models/requestsource.py +5 -1
- mistralai/models/responsedoneevent.py +1 -1
- mistralai/models/responseerrorevent.py +1 -1
- mistralai/models/responseformats.py +5 -1
- mistralai/models/responsestartedevent.py +1 -1
- mistralai/models/responsevalidationerror.py +2 -0
- mistralai/models/retrievefileout.py +3 -5
- mistralai/models/sampletype.py +7 -1
- mistralai/models/sdkerror.py +2 -0
- mistralai/models/shareenum.py +7 -1
- mistralai/models/sharingdelete.py +2 -4
- mistralai/models/sharingin.py +3 -5
- mistralai/models/source.py +8 -1
- mistralai/models/systemmessage.py +1 -1
- mistralai/models/textchunk.py +1 -1
- mistralai/models/thinkchunk.py +1 -1
- mistralai/models/timestampgranularity.py +1 -1
- mistralai/models/tool.py +2 -6
- mistralai/models/toolcall.py +2 -6
- mistralai/models/toolchoice.py +2 -6
- mistralai/models/toolchoiceenum.py +6 -1
- mistralai/models/toolexecutiondeltaevent.py +2 -1
- mistralai/models/toolexecutiondoneevent.py +2 -1
- mistralai/models/toolexecutionentry.py +4 -2
- mistralai/models/toolexecutionstartedevent.py +2 -1
- mistralai/models/toolfilechunk.py +13 -5
- mistralai/models/toolmessage.py +1 -1
- mistralai/models/toolreferencechunk.py +15 -5
- mistralai/models/tooltypes.py +1 -1
- mistralai/models/transcriptionsegmentchunk.py +1 -1
- mistralai/models/transcriptionstreamdone.py +1 -1
- mistralai/models/transcriptionstreamlanguage.py +1 -1
- mistralai/models/transcriptionstreamsegmentdelta.py +1 -1
- mistralai/models/transcriptionstreamtextdelta.py +1 -1
- mistralai/models/unarchiveftmodelout.py +1 -1
- mistralai/models/uploadfileout.py +3 -5
- mistralai/models/usermessage.py +1 -1
- mistralai/models/wandbintegration.py +1 -1
- mistralai/models/wandbintegrationout.py +1 -1
- mistralai/models/websearchpremiumtool.py +1 -1
- mistralai/models/websearchtool.py +1 -1
- mistralai/models_.py +24 -12
- mistralai/ocr.py +38 -10
- mistralai/sdk.py +2 -2
- mistralai/transcriptions.py +28 -12
- mistralai/types/basemodel.py +41 -3
- mistralai/utils/__init__.py +0 -3
- mistralai/utils/annotations.py +32 -8
- mistralai/utils/enums.py +60 -0
- mistralai/utils/forms.py +21 -10
- mistralai/utils/queryparams.py +14 -2
- mistralai/utils/requestbodies.py +3 -3
- mistralai/utils/retries.py +69 -5
- mistralai/utils/serializers.py +0 -20
- mistralai/utils/unmarshal_json_response.py +15 -1
- {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/METADATA +144 -159
- mistralai-1.11.1.dist-info/RECORD +495 -0
- {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +21 -5
- mistralai_azure/chat.py +82 -109
- mistralai_azure/httpclient.py +0 -1
- mistralai_azure/models/__init__.py +66 -4
- mistralai_azure/models/assistantmessage.py +1 -1
- mistralai_azure/models/chatcompletionchoice.py +10 -7
- mistralai_azure/models/chatcompletionrequest.py +24 -10
- mistralai_azure/models/chatcompletionstreamrequest.py +24 -10
- mistralai_azure/models/completionresponsestreamchoice.py +11 -7
- mistralai_azure/models/documenturlchunk.py +1 -1
- mistralai_azure/models/httpvalidationerror.py +15 -8
- mistralai_azure/models/imageurlchunk.py +1 -1
- mistralai_azure/models/mistralazureerror.py +30 -0
- mistralai_azure/models/mistralpromptmode.py +1 -1
- mistralai_azure/models/no_response_error.py +17 -0
- mistralai_azure/models/ocrpageobject.py +32 -5
- mistralai_azure/models/ocrrequest.py +20 -1
- mistralai_azure/models/ocrtableobject.py +34 -0
- mistralai_azure/models/prediction.py +4 -0
- mistralai_azure/models/referencechunk.py +1 -1
- mistralai_azure/models/responseformat.py +4 -2
- mistralai_azure/models/responseformats.py +5 -2
- mistralai_azure/models/responsevalidationerror.py +27 -0
- mistralai_azure/models/sdkerror.py +32 -14
- mistralai_azure/models/systemmessage.py +8 -4
- mistralai_azure/models/systemmessagecontentchunks.py +21 -0
- mistralai_azure/models/textchunk.py +1 -1
- mistralai_azure/models/thinkchunk.py +35 -0
- mistralai_azure/models/tool.py +2 -6
- mistralai_azure/models/toolcall.py +2 -6
- mistralai_azure/models/toolchoice.py +2 -6
- mistralai_azure/models/toolchoiceenum.py +6 -1
- mistralai_azure/models/toolmessage.py +1 -1
- mistralai_azure/models/tooltypes.py +1 -1
- mistralai_azure/models/usermessage.py +1 -1
- mistralai_azure/ocr.py +39 -40
- mistralai_azure/types/basemodel.py +41 -3
- mistralai_azure/utils/__init__.py +18 -8
- mistralai_azure/utils/annotations.py +32 -8
- mistralai_azure/utils/enums.py +60 -0
- mistralai_azure/utils/eventstreaming.py +10 -0
- mistralai_azure/utils/forms.py +21 -10
- mistralai_azure/utils/queryparams.py +14 -2
- mistralai_azure/utils/requestbodies.py +3 -3
- mistralai_azure/utils/retries.py +69 -5
- mistralai_azure/utils/serializers.py +3 -22
- mistralai_azure/utils/unmarshal_json_response.py +38 -0
- mistralai_gcp/_hooks/types.py +7 -0
- mistralai_gcp/_version.py +4 -4
- mistralai_gcp/basesdk.py +33 -25
- mistralai_gcp/chat.py +98 -109
- mistralai_gcp/fim.py +62 -85
- mistralai_gcp/httpclient.py +6 -17
- mistralai_gcp/models/__init__.py +321 -116
- mistralai_gcp/models/assistantmessage.py +2 -2
- mistralai_gcp/models/chatcompletionchoice.py +10 -7
- mistralai_gcp/models/chatcompletionrequest.py +38 -7
- mistralai_gcp/models/chatcompletionresponse.py +6 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +38 -7
- mistralai_gcp/models/completionresponsestreamchoice.py +12 -8
- mistralai_gcp/models/deltamessage.py +1 -1
- mistralai_gcp/models/fimcompletionrequest.py +9 -10
- mistralai_gcp/models/fimcompletionresponse.py +6 -6
- mistralai_gcp/models/fimcompletionstreamrequest.py +9 -10
- mistralai_gcp/models/httpvalidationerror.py +15 -8
- mistralai_gcp/models/imageurl.py +1 -1
- mistralai_gcp/models/imageurlchunk.py +1 -1
- mistralai_gcp/models/jsonschema.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +30 -0
- mistralai_gcp/models/mistralpromptmode.py +8 -0
- mistralai_gcp/models/no_response_error.py +17 -0
- mistralai_gcp/models/prediction.py +4 -0
- mistralai_gcp/models/referencechunk.py +1 -1
- mistralai_gcp/models/responseformat.py +5 -3
- mistralai_gcp/models/responseformats.py +5 -2
- mistralai_gcp/models/responsevalidationerror.py +27 -0
- mistralai_gcp/models/sdkerror.py +32 -14
- mistralai_gcp/models/systemmessage.py +8 -4
- mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
- mistralai_gcp/models/textchunk.py +1 -1
- mistralai_gcp/models/thinkchunk.py +35 -0
- mistralai_gcp/models/tool.py +2 -6
- mistralai_gcp/models/toolcall.py +2 -6
- mistralai_gcp/models/toolchoice.py +2 -6
- mistralai_gcp/models/toolchoiceenum.py +6 -1
- mistralai_gcp/models/toolmessage.py +2 -2
- mistralai_gcp/models/tooltypes.py +1 -1
- mistralai_gcp/models/usageinfo.py +71 -8
- mistralai_gcp/models/usermessage.py +2 -2
- mistralai_gcp/sdk.py +12 -10
- mistralai_gcp/sdkconfiguration.py +0 -7
- mistralai_gcp/types/basemodel.py +41 -3
- mistralai_gcp/utils/__init__.py +141 -46
- mistralai_gcp/utils/annotations.py +32 -8
- mistralai_gcp/utils/datetimes.py +23 -0
- mistralai_gcp/utils/enums.py +125 -25
- mistralai_gcp/utils/eventstreaming.py +10 -0
- mistralai_gcp/utils/forms.py +62 -30
- mistralai_gcp/utils/queryparams.py +14 -2
- mistralai_gcp/utils/requestbodies.py +3 -3
- mistralai_gcp/utils/retries.py +69 -5
- mistralai_gcp/utils/serializers.py +33 -23
- mistralai_gcp/utils/unmarshal_json_response.py +38 -0
- mistralai-1.10.0.dist-info/RECORD +0 -475
- {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/models/chatcompletionstreamrequest.py CHANGED

@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -19,7 +20,7 @@ from mistralai_gcp.types import (
 )
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


@@ -57,11 +58,13 @@ ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
     "ChatCompletionStreamRequestToolChoiceTypedDict",
     Union[ToolChoiceTypedDict, ToolChoiceEnum],
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


 ChatCompletionStreamRequestToolChoice = TypeAliasType(
     "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


 class ChatCompletionStreamRequestTypedDict(TypedDict):
@@ -80,17 +83,25 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     response_format: NotRequired[ResponseFormatTypedDict]
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
     tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
     presence_penalty: NotRequired[float]
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
     frequency_penalty: NotRequired[float]
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
     parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


 class ChatCompletionStreamRequest(BaseModel):
@@ -117,24 +128,34 @@ class ChatCompletionStreamRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     response_format: Optional[ResponseFormat] = None
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

     tools: OptionalNullable[List[Tool]] = UNSET
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""

     tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""

     presence_penalty: Optional[float] = None
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""

     frequency_penalty: Optional[float] = None
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""

     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""

     prediction: Optional[Prediction] = None
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""

     parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
+
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
@@ -145,6 +166,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "response_format",
             "tools",
             "tool_choice",
@@ -153,15 +175,24 @@ class ChatCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
+        ]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "metadata",
+            "tools",
+            "n",
+            "prompt_mode",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []

         serialized = handler(self)

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
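In practice the new surface here is `metadata` and `prompt_mode`; the rest of the hunk backfills docstrings. A minimal sketch of a request using both, assuming `ChatCompletionStreamRequest` is re-exported from `mistralai_gcp.models` as in earlier releases (the model id and message are illustrative):

```python
from mistralai_gcp.models import ChatCompletionStreamRequest

req = ChatCompletionStreamRequest(
    model="mistral-small-latest",                     # illustrative model id
    messages=[{"role": "user", "content": "Hello"}],
    metadata={"trace_id": "abc-123"},                 # new: nullable free-form dict
    prompt_mode="reasoning",                          # new: open enum, see mistralpromptmode.py below
)

# The wrapped serializer drops UNSET optionals, so metadata / prompt_mode only
# reach the wire when explicitly set.
payload = req.model_dump(by_alias=True)
```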
mistralai_gcp/models/completionresponsestreamchoice.py CHANGED

@@ -3,14 +3,20 @@
 from __future__ import annotations
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
 from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr
-from mistralai_gcp.utils import validate_open_enum
 from pydantic import model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import
+from typing_extensions import TypedDict


-FinishReason = Union[
+FinishReason = Union[
+    Literal[
+        "stop",
+        "length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
+]


 class CompletionResponseStreamChoiceTypedDict(TypedDict):
@@ -24,9 +30,7 @@ class CompletionResponseStreamChoice(BaseModel):

     delta: DeltaMessage

-    finish_reason:
-        Nullable[FinishReason], PlainValidator(validate_open_enum(False))
-    ]
+    finish_reason: Nullable[FinishReason]

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
@@ -38,7 +42,7 @@ class CompletionResponseStreamChoice(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
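`FinishReason` keeps its open-enum shape (the `UnrecognizedStr` arm) but drops the `PlainValidator` plumbing, so consumers should still treat the literal set as non-exhaustive. A hedged sketch of defensive handling, where `choice` stands in for a parsed `CompletionResponseStreamChoice`:

```python
KNOWN_FINISH_REASONS = {"stop", "length", "error", "tool_calls"}

def describe_finish(choice) -> str:
    # `choice` is a CompletionResponseStreamChoice; illustrative helper only.
    reason = choice.finish_reason
    if reason is None:
        return "still streaming"
    if reason in KNOWN_FINISH_REASONS:
        return f"finished: {reason}"
    # UnrecognizedStr arm: a newer server-side value this SDK version predates.
    return f"finished with unrecognized reason: {reason!r}"
```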
mistralai_gcp/models/fimcompletionrequest.py CHANGED

@@ -9,7 +9,7 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import NotRequired, TypeAliasType, TypedDict


@@ -27,10 +27,7 @@ r"""Stop generation if this token is detected. Or if one of these tokens is dete

 class FIMCompletionRequestTypedDict(TypedDict):
     model: str
-    r"""ID of the model to use.
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""
     prompt: str
     r"""The text/code to complete."""
     temperature: NotRequired[Nullable[float]]
@@ -45,6 +42,7 @@ class FIMCompletionRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
     min_tokens: NotRequired[Nullable[int]]
@@ -53,10 +51,7 @@ class FIMCompletionRequestTypedDict(TypedDict):

 class FIMCompletionRequest(BaseModel):
     model: str
-    r"""ID of the model to use.
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""

     prompt: str
     r"""The text/code to complete."""
@@ -79,6 +74,8 @@ class FIMCompletionRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""

@@ -94,6 +91,7 @@ class FIMCompletionRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
@@ -101,6 +99,7 @@ class FIMCompletionRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
@@ -110,7 +109,7 @@ class FIMCompletionRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
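FIM requests pick up the same `metadata` passthrough as chat. A sketch, assuming the model class is re-exported from `mistralai_gcp.models` (the `codestral-2405` id comes from the docstring removed above):

```python
from mistralai_gcp.models import FIMCompletionRequest

req = FIMCompletionRequest(
    model="codestral-2405",            # any FIM-capable model id
    prompt="def fib(n):",
    suffix="return fib(n - 1) + fib(n - 2)",
    metadata={"team": "tooling"},      # new: nullable dict, omitted unless set
)
```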
mistralai_gcp/models/fimcompletionresponse.py CHANGED

@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List
-from typing_extensions import
+from typing import List
+from typing_extensions import TypedDict


 class FIMCompletionResponseTypedDict(TypedDict):
@@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict):
     object: str
     model: str
     usage: UsageInfoTypedDict
-    created:
-    choices:
+    created: int
+    choices: List[ChatCompletionChoiceTypedDict]


 class FIMCompletionResponse(BaseModel):
@@ -26,6 +26,6 @@ class FIMCompletionResponse(BaseModel):

     usage: UsageInfo

-    created:
+    created: int

-    choices:
+    choices: List[ChatCompletionChoice]
mistralai_gcp/models/fimcompletionstreamrequest.py CHANGED

@@ -9,7 +9,7 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import NotRequired, TypeAliasType, TypedDict


@@ -27,10 +27,7 @@ r"""Stop generation if this token is detected. Or if one of these tokens is dete

 class FIMCompletionStreamRequestTypedDict(TypedDict):
     model: str
-    r"""ID of the model to use.
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""
     prompt: str
     r"""The text/code to complete."""
     temperature: NotRequired[Nullable[float]]
@@ -44,6 +41,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
     min_tokens: NotRequired[Nullable[int]]
@@ -52,10 +50,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):

 class FIMCompletionStreamRequest(BaseModel):
     model: str
-    r"""ID of the model to use.
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""

     prompt: str
     r"""The text/code to complete."""
@@ -77,6 +72,8 @@ class FIMCompletionStreamRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""

@@ -92,6 +89,7 @@ class FIMCompletionStreamRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
@@ -99,6 +97,7 @@ class FIMCompletionStreamRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
@@ -108,7 +107,7 @@ class FIMCompletionStreamRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/httpvalidationerror.py CHANGED

@@ -2,7 +2,9 @@

 from __future__ import annotations
 from .validationerror import ValidationError
-from
+from dataclasses import dataclass, field
+import httpx
+from mistralai_gcp.models import MistralGcpError
 from mistralai_gcp.types import BaseModel
 from typing import List, Optional

@@ -11,11 +13,16 @@ class HTTPValidationErrorData(BaseModel):
     detail: Optional[List[ValidationError]] = None


-
-
+@dataclass(unsafe_hash=True)
+class HTTPValidationError(MistralGcpError):
+    data: HTTPValidationErrorData = field(hash=False)

-    def __init__(
-        self
-
-
-
+    def __init__(
+        self,
+        data: HTTPValidationErrorData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        message = body or raw_response.text
+        super().__init__(message, raw_response, body)
+        object.__setattr__(self, "data", data)
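`HTTPValidationError` now hangs off the shared `MistralGcpError` base (added below) while keeping the parsed payload on `.data`. A hedged catch-site sketch; `call_the_api` stands in for any generated SDK method:

```python
from mistralai_gcp.models import HTTPValidationError  # assuming the usual re-export

try:
    call_the_api()                        # hypothetical SDK call
except HTTPValidationError as err:
    for item in err.data.detail or []:    # parsed ValidationError entries
        print(item)
    print(err.status_code, err.body)      # inherited from MistralGcpError
```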
mistralai_gcp/models/imageurl.py CHANGED

@@ -15,7 +15,7 @@ ImageURLChunkImageURLTypedDict = TypeAliasType(
 ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])


-ImageURLChunkType = Literal["image_url"]
+ImageURLChunkType = Literal["image_url",]


 class ImageURLChunkTypedDict(TypedDict):
mistralai_gcp/models/mistralgcperror.py ADDED

@@ -0,0 +1,30 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass, field
+
+
+@dataclass(unsafe_hash=True)
+class MistralGcpError(Exception):
+    """The base class for all HTTP error responses."""
+
+    message: str
+    status_code: int
+    body: str
+    headers: httpx.Headers = field(hash=False)
+    raw_response: httpx.Response = field(hash=False)
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        object.__setattr__(self, "message", message)
+        object.__setattr__(self, "status_code", raw_response.status_code)
+        object.__setattr__(
+            self, "body", body if body is not None else raw_response.text
+        )
+        object.__setattr__(self, "headers", raw_response.headers)
+        object.__setattr__(self, "raw_response", raw_response)
+
+    def __str__(self):
+        return self.message
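Since `__init__` copies everything it needs off the `httpx.Response`, an instance can be inspected without touching the transport again. A runnable sketch against a synthetic response (URL and payload are illustrative), assuming the class is re-exported from `mistralai_gcp.models`:

```python
import httpx
from mistralai_gcp.models import MistralGcpError

resp = httpx.Response(
    422,
    text='{"detail": []}',
    headers={"content-type": "application/json"},
    request=httpx.Request("POST", "https://example.invalid/v1/chat/completions"),
)
err = MistralGcpError("Unprocessable entity", resp)
print(err.status_code, err.headers.get("content-type"), err.body)
# -> 422 application/json {"detail": []}
```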
mistralai_gcp/models/mistralpromptmode.py ADDED

@@ -0,0 +1,8 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai_gcp.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr]
mistralai_gcp/models/no_response_error.py ADDED

@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from dataclasses import dataclass
+
+
+@dataclass(unsafe_hash=True)
+class NoResponseError(Exception):
+    """Error raised when no HTTP response is received from the server."""
+
+    message: str
+
+    def __init__(self, message: str = "No response received"):
+        object.__setattr__(self, "message", message)
+        super().__init__(message)
+
+    def __str__(self):
+        return self.message
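Note that `NoResponseError` derives from `Exception` directly rather than `MistralGcpError`, since there is no HTTP response to carry; a handler for the base class alone will not catch it. A hedged sketch (the client call is illustrative):

```python
from mistralai_gcp.models import MistralGcpError, NoResponseError  # assuming re-exports

try:
    client.chat.complete(...)                  # hypothetical call on a configured client
except NoResponseError as err:
    print("transport failure:", err.message)   # no status code or body available
except MistralGcpError as err:
    print("HTTP error:", err.status_code, err.body)
```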
mistralai_gcp/models/prediction.py CHANGED

@@ -10,11 +10,15 @@ from typing_extensions import Annotated, NotRequired, TypedDict


 class PredictionTypedDict(TypedDict):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     type: Literal["content"]
     content: NotRequired[str]


 class Prediction(BaseModel):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     TYPE: Annotated[
         Annotated[
             Optional[Literal["content"]], AfterValidator(validate_const("content"))
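The new docstrings make explicit what `prediction` is for: predicted outputs the model can mostly copy through. A sketch, assuming the `type` discriminator defaults to `"content"` as the `validate_const` annotation suggests:

```python
from mistralai_gcp.models import Prediction  # assuming the usual re-export

# Supply text the model is expected to largely reproduce verbatim.
prediction = Prediction(content="def add(a: int, b: int) -> int:\n    return a + b")
# then e.g. ChatCompletionStreamRequest(..., prediction=prediction)
```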
mistralai_gcp/models/responseformat.py CHANGED

@@ -16,14 +16,16 @@ from typing_extensions import NotRequired, TypedDict


 class ResponseFormatTypedDict(TypedDict):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: NotRequired[ResponseFormats]
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
     json_schema: NotRequired[Nullable[JSONSchemaTypedDict]]


 class ResponseFormat(BaseModel):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: Optional[ResponseFormats] = None
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""

     json_schema: OptionalNullable[JSONSchema] = UNSET

@@ -37,7 +39,7 @@ class ResponseFormat(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
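The format documentation moves from the `type` field to the class itself and now covers the `json_schema` mode. A minimal sketch of the three modes, assuming the usual re-export:

```python
from mistralai_gcp.models import ResponseFormat

ResponseFormat(type="text")         # default behaviour
ResponseFormat(type="json_object")  # JSON mode: also instruct the model to emit JSON
ResponseFormat(type="json_schema")  # schema mode: pair with json_schema=<JSONSchema payload>
```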
mistralai_gcp/models/responseformats.py CHANGED

@@ -4,5 +4,8 @@ from __future__ import annotations
 from typing import Literal


-ResponseFormats = Literal[
-
+ResponseFormats = Literal[
+    "text",
+    "json_object",
+    "json_schema",
+]
mistralai_gcp/models/responsevalidationerror.py ADDED

@@ -0,0 +1,27 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass
+
+from mistralai_gcp.models import MistralGcpError
+
+
+@dataclass(unsafe_hash=True)
+class ResponseValidationError(MistralGcpError):
+    """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""
+
+    def __init__(
+        self,
+        message: str,
+        raw_response: httpx.Response,
+        cause: Exception,
+        body: Optional[str] = None,
+    ):
+        message = f"{message}: {cause}"
+        super().__init__(message, raw_response, body)
+
+    @property
+    def cause(self):
+        """Normally the Pydantic ValidationError"""
+        return self.__cause__
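Unmarshalling failures now surface as their own error type, with the original exception chained. A hedged sketch of inspecting one (the client call is illustrative):

```python
from mistralai_gcp.models import ResponseValidationError  # assuming the usual re-export

try:
    client.chat.complete(...)      # hypothetical call whose payload fails validation
except ResponseValidationError as err:
    print(err.message)             # "<context>: <cause>"
    print(repr(err.cause))         # chained exception, normally pydantic.ValidationError
```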
mistralai_gcp/models/sdkerror.py CHANGED

@@ -1,22 +1,40 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

-from dataclasses import dataclass
-from typing import Optional
 import httpx
+from typing import Optional
+from dataclasses import dataclass
+
+from mistralai_gcp.models import MistralGcpError
+
+MAX_MESSAGE_LEN = 10_000
+
+
+@dataclass(unsafe_hash=True)
+class SDKError(MistralGcpError):
+    """The fallback error class if no more specific error class is matched."""
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        body_display = body or raw_response.text or '""'

+        if message:
+            message += ": "
+        message += f"Status {raw_response.status_code}"

-
-
-
+        headers = raw_response.headers
+        content_type = headers.get("content-type", '""')
+        if content_type != "application/json":
+            if " " in content_type:
+                content_type = f'"{content_type}"'
+            message += f" Content-Type {content_type}"

-
-
-
-
+        if len(body_display) > MAX_MESSAGE_LEN:
+            truncated = body_display[:MAX_MESSAGE_LEN]
+            remaining = len(body_display) - MAX_MESSAGE_LEN
+            body_display = f"{truncated}...and {remaining} more chars"

-
-
-        if len(self.body) > 0:
-            body = f"\n{self.body}"
+        message += f". Body: {body_display}"
+        message = message.strip()

-
+        super().__init__(message, raw_response, body)