mistralai 1.10.0__py3-none-any.whl → 1.11.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
- mistralai/_hooks/tracing.py +28 -3
- mistralai/_version.py +3 -3
- mistralai/accesses.py +22 -12
- mistralai/agents.py +88 -44
- mistralai/basesdk.py +6 -0
- mistralai/chat.py +96 -40
- mistralai/classifiers.py +48 -23
- mistralai/conversations.py +186 -64
- mistralai/documents.py +72 -26
- mistralai/embeddings.py +24 -9
- mistralai/extra/README.md +1 -1
- mistralai/extra/mcp/auth.py +10 -11
- mistralai/extra/mcp/base.py +17 -16
- mistralai/extra/mcp/sse.py +13 -15
- mistralai/extra/mcp/stdio.py +5 -6
- mistralai/extra/observability/otel.py +47 -68
- mistralai/extra/run/context.py +33 -43
- mistralai/extra/run/result.py +29 -30
- mistralai/extra/run/tools.py +8 -9
- mistralai/extra/struct_chat.py +15 -8
- mistralai/extra/utils/response_format.py +5 -3
- mistralai/files.py +58 -24
- mistralai/fim.py +20 -12
- mistralai/httpclient.py +0 -1
- mistralai/jobs.py +65 -26
- mistralai/libraries.py +20 -10
- mistralai/mistral_agents.py +438 -30
- mistralai/mistral_jobs.py +62 -17
- mistralai/models/__init__.py +46 -1
- mistralai/models/agent.py +1 -1
- mistralai/models/agentconversation.py +1 -1
- mistralai/models/agenthandoffdoneevent.py +1 -1
- mistralai/models/agenthandoffentry.py +3 -2
- mistralai/models/agenthandoffstartedevent.py +1 -1
- mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
- mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
- mistralai/models/agents_api_v1_agents_listop.py +5 -1
- mistralai/models/agents_api_v1_conversations_listop.py +1 -1
- mistralai/models/agentscompletionrequest.py +2 -5
- mistralai/models/agentscompletionstreamrequest.py +2 -5
- mistralai/models/archiveftmodelout.py +1 -1
- mistralai/models/assistantmessage.py +1 -1
- mistralai/models/audiochunk.py +1 -1
- mistralai/models/audioencoding.py +18 -0
- mistralai/models/audioformat.py +17 -0
- mistralai/models/basemodelcard.py +1 -1
- mistralai/models/batchjobin.py +18 -9
- mistralai/models/batchjobout.py +6 -1
- mistralai/models/batchjobsout.py +1 -1
- mistralai/models/batchrequest.py +48 -0
- mistralai/models/chatcompletionchoice.py +10 -5
- mistralai/models/chatcompletionrequest.py +2 -5
- mistralai/models/chatcompletionstreamrequest.py +2 -5
- mistralai/models/classificationrequest.py +37 -3
- mistralai/models/classifierdetailedjobout.py +4 -2
- mistralai/models/classifierftmodelout.py +3 -2
- mistralai/models/classifierjobout.py +4 -2
- mistralai/models/codeinterpretertool.py +1 -1
- mistralai/models/completiondetailedjobout.py +5 -2
- mistralai/models/completionftmodelout.py +3 -2
- mistralai/models/completionjobout.py +5 -2
- mistralai/models/completionresponsestreamchoice.py +9 -8
- mistralai/models/conversationappendrequest.py +4 -1
- mistralai/models/conversationappendstreamrequest.py +4 -1
- mistralai/models/conversationhistory.py +2 -1
- mistralai/models/conversationmessages.py +1 -1
- mistralai/models/conversationrequest.py +5 -1
- mistralai/models/conversationresponse.py +2 -1
- mistralai/models/conversationrestartrequest.py +4 -1
- mistralai/models/conversationrestartstreamrequest.py +4 -1
- mistralai/models/conversationstreamrequest.py +5 -1
- mistralai/models/documentlibrarytool.py +1 -1
- mistralai/models/documenturlchunk.py +1 -1
- mistralai/models/embeddingdtype.py +7 -1
- mistralai/models/embeddingrequest.py +11 -3
- mistralai/models/encodingformat.py +4 -1
- mistralai/models/entitytype.py +8 -1
- mistralai/models/filepurpose.py +8 -1
- mistralai/models/files_api_routes_list_filesop.py +4 -11
- mistralai/models/files_api_routes_upload_fileop.py +2 -6
- mistralai/models/fileschema.py +3 -5
- mistralai/models/finetuneablemodeltype.py +4 -1
- mistralai/models/ftclassifierlossfunction.py +4 -1
- mistralai/models/ftmodelcard.py +1 -1
- mistralai/models/functioncallentry.py +3 -2
- mistralai/models/functioncallevent.py +1 -1
- mistralai/models/functionresultentry.py +3 -2
- mistralai/models/functiontool.py +1 -1
- mistralai/models/githubrepositoryin.py +1 -1
- mistralai/models/githubrepositoryout.py +1 -1
- mistralai/models/httpvalidationerror.py +4 -2
- mistralai/models/imagegenerationtool.py +1 -1
- mistralai/models/imageurlchunk.py +1 -1
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
- mistralai/models/jobsout.py +1 -1
- mistralai/models/legacyjobmetadataout.py +1 -1
- mistralai/models/messageinputentry.py +9 -3
- mistralai/models/messageoutputentry.py +6 -3
- mistralai/models/messageoutputevent.py +4 -2
- mistralai/models/mistralerror.py +11 -7
- mistralai/models/mistralpromptmode.py +1 -1
- mistralai/models/modelconversation.py +1 -1
- mistralai/models/no_response_error.py +5 -1
- mistralai/models/ocrrequest.py +11 -1
- mistralai/models/ocrtableobject.py +4 -1
- mistralai/models/referencechunk.py +1 -1
- mistralai/models/requestsource.py +5 -1
- mistralai/models/responsedoneevent.py +1 -1
- mistralai/models/responseerrorevent.py +1 -1
- mistralai/models/responseformats.py +5 -1
- mistralai/models/responsestartedevent.py +1 -1
- mistralai/models/responsevalidationerror.py +2 -0
- mistralai/models/retrievefileout.py +3 -5
- mistralai/models/sampletype.py +7 -1
- mistralai/models/sdkerror.py +2 -0
- mistralai/models/shareenum.py +7 -1
- mistralai/models/sharingdelete.py +2 -4
- mistralai/models/sharingin.py +3 -5
- mistralai/models/source.py +8 -1
- mistralai/models/systemmessage.py +1 -1
- mistralai/models/textchunk.py +1 -1
- mistralai/models/thinkchunk.py +1 -1
- mistralai/models/timestampgranularity.py +1 -1
- mistralai/models/tool.py +2 -6
- mistralai/models/toolcall.py +2 -6
- mistralai/models/toolchoice.py +2 -6
- mistralai/models/toolchoiceenum.py +6 -1
- mistralai/models/toolexecutiondeltaevent.py +2 -1
- mistralai/models/toolexecutiondoneevent.py +2 -1
- mistralai/models/toolexecutionentry.py +4 -2
- mistralai/models/toolexecutionstartedevent.py +2 -1
- mistralai/models/toolfilechunk.py +13 -5
- mistralai/models/toolmessage.py +1 -1
- mistralai/models/toolreferencechunk.py +15 -5
- mistralai/models/tooltypes.py +1 -1
- mistralai/models/transcriptionsegmentchunk.py +1 -1
- mistralai/models/transcriptionstreamdone.py +1 -1
- mistralai/models/transcriptionstreamlanguage.py +1 -1
- mistralai/models/transcriptionstreamsegmentdelta.py +1 -1
- mistralai/models/transcriptionstreamtextdelta.py +1 -1
- mistralai/models/unarchiveftmodelout.py +1 -1
- mistralai/models/uploadfileout.py +3 -5
- mistralai/models/usermessage.py +1 -1
- mistralai/models/wandbintegration.py +1 -1
- mistralai/models/wandbintegrationout.py +1 -1
- mistralai/models/websearchpremiumtool.py +1 -1
- mistralai/models/websearchtool.py +1 -1
- mistralai/models_.py +24 -12
- mistralai/ocr.py +38 -10
- mistralai/sdk.py +2 -2
- mistralai/transcriptions.py +28 -12
- mistralai/types/basemodel.py +41 -3
- mistralai/utils/__init__.py +0 -3
- mistralai/utils/annotations.py +32 -8
- mistralai/utils/enums.py +60 -0
- mistralai/utils/forms.py +21 -10
- mistralai/utils/queryparams.py +14 -2
- mistralai/utils/requestbodies.py +3 -3
- mistralai/utils/retries.py +69 -5
- mistralai/utils/serializers.py +0 -20
- mistralai/utils/unmarshal_json_response.py +15 -1
- {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/METADATA +144 -159
- mistralai-1.11.1.dist-info/RECORD +495 -0
- {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +21 -5
- mistralai_azure/chat.py +82 -109
- mistralai_azure/httpclient.py +0 -1
- mistralai_azure/models/__init__.py +66 -4
- mistralai_azure/models/assistantmessage.py +1 -1
- mistralai_azure/models/chatcompletionchoice.py +10 -7
- mistralai_azure/models/chatcompletionrequest.py +24 -10
- mistralai_azure/models/chatcompletionstreamrequest.py +24 -10
- mistralai_azure/models/completionresponsestreamchoice.py +11 -7
- mistralai_azure/models/documenturlchunk.py +1 -1
- mistralai_azure/models/httpvalidationerror.py +15 -8
- mistralai_azure/models/imageurlchunk.py +1 -1
- mistralai_azure/models/mistralazureerror.py +30 -0
- mistralai_azure/models/mistralpromptmode.py +1 -1
- mistralai_azure/models/no_response_error.py +17 -0
- mistralai_azure/models/ocrpageobject.py +32 -5
- mistralai_azure/models/ocrrequest.py +20 -1
- mistralai_azure/models/ocrtableobject.py +34 -0
- mistralai_azure/models/prediction.py +4 -0
- mistralai_azure/models/referencechunk.py +1 -1
- mistralai_azure/models/responseformat.py +4 -2
- mistralai_azure/models/responseformats.py +5 -2
- mistralai_azure/models/responsevalidationerror.py +27 -0
- mistralai_azure/models/sdkerror.py +32 -14
- mistralai_azure/models/systemmessage.py +8 -4
- mistralai_azure/models/systemmessagecontentchunks.py +21 -0
- mistralai_azure/models/textchunk.py +1 -1
- mistralai_azure/models/thinkchunk.py +35 -0
- mistralai_azure/models/tool.py +2 -6
- mistralai_azure/models/toolcall.py +2 -6
- mistralai_azure/models/toolchoice.py +2 -6
- mistralai_azure/models/toolchoiceenum.py +6 -1
- mistralai_azure/models/toolmessage.py +1 -1
- mistralai_azure/models/tooltypes.py +1 -1
- mistralai_azure/models/usermessage.py +1 -1
- mistralai_azure/ocr.py +39 -40
- mistralai_azure/types/basemodel.py +41 -3
- mistralai_azure/utils/__init__.py +18 -8
- mistralai_azure/utils/annotations.py +32 -8
- mistralai_azure/utils/enums.py +60 -0
- mistralai_azure/utils/eventstreaming.py +10 -0
- mistralai_azure/utils/forms.py +21 -10
- mistralai_azure/utils/queryparams.py +14 -2
- mistralai_azure/utils/requestbodies.py +3 -3
- mistralai_azure/utils/retries.py +69 -5
- mistralai_azure/utils/serializers.py +3 -22
- mistralai_azure/utils/unmarshal_json_response.py +38 -0
- mistralai_gcp/_hooks/types.py +7 -0
- mistralai_gcp/_version.py +4 -4
- mistralai_gcp/basesdk.py +33 -25
- mistralai_gcp/chat.py +98 -109
- mistralai_gcp/fim.py +62 -85
- mistralai_gcp/httpclient.py +6 -17
- mistralai_gcp/models/__init__.py +321 -116
- mistralai_gcp/models/assistantmessage.py +2 -2
- mistralai_gcp/models/chatcompletionchoice.py +10 -7
- mistralai_gcp/models/chatcompletionrequest.py +38 -7
- mistralai_gcp/models/chatcompletionresponse.py +6 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +38 -7
- mistralai_gcp/models/completionresponsestreamchoice.py +12 -8
- mistralai_gcp/models/deltamessage.py +1 -1
- mistralai_gcp/models/fimcompletionrequest.py +9 -10
- mistralai_gcp/models/fimcompletionresponse.py +6 -6
- mistralai_gcp/models/fimcompletionstreamrequest.py +9 -10
- mistralai_gcp/models/httpvalidationerror.py +15 -8
- mistralai_gcp/models/imageurl.py +1 -1
- mistralai_gcp/models/imageurlchunk.py +1 -1
- mistralai_gcp/models/jsonschema.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +30 -0
- mistralai_gcp/models/mistralpromptmode.py +8 -0
- mistralai_gcp/models/no_response_error.py +17 -0
- mistralai_gcp/models/prediction.py +4 -0
- mistralai_gcp/models/referencechunk.py +1 -1
- mistralai_gcp/models/responseformat.py +5 -3
- mistralai_gcp/models/responseformats.py +5 -2
- mistralai_gcp/models/responsevalidationerror.py +27 -0
- mistralai_gcp/models/sdkerror.py +32 -14
- mistralai_gcp/models/systemmessage.py +8 -4
- mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
- mistralai_gcp/models/textchunk.py +1 -1
- mistralai_gcp/models/thinkchunk.py +35 -0
- mistralai_gcp/models/tool.py +2 -6
- mistralai_gcp/models/toolcall.py +2 -6
- mistralai_gcp/models/toolchoice.py +2 -6
- mistralai_gcp/models/toolchoiceenum.py +6 -1
- mistralai_gcp/models/toolmessage.py +2 -2
- mistralai_gcp/models/tooltypes.py +1 -1
- mistralai_gcp/models/usageinfo.py +71 -8
- mistralai_gcp/models/usermessage.py +2 -2
- mistralai_gcp/sdk.py +12 -10
- mistralai_gcp/sdkconfiguration.py +0 -7
- mistralai_gcp/types/basemodel.py +41 -3
- mistralai_gcp/utils/__init__.py +141 -46
- mistralai_gcp/utils/annotations.py +32 -8
- mistralai_gcp/utils/datetimes.py +23 -0
- mistralai_gcp/utils/enums.py +125 -25
- mistralai_gcp/utils/eventstreaming.py +10 -0
- mistralai_gcp/utils/forms.py +62 -30
- mistralai_gcp/utils/queryparams.py +14 -2
- mistralai_gcp/utils/requestbodies.py +3 -3
- mistralai_gcp/utils/retries.py +69 -5
- mistralai_gcp/utils/serializers.py +33 -23
- mistralai_gcp/utils/unmarshal_json_response.py +38 -0
- mistralai-1.10.0.dist-info/RECORD +0 -475
- {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/licenses/LICENSE +0 -0

Selected hunks from the `mistralai_azure` sub-package follow.

`mistralai_azure/models/chatcompletionchoice.py`

```diff
@@ -3,14 +3,19 @@
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from mistralai_azure.types import BaseModel, UnrecognizedStr
-from mistralai_azure.utils import validate_open_enum
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import
+from typing_extensions import TypedDict


 ChatCompletionChoiceFinishReason = Union[
-    Literal[
+    Literal[
+        "stop",
+        "length",
+        "model_length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
 ]
@@ -25,6 +30,4 @@ class ChatCompletionChoice(BaseModel):

     message: AssistantMessage

-    finish_reason:
-        ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False))
-    ]
+    finish_reason: ChatCompletionChoiceFinishReason
```

`mistralai_azure/models/chatcompletionrequest.py`

```diff
@@ -18,10 +18,9 @@ from mistralai_azure.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_azure.utils import get_discriminator
+from mistralai_azure.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


@@ -63,11 +62,13 @@ ChatCompletionRequestToolChoiceTypedDict = TypeAliasType(
     "ChatCompletionRequestToolChoiceTypedDict",
     Union[ToolChoiceTypedDict, ToolChoiceEnum],
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


 ChatCompletionRequestToolChoice = TypeAliasType(
     "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


 class ChatCompletionRequestTypedDict(TypedDict):
@@ -87,17 +88,23 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     response_format: NotRequired[ResponseFormatTypedDict]
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
     tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
     presence_penalty: NotRequired[float]
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
     frequency_penalty: NotRequired[float]
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
     parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
@@ -129,28 +136,33 @@ class ChatCompletionRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     response_format: Optional[ResponseFormat] = None
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

     tools: OptionalNullable[List[Tool]] = UNSET
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""

     tool_choice: Optional[ChatCompletionRequestToolChoice] = None
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""

     presence_penalty: Optional[float] = None
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""

     frequency_penalty: Optional[float] = None
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""

     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""

     prediction: Optional[Prediction] = None
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""

     parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None
@@ -166,6 +178,7 @@ class ChatCompletionRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "response_format",
             "tools",
             "tool_choice",
@@ -181,6 +194,7 @@ class ChatCompletionRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "tools",
             "n",
             "prompt_mode",
```
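
The practical upshot of the `ChatCompletionRequest` hunks is the new optional, nullable `metadata` dictionary (the remaining additions are docstrings on existing fields). Below is a minimal usage sketch, not part of the diff: client construction follows the `mistralai_azure` README as I understand it, and the metadata keys are invented placeholders.

```python
# Sketch only. Assumes the MistralAzure client and chat.complete() from the
# mistralai_azure README; the metadata keys below are invented placeholders.
import os
from mistralai_azure import MistralAzure

client = MistralAzure(
    azure_api_key=os.environ["AZURE_API_KEY"],
    azure_endpoint=os.environ["AZURE_ENDPOINT"],
)

res = client.chat.complete(
    model="azureai",
    messages=[{"role": "user", "content": "Hello!"}],
    # New in 1.11.x: free-form request metadata, serialized only when set
    # (note its addition to both optional_fields and nullable_fields above).
    metadata={"trace_id": "abc123"},
)
print(res.choices[0].message.content)
```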

`mistralai_azure/models/chatcompletionstreamrequest.py`

```diff
@@ -18,10 +18,9 @@ from mistralai_azure.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_azure.utils import get_discriminator
+from mistralai_azure.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


@@ -59,11 +58,13 @@ ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
     "ChatCompletionStreamRequestToolChoiceTypedDict",
     Union[ToolChoiceTypedDict, ToolChoiceEnum],
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


 ChatCompletionStreamRequestToolChoice = TypeAliasType(
     "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""


 class ChatCompletionStreamRequestTypedDict(TypedDict):
@@ -82,17 +83,23 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     response_format: NotRequired[ResponseFormatTypedDict]
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
     tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
     presence_penalty: NotRequired[float]
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
     frequency_penalty: NotRequired[float]
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
     parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
@@ -123,28 +130,33 @@ class ChatCompletionStreamRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     response_format: Optional[ResponseFormat] = None
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

     tools: OptionalNullable[List[Tool]] = UNSET
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""

     tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""

     presence_penalty: Optional[float] = None
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""

     frequency_penalty: Optional[float] = None
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""

     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""

     prediction: Optional[Prediction] = None
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""

     parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None
@@ -160,6 +172,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "response_format",
             "tools",
             "tool_choice",
@@ -175,6 +188,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "tools",
             "n",
             "prompt_mode",
```

`mistralai_azure/models/completionresponsestreamchoice.py`

```diff
@@ -3,14 +3,20 @@
 from __future__ import annotations
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
 from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr
-from mistralai_azure.utils import validate_open_enum
 from pydantic import model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import
+from typing_extensions import TypedDict


-FinishReason = Union[
+FinishReason = Union[
+    Literal[
+        "stop",
+        "length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
+]


 class CompletionResponseStreamChoiceTypedDict(TypedDict):
@@ -24,9 +30,7 @@ class CompletionResponseStreamChoice(BaseModel):

     delta: DeltaMessage

-    finish_reason:
-        Nullable[FinishReason], PlainValidator(validate_open_enum(False))
-    ]
+    finish_reason: Nullable[FinishReason]

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
```
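
Both choice models drop the `PlainValidator(validate_open_enum(False))` annotation in favor of a plain `Union[Literal[...], UnrecognizedStr]` type, so `finish_reason` remains an open enum: values this SDK release does not know arrive as ordinary strings instead of failing validation. A hedged sketch of forward-compatible handling (the helper name is invented):

```python
# Sketch only: defensive handling of the open finish_reason enum.
# `choice` stands for a ChatCompletionChoice or stream choice instance.
def describe_finish(choice) -> str:
    reason = choice.finish_reason
    if reason == "tool_calls":
        return "model requested tool execution"
    if reason in ("stop", "length", "model_length", "error"):
        return f"generation ended: {reason}"
    # UnrecognizedStr fallback: a value newer than this SDK release.
    return f"unrecognized finish reason: {reason!r}"
```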

`mistralai_azure/models/httpvalidationerror.py`

```diff
@@ -2,7 +2,9 @@

 from __future__ import annotations
 from .validationerror import ValidationError
-from
+from dataclasses import dataclass, field
+import httpx
+from mistralai_azure.models import MistralAzureError
 from mistralai_azure.types import BaseModel
 from typing import List, Optional

@@ -11,11 +13,16 @@ class HTTPValidationErrorData(BaseModel):
     detail: Optional[List[ValidationError]] = None


-
-
+@dataclass(unsafe_hash=True)
+class HTTPValidationError(MistralAzureError):
+    data: HTTPValidationErrorData = field(hash=False)

-    def __init__(
-        self
-
-
-
+    def __init__(
+        self,
+        data: HTTPValidationErrorData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        message = body or raw_response.text
+        super().__init__(message, raw_response, body)
+        object.__setattr__(self, "data", data)
```

`mistralai_azure/models/imageurlchunk.py`

```diff
@@ -15,7 +15,7 @@ ImageURLChunkImageURLTypedDict = TypeAliasType(
 ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])


-ImageURLChunkType = Literal["image_url"]
+ImageURLChunkType = Literal["image_url",]


 class ImageURLChunkTypedDict(TypedDict):
```

`mistralai_azure/models/mistralazureerror.py` (new file)

```diff
@@ -0,0 +1,30 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass, field
+
+
+@dataclass(unsafe_hash=True)
+class MistralAzureError(Exception):
+    """The base class for all HTTP error responses."""
+
+    message: str
+    status_code: int
+    body: str
+    headers: httpx.Headers = field(hash=False)
+    raw_response: httpx.Response = field(hash=False)
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        object.__setattr__(self, "message", message)
+        object.__setattr__(self, "status_code", raw_response.status_code)
+        object.__setattr__(
+            self, "body", body if body is not None else raw_response.text
+        )
+        object.__setattr__(self, "headers", raw_response.headers)
+        object.__setattr__(self, "raw_response", raw_response)
+
+    def __str__(self):
+        return self.message
```

`mistralai_azure/models/no_response_error.py` (new file)

```diff
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from dataclasses import dataclass
+
+
+@dataclass(unsafe_hash=True)
+class NoResponseError(Exception):
+    """Error raised when no HTTP response is received from the server."""
+
+    message: str
+
+    def __init__(self, message: str = "No response received"):
+        object.__setattr__(self, "message", message)
+        super().__init__(message)
+
+    def __str__(self):
+        return self.message
```
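
Taken together, `MistralAzureError` becomes the base class for HTTP error responses, `HTTPValidationError` layers the parsed `data` payload on top of it, and `NoResponseError` covers the case where no response arrives at all. A hedged sketch of what error handling could look like, assuming these classes are re-exported from `mistralai_azure.models` and that `client` is a configured `MistralAzure` instance:

```python
# Sketch only; assumes the new exception classes are exported from
# mistralai_azure.models and that `client` is a configured MistralAzure.
from mistralai_azure import models

try:
    res = client.chat.complete(
        model="azureai",
        messages=[{"role": "user", "content": "Hello!"}],
    )
except models.HTTPValidationError as e:
    # 422 responses: structured detail plus the base-class fields.
    print(e.status_code, e.data.detail)
except models.MistralAzureError as e:
    # Any other HTTP error response: message, status_code, body, headers.
    print(e.status_code, e.body, dict(e.headers))
except models.NoResponseError as e:
    # Transport-level failure: no HTTP response was received at all.
    print(e.message)
```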

`mistralai_azure/models/ocrpageobject.py`

```diff
@@ -3,10 +3,17 @@
 from __future__ import annotations
 from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict
 from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict
-from
+from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict
+from mistralai_azure.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from pydantic import model_serializer
-from typing import List
-from typing_extensions import TypedDict
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict


 class OCRPageObjectTypedDict(TypedDict):
@@ -18,6 +25,14 @@ class OCRPageObjectTypedDict(TypedDict):
     r"""List of all extracted images in the page"""
     dimensions: Nullable[OCRPageDimensionsTypedDict]
     r"""The dimensions of the PDF Page's screenshot image"""
+    tables: NotRequired[List[OCRTableObjectTypedDict]]
+    r"""List of all extracted tables in the page"""
+    hyperlinks: NotRequired[List[str]]
+    r"""List of all hyperlinks in the page"""
+    header: NotRequired[Nullable[str]]
+    r"""Header of the page"""
+    footer: NotRequired[Nullable[str]]
+    r"""Footer of the page"""


 class OCRPageObject(BaseModel):
@@ -33,10 +48,22 @@
     dimensions: Nullable[OCRPageDimensions]
     r"""The dimensions of the PDF Page's screenshot image"""

+    tables: Optional[List[OCRTableObject]] = None
+    r"""List of all extracted tables in the page"""
+
+    hyperlinks: Optional[List[str]] = None
+    r"""List of all hyperlinks in the page"""
+
+    header: OptionalNullable[str] = UNSET
+    r"""Header of the page"""
+
+    footer: OptionalNullable[str] = UNSET
+    r"""Footer of the page"""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["dimensions"]
+        optional_fields = ["tables", "hyperlinks", "header", "footer"]
+        nullable_fields = ["header", "footer", "dimensions"]
         null_default_fields = []

         serialized = handler(self)
```

`mistralai_azure/models/ocrrequest.py`

```diff
@@ -13,7 +13,7 @@ from mistralai_azure.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Optional, Union
+from typing import List, Literal, Optional, Union
 from typing_extensions import NotRequired, TypeAliasType, TypedDict


@@ -28,6 +28,12 @@ Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURL
 r"""Document to run OCR on"""


+TableFormat = Literal[
+    "markdown",
+    "html",
+]
+
+
 class OCRRequestTypedDict(TypedDict):
     model: Nullable[str]
     document: DocumentTypedDict
@@ -45,6 +51,9 @@
     r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field"""
     document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]]
     r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field"""
+    table_format: NotRequired[Nullable[TableFormat]]
+    extract_header: NotRequired[bool]
+    extract_footer: NotRequired[bool]


 class OCRRequest(BaseModel):
@@ -73,6 +82,12 @@
     document_annotation_format: OptionalNullable[ResponseFormat] = UNSET
     r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field"""

+    table_format: OptionalNullable[TableFormat] = UNSET
+
+    extract_header: Optional[bool] = None
+
+    extract_footer: Optional[bool] = None
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -83,6 +98,9 @@
             "image_min_size",
             "bbox_annotation_format",
             "document_annotation_format",
+            "table_format",
+            "extract_header",
+            "extract_footer",
         ]
         nullable_fields = [
             "model",
@@ -92,6 +110,7 @@
             "image_min_size",
             "bbox_annotation_format",
             "document_annotation_format",
+            "table_format",
         ]
         null_default_fields = []

```

`mistralai_azure/models/ocrtableobject.py` (new file)

```diff
@@ -0,0 +1,34 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai_azure.types import BaseModel
+import pydantic
+from typing import Literal
+from typing_extensions import Annotated, TypedDict
+
+
+Format = Literal[
+    "markdown",
+    "html",
+]
+r"""Format of the table"""
+
+
+class OCRTableObjectTypedDict(TypedDict):
+    id: str
+    r"""Table ID for extracted table in a page"""
+    content: str
+    r"""Content of the table in the given format"""
+    format_: Format
+    r"""Format of the table"""
+
+
+class OCRTableObject(BaseModel):
+    id: str
+    r"""Table ID for extracted table in a page"""
+
+    content: str
+    r"""Content of the table in the given format"""
+
+    format_: Annotated[Format, pydantic.Field(alias="format")]
+    r"""Format of the table"""
```
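
The three OCR hunks fit together: `OCRRequest` gains `table_format`, `extract_header` and `extract_footer`; each `OCRPageObject` gains the corresponding `tables`, `hyperlinks`, `header` and `footer` outputs; and `OCRTableObject` is the new per-table payload. A hedged sketch, assuming the `ocr.process` method name mirrors the main `mistralai` package; the model name and document URL are placeholders.

```python
# Sketch only; the method and model names are assumptions, the URL a placeholder.
res = client.ocr.process(
    model="mistral-ocr-latest",
    document={"type": "document_url", "document_url": "https://example.com/report.pdf"},
    table_format="markdown",  # TableFormat: "markdown" or "html"
    extract_header=True,
    extract_footer=True,
)

for page in res.pages:
    # The new OCRPageObject fields are optional, so guard against unset values.
    for table in page.tables or []:
        print(table.id, table.format_, table.content[:80])
    if page.header:
        print("page header:", page.header)
    if page.footer:
        print("page footer:", page.footer)
```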

`mistralai_azure/models/prediction.py`

```diff
@@ -10,11 +10,15 @@ from typing_extensions import Annotated, NotRequired, TypedDict


 class PredictionTypedDict(TypedDict):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     type: Literal["content"]
     content: NotRequired[str]


 class Prediction(BaseModel):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     TYPE: Annotated[
         Annotated[
             Optional[Literal["content"]], AfterValidator(validate_const("content"))
```

`mistralai_azure/models/responseformat.py`

```diff
@@ -16,14 +16,16 @@ from typing_extensions import NotRequired, TypedDict


 class ResponseFormatTypedDict(TypedDict):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: NotRequired[ResponseFormats]
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
     json_schema: NotRequired[Nullable[JSONSchemaTypedDict]]


 class ResponseFormat(BaseModel):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: Optional[ResponseFormats] = None
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""

     json_schema: OptionalNullable[JSONSchema] = UNSET

```

`mistralai_azure/models/responseformats.py`

```diff
@@ -4,5 +4,8 @@ from __future__ import annotations
 from typing import Literal


-ResponseFormats = Literal[
-
+ResponseFormats = Literal[
+    "text",
+    "json_object",
+    "json_schema",
+]
```
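
With the `ResponseFormats` literal now spelled out as `"text"`, `"json_object"` and `"json_schema"`, the docstring added to `ResponseFormat` above still applies: JSON mode requires telling the model to emit JSON in a system or user message. A hedged sketch of JSON mode against the same assumed `client`:

```python
# Sketch only: exercising the "json_object" member of ResponseFormats.
# Per the docstring above, the prompt itself must also ask for JSON.
res = client.chat.complete(
    model="azureai",
    messages=[
        {
            "role": "user",
            "content": 'Answer as a JSON object {"city": ...}. Where is the Eiffel Tower?',
        }
    ],
    response_format={"type": "json_object"},
)
print(res.choices[0].message.content)
```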

`mistralai_azure/models/responsevalidationerror.py` (new file)

```diff
@@ -0,0 +1,27 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass
+
+from mistralai_azure.models import MistralAzureError
+
+
+@dataclass(unsafe_hash=True)
+class ResponseValidationError(MistralAzureError):
+    """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""
+
+    def __init__(
+        self,
+        message: str,
+        raw_response: httpx.Response,
+        cause: Exception,
+        body: Optional[str] = None,
+    ):
+        message = f"{message}: {cause}"
+        super().__init__(message, raw_response, body)
+
+    @property
+    def cause(self):
+        """Normally the Pydantic ValidationError"""
+        return self.__cause__
```
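
Finally, `ResponseValidationError` gives 2xx responses that fail model validation their own subclass of `MistralAzureError`, keeping both the underlying cause and the raw response. A hedged sketch of catching it, under the same export and `client` assumptions as above:

```python
# Sketch only; assumes ResponseValidationError is exported via
# mistralai_azure.models and that `client` is a configured MistralAzure.
from mistralai_azure import models

try:
    res = client.chat.complete(
        model="azureai",
        messages=[{"role": "user", "content": "Hello!"}],
    )
except models.ResponseValidationError as e:
    # The body came back 2xx but did not match the expected model.
    print("schema mismatch:", e.cause)  # normally a pydantic.ValidationError
    print("raw body:", e.raw_response.text)
```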