mistralai 1.8.1__py3-none-any.whl → 1.9.1__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- mistralai/_hooks/types.py +7 -0
- mistralai/_version.py +3 -3
- mistralai/agents.py +16 -0
- mistralai/basesdk.py +12 -20
- mistralai/beta.py +1 -1
- mistralai/chat.py +16 -0
- mistralai/classifiers.py +8 -0
- mistralai/conversations.py +35 -15
- mistralai/embeddings.py +2 -0
- mistralai/extra/run/context.py +2 -4
- mistralai/files.py +12 -0
- mistralai/fim.py +4 -0
- mistralai/httpclient.py +6 -16
- mistralai/jobs.py +10 -0
- mistralai/mistral_agents.py +10 -0
- mistralai/mistral_jobs.py +8 -0
- mistralai/models/__init__.py +1357 -722
- mistralai/models/agent.py +1 -1
- mistralai/models/agentconversation.py +1 -1
- mistralai/models/agentcreationrequest.py +1 -1
- mistralai/models/agenthandoffentry.py +1 -1
- mistralai/models/agents_api_v1_conversations_getop.py +2 -0
- mistralai/models/agents_api_v1_conversations_historyop.py +2 -0
- mistralai/models/agents_api_v1_conversations_messagesop.py +2 -0
- mistralai/models/agents_api_v1_conversations_restart_streamop.py +2 -0
- mistralai/models/agents_api_v1_conversations_restartop.py +2 -0
- mistralai/models/agentscompletionrequest.py +13 -3
- mistralai/models/agentscompletionstreamrequest.py +13 -3
- mistralai/models/agentupdaterequest.py +1 -1
- mistralai/models/assistantmessage.py +1 -1
- mistralai/models/basemodelcard.py +8 -6
- mistralai/models/batchjobin.py +1 -1
- mistralai/models/batchjobout.py +1 -1
- mistralai/models/chatcompletionrequest.py +20 -3
- mistralai/models/chatcompletionstreamrequest.py +20 -3
- mistralai/models/classifierdetailedjobout.py +1 -1
- mistralai/models/classifierftmodelout.py +1 -1
- mistralai/models/classifierjobout.py +1 -1
- mistralai/models/classifiertargetin.py +1 -1
- mistralai/models/classifiertrainingparameters.py +1 -1
- mistralai/models/classifiertrainingparametersin.py +1 -1
- mistralai/models/completionargs.py +1 -1
- mistralai/models/completiondetailedjobout.py +1 -1
- mistralai/models/completionftmodelout.py +1 -1
- mistralai/models/completionjobout.py +1 -1
- mistralai/models/completionresponsestreamchoice.py +1 -1
- mistralai/models/completiontrainingparameters.py +1 -1
- mistralai/models/completiontrainingparametersin.py +1 -1
- mistralai/models/contentchunk.py +3 -0
- mistralai/models/conversationrequest.py +1 -1
- mistralai/models/conversationstreamrequest.py +1 -1
- mistralai/models/conversationusageinfo.py +1 -1
- mistralai/models/deltamessage.py +1 -1
- mistralai/models/documenturlchunk.py +1 -1
- mistralai/models/embeddingrequest.py +1 -1
- mistralai/models/eventout.py +1 -1
- mistralai/models/filechunk.py +23 -0
- mistralai/models/files_api_routes_list_filesop.py +1 -1
- mistralai/models/fileschema.py +1 -1
- mistralai/models/fimcompletionrequest.py +1 -1
- mistralai/models/fimcompletionstreamrequest.py +1 -1
- mistralai/models/ftmodelcard.py +9 -6
- mistralai/models/functioncallentry.py +1 -1
- mistralai/models/functionresultentry.py +1 -1
- mistralai/models/githubrepositoryin.py +1 -1
- mistralai/models/githubrepositoryout.py +1 -1
- mistralai/models/imageurl.py +1 -1
- mistralai/models/inputentries.py +21 -2
- mistralai/models/jobin.py +1 -1
- mistralai/models/jobmetadataout.py +1 -1
- mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +1 -1
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +1 -1
- mistralai/models/jsonschema.py +1 -1
- mistralai/models/legacyjobmetadataout.py +1 -1
- mistralai/models/messageinputentry.py +4 -4
- mistralai/models/messageoutputentry.py +1 -1
- mistralai/models/messageoutputevent.py +1 -1
- mistralai/models/metricout.py +1 -1
- mistralai/models/mistralpromptmode.py +8 -0
- mistralai/models/modelcapabilities.py +3 -0
- mistralai/models/modelconversation.py +1 -1
- mistralai/models/ocrimageobject.py +1 -1
- mistralai/models/ocrpageobject.py +1 -1
- mistralai/models/ocrrequest.py +5 -3
- mistralai/models/ocrresponse.py +1 -1
- mistralai/models/ocrusageinfo.py +1 -1
- mistralai/models/responseformat.py +1 -1
- mistralai/models/retrievefileout.py +1 -1
- mistralai/models/toolexecutionentry.py +1 -1
- mistralai/models/toolfilechunk.py +1 -1
- mistralai/models/toolmessage.py +1 -1
- mistralai/models/toolreferencechunk.py +1 -1
- mistralai/models/updateftmodelin.py +1 -1
- mistralai/models/uploadfileout.py +1 -1
- mistralai/models/usermessage.py +1 -1
- mistralai/models/wandbintegration.py +1 -1
- mistralai/models/wandbintegrationout.py +1 -1
- mistralai/models_.py +14 -2
- mistralai/ocr.py +2 -0
- mistralai/sdk.py +68 -40
- mistralai/sdkconfiguration.py +0 -7
- mistralai/types/basemodel.py +3 -3
- mistralai/utils/__init__.py +131 -45
- mistralai/utils/datetimes.py +23 -0
- mistralai/utils/enums.py +67 -27
- mistralai/utils/forms.py +49 -28
- mistralai/utils/serializers.py +32 -3
- {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/METADATA +13 -6
- {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/RECORD +111 -108
- {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/LICENSE +0 -0
- {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/WHEEL +0 -0
mistralai/models/agent.py
CHANGED
mistralai/models/agents_api_v1_conversations_getop.py
CHANGED
@@ -11,12 +11,14 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict

 class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching metadata."""


 class AgentsAPIV1ConversationsGetRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching metadata."""


 AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType(
mistralai/models/agents_api_v1_conversations_historyop.py
CHANGED
@@ -8,9 +8,11 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching entries."""


 class AgentsAPIV1ConversationsHistoryRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching entries."""
mistralai/models/agents_api_v1_conversations_messagesop.py
CHANGED
@@ -8,9 +8,11 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching messages."""


 class AgentsAPIV1ConversationsMessagesRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching messages."""
mistralai/models/agents_api_v1_conversations_restart_streamop.py
CHANGED
@@ -12,6 +12,7 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict


@@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""

     conversation_restart_stream_request: Annotated[
         ConversationRestartStreamRequest,
mistralai/models/agents_api_v1_conversations_restartop.py
CHANGED
@@ -12,6 +12,7 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_request: ConversationRestartRequestTypedDict


@@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""

     conversation_restart_request: Annotated[
         ConversationRestartRequest,
mistralai/models/agentscompletionrequest.py
CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -86,6 +88,8 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


 class AgentsCompletionRequest(BaseModel):
@@ -126,6 +130,11 @@ class AgentsCompletionRequest(BaseModel):

     parallel_tool_calls: Optional[bool] = None

+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -141,15 +150,16 @@ class AgentsCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []

         serialized = handler(self)

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
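Worth noting on the `prompt_mode` plumbing above: the field is an open enum. `PlainValidator(validate_open_enum(False))` lets values outside the declared `MistralPromptMode` members pass validation instead of raising, so the SDK tolerates modes the API may add later. A minimal sketch of that idea (not the SDK's actual `validate_open_enum` internals):

```python
from enum import Enum
from typing import Any

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class PromptMode(str, Enum):
    REASONING = "reasoning"


def open_enum(value: Any) -> Any:
    """Coerce known values to the enum member; pass unknown ones through."""
    try:
        return PromptMode(value)
    except ValueError:
        return value  # unknown value: accept as-is rather than fail


class Request(BaseModel):
    prompt_mode: Annotated[Any, PlainValidator(open_enum)] = None


print(Request(prompt_mode="reasoning").prompt_mode)         # PromptMode.REASONING
print(Request(prompt_mode="some-future-mode").prompt_mode)  # 'some-future-mode'
```

The `False` argument to the generated helper presumably distinguishes string enums from integer ones.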
mistralai/models/agentscompletionstreamrequest.py
CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -85,6 +87,8 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


 class AgentsCompletionStreamRequest(BaseModel):
@@ -124,6 +128,11 @@ class AgentsCompletionStreamRequest(BaseModel):

     parallel_tool_calls: Optional[bool] = None

+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -139,15 +148,16 @@ class AgentsCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []

         serialized = handler(self)

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
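The same `self.model_fields` → `type(self).model_fields` substitution recurs in every regenerated `serialize_model`; it appears to track recent Pydantic 2.x releases deprecating `model_fields` access on instances in favor of class access. A small illustration:

```python
from typing import Optional

from pydantic import BaseModel


class Example(BaseModel):
    prompt_mode: Optional[str] = None


ex = Example()
# Class access, the spelling the generated serializers switched to:
print(type(ex).model_fields.keys())  # dict_keys(['prompt_mode'])
# ex.model_fields still resolves, but warns on recent Pydantic 2.x,
# which is presumably why the generated code moved off it.
```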
mistralai/models/basemodelcard.py
CHANGED
@@ -12,9 +12,6 @@ from typing import List, Literal, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-Type = Literal["base"]
-
-
 class BaseModelCardTypedDict(TypedDict):
     id: str
     capabilities: ModelCapabilitiesTypedDict
@@ -26,8 +23,9 @@ class BaseModelCardTypedDict(TypedDict):
     max_context_length: NotRequired[int]
     aliases: NotRequired[List[str]]
     deprecation: NotRequired[Nullable[datetime]]
+    deprecation_replacement_model: NotRequired[Nullable[str]]
     default_model_temperature: NotRequired[Nullable[float]]
-    type: Type
+    type: Literal["base"]


 class BaseModelCard(BaseModel):
@@ -51,10 +49,12 @@ class BaseModelCard(BaseModel):

     deprecation: OptionalNullable[datetime] = UNSET

+    deprecation_replacement_model: OptionalNullable[str] = UNSET
+
     default_model_temperature: OptionalNullable[float] = UNSET

     TYPE: Annotated[
-        Annotated[Optional[Type], AfterValidator(validate_const("base"))],
+        Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))],
         pydantic.Field(alias="type"),
     ] = "base"

@@ -69,6 +69,7 @@ class BaseModelCard(BaseModel):
             "max_context_length",
             "aliases",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
             "type",
         ]
@@ -76,6 +77,7 @@ class BaseModelCard(BaseModel):
             "name",
             "description",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
         ]
         null_default_fields = []
@@ -84,7 +86,7 @@ class BaseModelCard(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
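Alongside the `deprecation` timestamp, model cards now expose a `deprecation_replacement_model` hint. A hedged sketch of reading it via the models API (whether it is populated depends on the server response):

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    models = client.models.list()
    for card in models.data or []:
        if card.deprecation is not None:
            # deprecation_replacement_model is new in 1.9.x; may be None.
            print(f"{card.id} is deprecated -> {card.deprecation_replacement_model}")
```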
mistralai/models/batchjobin.py
CHANGED
mistralai/models/batchjobout.py
CHANGED
mistralai/models/chatcompletionrequest.py
CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -86,6 +88,8 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -134,6 +138,11 @@ class ChatCompletionRequest(BaseModel):

     parallel_tool_calls: Optional[bool] = None

+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""

@@ -154,16 +163,24 @@ class ChatCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []

         serialized = handler(self)

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
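Since the generated `chat` methods mirror these request models, the new field should be usable as a plain keyword argument. A hedged sketch (the model name is illustrative):

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    res = client.chat.complete(
        model="magistral-medium-latest",  # illustrative reasoning-capable model
        messages=[{"role": "user", "content": "How many 'r's are in strawberry?"}],
        prompt_mode="reasoning",  # new in 1.9.x; selects the reasoning system prompt
    )
    print(res.choices[0].message.content)
```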
mistralai/models/chatcompletionstreamrequest.py
CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -89,6 +91,8 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -136,6 +140,11 @@ class ChatCompletionStreamRequest(BaseModel):

     parallel_tool_calls: Optional[bool] = None

+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""

@@ -156,16 +165,24 @@ class ChatCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []

         serialized = handler(self)

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
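The streaming request gains the identical field, so the same toggle should apply to `chat.stream`; a sketch with the same caveats:

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    res = client.chat.stream(
        model="magistral-medium-latest",  # illustrative
        messages=[{"role": "user", "content": "Think it through: what is 17 * 24?"}],
        prompt_mode="reasoning",
    )
    with res as event_stream:
        for event in event_stream:
            # Each event wraps a completion chunk; delta.content may be None.
            print(event.data.choices[0].delta.content or "", end="")
```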
mistralai/models/contentchunk.py
CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
@@ -17,6 +18,7 @@ ContentChunkTypedDict = TypeAliasType(
         TextChunkTypedDict,
         ImageURLChunkTypedDict,
         ReferenceChunkTypedDict,
+        FileChunkTypedDict,
         DocumentURLChunkTypedDict,
     ],
 )
@@ -28,6 +30,7 @@ ContentChunk = Annotated[
         Annotated[DocumentURLChunk, Tag("document_url")],
         Annotated[TextChunk, Tag("text")],
         Annotated[ReferenceChunk, Tag("reference")],
+        Annotated[FileChunk, Tag("file")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
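With the new discriminator tag `"file"`, user message content can now include a file chunk. A sketch assuming `FileChunk` references a previously uploaded file by ID; the exact field names live in `mistralai/models/filechunk.py`, which this view does not expand:

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    res = client.chat.complete(
        model="mistral-small-latest",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Summarize the attached file."},
                    # "file_id" is an assumed field name for the new chunk type.
                    {"type": "file", "file_id": "<uploaded-file-id>"},
                ],
            }
        ],
    )
    print(res.choices[0].message.content)
```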