mistralai 1.8.1__py3-none-any.whl → 1.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. mistralai/_hooks/types.py +7 -0
  2. mistralai/_version.py +3 -3
  3. mistralai/agents.py +16 -0
  4. mistralai/basesdk.py +12 -20
  5. mistralai/beta.py +1 -1
  6. mistralai/chat.py +16 -0
  7. mistralai/classifiers.py +8 -0
  8. mistralai/conversations.py +35 -15
  9. mistralai/embeddings.py +2 -0
  10. mistralai/extra/run/context.py +2 -4
  11. mistralai/files.py +12 -0
  12. mistralai/fim.py +4 -0
  13. mistralai/httpclient.py +6 -16
  14. mistralai/jobs.py +10 -0
  15. mistralai/mistral_agents.py +10 -0
  16. mistralai/mistral_jobs.py +8 -0
  17. mistralai/models/__init__.py +1357 -722
  18. mistralai/models/agent.py +1 -1
  19. mistralai/models/agentconversation.py +1 -1
  20. mistralai/models/agentcreationrequest.py +1 -1
  21. mistralai/models/agenthandoffentry.py +1 -1
  22. mistralai/models/agents_api_v1_conversations_getop.py +2 -0
  23. mistralai/models/agents_api_v1_conversations_historyop.py +2 -0
  24. mistralai/models/agents_api_v1_conversations_messagesop.py +2 -0
  25. mistralai/models/agents_api_v1_conversations_restart_streamop.py +2 -0
  26. mistralai/models/agents_api_v1_conversations_restartop.py +2 -0
  27. mistralai/models/agentscompletionrequest.py +13 -3
  28. mistralai/models/agentscompletionstreamrequest.py +13 -3
  29. mistralai/models/agentupdaterequest.py +1 -1
  30. mistralai/models/assistantmessage.py +1 -1
  31. mistralai/models/basemodelcard.py +8 -6
  32. mistralai/models/batchjobin.py +1 -1
  33. mistralai/models/batchjobout.py +1 -1
  34. mistralai/models/chatcompletionrequest.py +20 -3
  35. mistralai/models/chatcompletionstreamrequest.py +20 -3
  36. mistralai/models/classifierdetailedjobout.py +1 -1
  37. mistralai/models/classifierftmodelout.py +1 -1
  38. mistralai/models/classifierjobout.py +1 -1
  39. mistralai/models/classifiertargetin.py +1 -1
  40. mistralai/models/classifiertrainingparameters.py +1 -1
  41. mistralai/models/classifiertrainingparametersin.py +1 -1
  42. mistralai/models/completionargs.py +1 -1
  43. mistralai/models/completiondetailedjobout.py +1 -1
  44. mistralai/models/completionftmodelout.py +1 -1
  45. mistralai/models/completionjobout.py +1 -1
  46. mistralai/models/completionresponsestreamchoice.py +1 -1
  47. mistralai/models/completiontrainingparameters.py +1 -1
  48. mistralai/models/completiontrainingparametersin.py +1 -1
  49. mistralai/models/contentchunk.py +3 -0
  50. mistralai/models/conversationrequest.py +1 -1
  51. mistralai/models/conversationstreamrequest.py +1 -1
  52. mistralai/models/conversationusageinfo.py +1 -1
  53. mistralai/models/deltamessage.py +1 -1
  54. mistralai/models/documenturlchunk.py +1 -1
  55. mistralai/models/embeddingrequest.py +1 -1
  56. mistralai/models/eventout.py +1 -1
  57. mistralai/models/filechunk.py +23 -0
  58. mistralai/models/files_api_routes_list_filesop.py +1 -1
  59. mistralai/models/fileschema.py +1 -1
  60. mistralai/models/fimcompletionrequest.py +1 -1
  61. mistralai/models/fimcompletionstreamrequest.py +1 -1
  62. mistralai/models/ftmodelcard.py +9 -6
  63. mistralai/models/functioncallentry.py +1 -1
  64. mistralai/models/functionresultentry.py +1 -1
  65. mistralai/models/githubrepositoryin.py +1 -1
  66. mistralai/models/githubrepositoryout.py +1 -1
  67. mistralai/models/imageurl.py +1 -1
  68. mistralai/models/inputentries.py +21 -2
  69. mistralai/models/jobin.py +1 -1
  70. mistralai/models/jobmetadataout.py +1 -1
  71. mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +1 -1
  72. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +1 -1
  73. mistralai/models/jsonschema.py +1 -1
  74. mistralai/models/legacyjobmetadataout.py +1 -1
  75. mistralai/models/messageinputentry.py +4 -4
  76. mistralai/models/messageoutputentry.py +1 -1
  77. mistralai/models/messageoutputevent.py +1 -1
  78. mistralai/models/metricout.py +1 -1
  79. mistralai/models/mistralpromptmode.py +8 -0
  80. mistralai/models/modelcapabilities.py +3 -0
  81. mistralai/models/modelconversation.py +1 -1
  82. mistralai/models/ocrimageobject.py +1 -1
  83. mistralai/models/ocrpageobject.py +1 -1
  84. mistralai/models/ocrrequest.py +5 -3
  85. mistralai/models/ocrresponse.py +1 -1
  86. mistralai/models/ocrusageinfo.py +1 -1
  87. mistralai/models/responseformat.py +1 -1
  88. mistralai/models/retrievefileout.py +1 -1
  89. mistralai/models/toolexecutionentry.py +1 -1
  90. mistralai/models/toolfilechunk.py +1 -1
  91. mistralai/models/toolmessage.py +1 -1
  92. mistralai/models/toolreferencechunk.py +1 -1
  93. mistralai/models/updateftmodelin.py +1 -1
  94. mistralai/models/uploadfileout.py +1 -1
  95. mistralai/models/usermessage.py +1 -1
  96. mistralai/models/wandbintegration.py +1 -1
  97. mistralai/models/wandbintegrationout.py +1 -1
  98. mistralai/models_.py +14 -2
  99. mistralai/ocr.py +2 -0
  100. mistralai/sdk.py +68 -40
  101. mistralai/sdkconfiguration.py +0 -7
  102. mistralai/types/basemodel.py +3 -3
  103. mistralai/utils/__init__.py +131 -45
  104. mistralai/utils/datetimes.py +23 -0
  105. mistralai/utils/enums.py +67 -27
  106. mistralai/utils/forms.py +49 -28
  107. mistralai/utils/serializers.py +32 -3
  108. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/METADATA +13 -6
  109. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/RECORD +111 -108
  110. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/LICENSE +0 -0
  111. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/WHEEL +0 -0
mistralai/models/agent.py CHANGED
@@ -108,7 +108,7 @@ class Agent(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
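
Nearly every model file in this release makes the same one-line change inside the generated serialize_model hook: `self.model_fields` becomes `type(self).model_fields`. A minimal sketch of the motivation, on the assumption that this tracks Pydantic's deprecation (since 2.11) of reading `model_fields` through an instance:

    from pydantic import BaseModel

    class Agent(BaseModel):
        name: str

    agent = Agent(name="demo")

    # Deprecated since Pydantic 2.11: instance access emits a warning.
    # fields = agent.model_fields
    # Preferred: read the field map off the class instead.
    fields = type(agent).model_fields
    print(list(fields))  # ['name']

The surrounding loop is unchanged; it still maps each field's alias to its serialized value.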
mistralai/models/agentconversation.py CHANGED
@@ -50,7 +50,7 @@ class AgentConversation(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agentcreationrequest.py CHANGED
@@ -88,7 +88,7 @@ class AgentCreationRequest(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agenthandoffentry.py CHANGED
@@ -54,7 +54,7 @@ class AgentHandoffEntry(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agents_api_v1_conversations_getop.py CHANGED
@@ -11,12 +11,14 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict
 
 class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching metadata."""
 
 
 class AgentsAPIV1ConversationsGetRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching metadata."""
 
 
 AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType(
mistralai/models/agents_api_v1_conversations_historyop.py CHANGED
@@ -8,9 +8,11 @@ from typing_extensions import Annotated, TypedDict
 
 class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching entries."""
 
 
 class AgentsAPIV1ConversationsHistoryRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching entries."""
mistralai/models/agents_api_v1_conversations_messagesop.py CHANGED
@@ -8,9 +8,11 @@ from typing_extensions import Annotated, TypedDict
 
 class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching messages."""
 
 
 class AgentsAPIV1ConversationsMessagesRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching messages."""
mistralai/models/agents_api_v1_conversations_restart_streamop.py CHANGED
@@ -12,6 +12,7 @@ from typing_extensions import Annotated, TypedDict
 
 class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict
 
 
@@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""
 
     conversation_restart_stream_request: Annotated[
         ConversationRestartStreamRequest,
mistralai/models/agents_api_v1_conversations_restartop.py CHANGED
@@ -12,6 +12,7 @@ from typing_extensions import Annotated, TypedDict
 
 class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_request: ConversationRestartRequestTypedDict
 
 
@@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartRequest(BaseModel):
    conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""
 
     conversation_restart_request: Annotated[
         ConversationRestartRequest,
mistralai/models/agentscompletionrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -86,6 +88,8 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
 
 class AgentsCompletionRequest(BaseModel):
@@ -126,6 +130,11 @@ class AgentsCompletionRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -141,15 +150,16 @@ class AgentsCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
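
The new prompt_mode field is declared as an open enum: `PlainValidator(validate_open_enum(False))` (the `False` argument appears to select string- rather than integer-valued enums in the generated helper) lets values outside the declared set pass validation, keeping older SDK builds forward-compatible with modes added server-side. A sketch of constructing the request with the new field; the agent_id value is a placeholder:

    from mistralai.models import AgentsCompletionRequest

    req = AgentsCompletionRequest(
        agent_id="ag-0123456789",  # placeholder agent ID
        messages=[{"role": "user", "content": "Prove sqrt(2) is irrational."}],
        prompt_mode="reasoning",   # opt into the reasoning system prompt
    )

    # UNSET fields (e.g. an omitted prompt_mode) are stripped by
    # serialize_model, so they never appear in the request body.
    print(req.model_dump(by_alias=True))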
mistralai/models/agentscompletionstreamrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -85,6 +87,8 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
 
 class AgentsCompletionStreamRequest(BaseModel):
@@ -124,6 +128,11 @@ class AgentsCompletionStreamRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -139,15 +148,16 @@ class AgentsCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agentupdaterequest.py CHANGED
@@ -90,7 +90,7 @@ class AgentUpdateRequest(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/assistantmessage.py CHANGED
@@ -50,7 +50,7 @@ class AssistantMessage(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/basemodelcard.py CHANGED
@@ -12,9 +12,6 @@ from typing import List, Literal, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-Type = Literal["base"]
-
-
 class BaseModelCardTypedDict(TypedDict):
     id: str
     capabilities: ModelCapabilitiesTypedDict
@@ -26,8 +23,9 @@ class BaseModelCardTypedDict(TypedDict):
     max_context_length: NotRequired[int]
     aliases: NotRequired[List[str]]
     deprecation: NotRequired[Nullable[datetime]]
+    deprecation_replacement_model: NotRequired[Nullable[str]]
     default_model_temperature: NotRequired[Nullable[float]]
-    type: Type
+    type: Literal["base"]
 
 
 class BaseModelCard(BaseModel):
@@ -51,10 +49,12 @@ class BaseModelCard(BaseModel):
 
     deprecation: OptionalNullable[datetime] = UNSET
 
+    deprecation_replacement_model: OptionalNullable[str] = UNSET
+
     default_model_temperature: OptionalNullable[float] = UNSET
 
     TYPE: Annotated[
-        Annotated[Optional[Type], AfterValidator(validate_const("base"))],
+        Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))],
         pydantic.Field(alias="type"),
     ] = "base"
 
@@ -69,6 +69,7 @@
             "max_context_length",
             "aliases",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
             "type",
         ]
@@ -76,6 +77,7 @@
             "name",
             "description",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
         ]
         null_default_fields = []
@@ -84,7 +86,7 @@
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
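
Both model-card variants (this file and the matching ftmodelcard.py change in the list above) gain a nullable deprecation_replacement_model hint next to the existing deprecation timestamp, and the module-level `Type` alias is inlined to `Literal["base"]`. A sketch of surfacing the new field, assuming a configured client; the API key and model name are placeholders:

    from mistralai import Mistral

    client = Mistral(api_key="...")  # placeholder key

    card = client.models.retrieve(model_id="open-mistral-7b")  # placeholder model
    if card.deprecation is not None:
        print(f"{card.id} deprecated as of {card.deprecation}")
        if card.deprecation_replacement_model:
            print(f"Suggested replacement: {card.deprecation_replacement_model}")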
mistralai/models/batchjobin.py CHANGED
@@ -39,7 +39,7 @@ class BatchJobIn(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/batchjobout.py CHANGED
@@ -90,7 +90,7 @@ class BatchJobOut(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/chatcompletionrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -86,6 +88,8 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -134,6 +138,11 @@ class ChatCompletionRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -154,16 +163,24 @@ class ChatCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
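
The same prompt_mode plumbing lands on the plain chat endpoint (and its streaming twin below). A usage sketch, assuming the high-level client forwards these request-model fields as keyword arguments; the API key and model name are placeholders:

    from mistralai import Mistral

    client = Mistral(api_key="...")  # placeholder key

    res = client.chat.complete(
        model="magistral-medium-latest",  # placeholder reasoning-capable model
        messages=[{"role": "user", "content": "How many primes are below 100?"}],
        prompt_mode="reasoning",  # opt into the reasoning system prompt
    )
    print(res.choices[0].message.content)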
mistralai/models/chatcompletionstreamrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -89,6 +91,8 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -136,6 +140,11 @@ class ChatCompletionStreamRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -156,16 +165,24 @@ class ChatCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifierdetailedjobout.py CHANGED
@@ -135,7 +135,7 @@ class ClassifierDetailedJobOut(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifierftmodelout.py CHANGED
@@ -80,7 +80,7 @@ class ClassifierFTModelOut(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifierjobout.py CHANGED
@@ -144,7 +144,7 @@ class ClassifierJobOut(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifiertargetin.py CHANGED
@@ -34,7 +34,7 @@ class ClassifierTargetIn(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifiertrainingparameters.py CHANGED
@@ -52,7 +52,7 @@ class ClassifierTrainingParameters(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifiertrainingparametersin.py CHANGED
@@ -64,7 +64,7 @@ class ClassifierTrainingParametersIn(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionargs.py CHANGED
@@ -79,7 +79,7 @@ class CompletionArgs(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completiondetailedjobout.py CHANGED
@@ -141,7 +141,7 @@ class CompletionDetailedJobOut(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionftmodelout.py CHANGED
@@ -76,7 +76,7 @@ class CompletionFTModelOut(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionjobout.py CHANGED
@@ -154,7 +154,7 @@ class CompletionJobOut(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionresponsestreamchoice.py CHANGED
@@ -41,7 +41,7 @@ class CompletionResponseStreamChoice(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completiontrainingparameters.py CHANGED
@@ -57,7 +57,7 @@ class CompletionTrainingParameters(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completiontrainingparametersin.py CHANGED
@@ -69,7 +69,7 @@ class CompletionTrainingParametersIn(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/contentchunk.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
@@ -17,6 +18,7 @@ ContentChunkTypedDict = TypeAliasType(
         TextChunkTypedDict,
         ImageURLChunkTypedDict,
         ReferenceChunkTypedDict,
+        FileChunkTypedDict,
         DocumentURLChunkTypedDict,
     ],
 )
@@ -28,6 +30,7 @@ ContentChunk = Annotated[
         Annotated[DocumentURLChunk, Tag("document_url")],
         Annotated[TextChunk, Tag("text")],
         Annotated[ReferenceChunk, Tag("reference")],
+        Annotated[FileChunk, Tag("file")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
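
This registers the new FileChunk (from the new filechunk.py module, +23 lines in the list above) as a fifth content-chunk variant, discriminated by `type == "file"`. A hypothetical sketch of sending one in a user message; the `file_id` field name is an assumption inferred from the SDK's other file-oriented chunks, not confirmed by this diff:

    # Hypothetical: assumes FileChunk references a previously uploaded file.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Summarize the attached document."},
                {"type": "file", "file_id": "file-abc123"},  # placeholder ID
            ],
        }
    ]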
mistralai/models/conversationrequest.py CHANGED
@@ -112,7 +112,7 @@ class ConversationRequest(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/conversationstreamrequest.py CHANGED
@@ -114,7 +114,7 @@ class ConversationStreamRequest(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/conversationusageinfo.py CHANGED
@@ -42,7 +42,7 @@ class ConversationUsageInfo(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)