mistralai 1.8.2__py3-none-any.whl → 1.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. mistralai/_hooks/types.py +7 -0
  2. mistralai/_version.py +3 -3
  3. mistralai/agents.py +8 -4
  4. mistralai/basesdk.py +12 -20
  5. mistralai/chat.py +8 -4
  6. mistralai/classifiers.py +8 -0
  7. mistralai/conversations.py +34 -14
  8. mistralai/embeddings.py +2 -0
  9. mistralai/extra/run/context.py +2 -4
  10. mistralai/files.py +12 -0
  11. mistralai/fim.py +4 -0
  12. mistralai/httpclient.py +6 -16
  13. mistralai/jobs.py +10 -0
  14. mistralai/mistral_agents.py +10 -0
  15. mistralai/mistral_jobs.py +8 -0
  16. mistralai/models/__init__.py +1356 -723
  17. mistralai/models/agent.py +1 -1
  18. mistralai/models/agentconversation.py +1 -1
  19. mistralai/models/agentcreationrequest.py +1 -1
  20. mistralai/models/agenthandoffentry.py +1 -1
  21. mistralai/models/agents_api_v1_conversations_getop.py +2 -0
  22. mistralai/models/agents_api_v1_conversations_historyop.py +2 -0
  23. mistralai/models/agents_api_v1_conversations_messagesop.py +2 -0
  24. mistralai/models/agents_api_v1_conversations_restart_streamop.py +2 -0
  25. mistralai/models/agents_api_v1_conversations_restartop.py +2 -0
  26. mistralai/models/agentscompletionrequest.py +3 -1
  27. mistralai/models/agentscompletionstreamrequest.py +3 -1
  28. mistralai/models/agentupdaterequest.py +1 -1
  29. mistralai/models/assistantmessage.py +1 -1
  30. mistralai/models/basemodelcard.py +8 -6
  31. mistralai/models/batchjobin.py +1 -1
  32. mistralai/models/batchjobout.py +1 -1
  33. mistralai/models/chatcompletionrequest.py +3 -1
  34. mistralai/models/chatcompletionstreamrequest.py +3 -1
  35. mistralai/models/classifierdetailedjobout.py +1 -1
  36. mistralai/models/classifierftmodelout.py +1 -1
  37. mistralai/models/classifierjobout.py +1 -1
  38. mistralai/models/classifiertargetin.py +1 -1
  39. mistralai/models/classifiertrainingparameters.py +1 -1
  40. mistralai/models/classifiertrainingparametersin.py +1 -1
  41. mistralai/models/completionargs.py +1 -1
  42. mistralai/models/completiondetailedjobout.py +1 -1
  43. mistralai/models/completionftmodelout.py +1 -1
  44. mistralai/models/completionjobout.py +1 -1
  45. mistralai/models/completionresponsestreamchoice.py +1 -1
  46. mistralai/models/completiontrainingparameters.py +1 -1
  47. mistralai/models/completiontrainingparametersin.py +1 -1
  48. mistralai/models/contentchunk.py +3 -0
  49. mistralai/models/conversationrequest.py +1 -1
  50. mistralai/models/conversationstreamrequest.py +1 -1
  51. mistralai/models/conversationusageinfo.py +1 -1
  52. mistralai/models/deltamessage.py +1 -1
  53. mistralai/models/documenturlchunk.py +1 -1
  54. mistralai/models/embeddingrequest.py +1 -1
  55. mistralai/models/eventout.py +1 -1
  56. mistralai/models/filechunk.py +23 -0
  57. mistralai/models/files_api_routes_list_filesop.py +1 -1
  58. mistralai/models/fileschema.py +1 -1
  59. mistralai/models/fimcompletionrequest.py +1 -1
  60. mistralai/models/fimcompletionstreamrequest.py +1 -1
  61. mistralai/models/ftmodelcard.py +9 -6
  62. mistralai/models/functioncallentry.py +1 -1
  63. mistralai/models/functionresultentry.py +1 -1
  64. mistralai/models/githubrepositoryin.py +1 -1
  65. mistralai/models/githubrepositoryout.py +1 -1
  66. mistralai/models/imageurl.py +1 -1
  67. mistralai/models/inputentries.py +21 -2
  68. mistralai/models/jobin.py +1 -1
  69. mistralai/models/jobmetadataout.py +1 -1
  70. mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +1 -1
  71. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +1 -1
  72. mistralai/models/jsonschema.py +1 -1
  73. mistralai/models/legacyjobmetadataout.py +1 -1
  74. mistralai/models/messageinputentry.py +4 -4
  75. mistralai/models/messageoutputentry.py +1 -1
  76. mistralai/models/messageoutputevent.py +1 -1
  77. mistralai/models/metricout.py +1 -1
  78. mistralai/models/modelcapabilities.py +3 -0
  79. mistralai/models/modelconversation.py +1 -1
  80. mistralai/models/ocrimageobject.py +1 -1
  81. mistralai/models/ocrpageobject.py +1 -1
  82. mistralai/models/ocrrequest.py +5 -3
  83. mistralai/models/ocrresponse.py +1 -1
  84. mistralai/models/ocrusageinfo.py +1 -1
  85. mistralai/models/responseformat.py +1 -1
  86. mistralai/models/retrievefileout.py +1 -1
  87. mistralai/models/toolexecutionentry.py +1 -1
  88. mistralai/models/toolfilechunk.py +1 -1
  89. mistralai/models/toolmessage.py +1 -1
  90. mistralai/models/toolreferencechunk.py +1 -1
  91. mistralai/models/updateftmodelin.py +1 -1
  92. mistralai/models/uploadfileout.py +1 -1
  93. mistralai/models/usermessage.py +1 -1
  94. mistralai/models/wandbintegration.py +1 -1
  95. mistralai/models/wandbintegrationout.py +1 -1
  96. mistralai/models_.py +14 -2
  97. mistralai/ocr.py +2 -0
  98. mistralai/sdk.py +68 -40
  99. mistralai/sdkconfiguration.py +0 -7
  100. mistralai/types/basemodel.py +3 -3
  101. mistralai/utils/__init__.py +131 -45
  102. mistralai/utils/datetimes.py +23 -0
  103. mistralai/utils/enums.py +67 -27
  104. mistralai/utils/forms.py +49 -28
  105. mistralai/utils/serializers.py +32 -3
  106. {mistralai-1.8.2.dist-info → mistralai-1.9.1.dist-info}/METADATA +13 -6
  107. {mistralai-1.8.2.dist-info → mistralai-1.9.1.dist-info}/RECORD +109 -107
  108. {mistralai-1.8.2.dist-info → mistralai-1.9.1.dist-info}/LICENSE +0 -0
  109. {mistralai-1.8.2.dist-info → mistralai-1.9.1.dist-info}/WHEEL +0 -0
mistralai/models/agent.py CHANGED
@@ -108,7 +108,7 @@ class Agent(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
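
This `self.model_fields` → `type(self).model_fields` edit repeats across nearly every model file below: Pydantic 2.11 deprecates reading `model_fields` off an instance, and reading it off the class returns the same mapping without the `DeprecationWarning`. A minimal, self-contained sketch of the generated serializer pattern (a hypothetical `Example` model, not the SDK's actual code):

```python
from typing import Optional

from pydantic import BaseModel, model_serializer


class Example(BaseModel):
    """Hypothetical model mirroring the generated serialize_model pattern."""

    name: str
    value: Optional[int] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        serialized = handler(self)
        m = {}
        # Read model_fields from the class, not the instance: the mapping is
        # identical, but there is no Pydantic 2.11 deprecation warning.
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            if k in serialized:
                m[k] = serialized[k]
        return m


print(Example(name="x", value=1).model_dump())  # {'name': 'x', 'value': 1}
```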
mistralai/models/agentconversation.py CHANGED
@@ -50,7 +50,7 @@ class AgentConversation(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agentcreationrequest.py CHANGED
@@ -88,7 +88,7 @@ class AgentCreationRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agenthandoffentry.py CHANGED
@@ -54,7 +54,7 @@ class AgentHandoffEntry(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agents_api_v1_conversations_getop.py CHANGED
@@ -11,12 +11,14 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict

 class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching metadata."""


 class AgentsAPIV1ConversationsGetRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching metadata."""


 AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType(
mistralai/models/agents_api_v1_conversations_historyop.py CHANGED
@@ -8,9 +8,11 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching entries."""


 class AgentsAPIV1ConversationsHistoryRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching entries."""
mistralai/models/agents_api_v1_conversations_messagesop.py CHANGED
@@ -8,9 +8,11 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching messages."""


 class AgentsAPIV1ConversationsMessagesRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching messages."""
mistralai/models/agents_api_v1_conversations_restart_streamop.py CHANGED
@@ -12,6 +12,7 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict


@@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""

     conversation_restart_stream_request: Annotated[
         ConversationRestartStreamRequest,
mistralai/models/agents_api_v1_conversations_restartop.py CHANGED
@@ -12,6 +12,7 @@ from typing_extensions import Annotated, TypedDict

 class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_request: ConversationRestartRequestTypedDict


@@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""

     conversation_restart_request: Annotated[
         ConversationRestartRequest,
mistralai/models/agentscompletionrequest.py CHANGED
@@ -89,6 +89,7 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


 class AgentsCompletionRequest(BaseModel):
@@ -132,6 +133,7 @@ class AgentsCompletionRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
@@ -157,7 +159,7 @@ class AgentsCompletionRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
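
The `prompt_mode` field itself already existed in 1.8.2; 1.9.1 only adds its docstring (here and in the chat completion requests below). A hedged sketch of passing it through the SDK, assuming Speakeasy's usual one-keyword-argument-per-field method surface; the agent ID and prompt are placeholders:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.agents.complete(
    agent_id="ag-0123456789",  # hypothetical agent ID
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    # Per the docstring: "reasoning" opts into the system prompt for
    # reasoning models; leaving it unset keeps the default behavior.
    prompt_mode="reasoning",
)
print(res.choices[0].message.content)
```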
mistralai/models/agentscompletionstreamrequest.py CHANGED
@@ -88,6 +88,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


 class AgentsCompletionStreamRequest(BaseModel):
@@ -130,6 +131,7 @@ class AgentsCompletionStreamRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
@@ -155,7 +157,7 @@ class AgentsCompletionStreamRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/agentupdaterequest.py CHANGED
@@ -90,7 +90,7 @@ class AgentUpdateRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/assistantmessage.py CHANGED
@@ -50,7 +50,7 @@ class AssistantMessage(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/basemodelcard.py CHANGED
@@ -12,9 +12,6 @@ from typing import List, Literal, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-Type = Literal["base"]
-
-
 class BaseModelCardTypedDict(TypedDict):
     id: str
     capabilities: ModelCapabilitiesTypedDict
@@ -26,8 +23,9 @@ class BaseModelCardTypedDict(TypedDict):
     max_context_length: NotRequired[int]
     aliases: NotRequired[List[str]]
     deprecation: NotRequired[Nullable[datetime]]
+    deprecation_replacement_model: NotRequired[Nullable[str]]
     default_model_temperature: NotRequired[Nullable[float]]
-    type: Type
+    type: Literal["base"]


 class BaseModelCard(BaseModel):
@@ -51,10 +49,12 @@ class BaseModelCard(BaseModel):

     deprecation: OptionalNullable[datetime] = UNSET

+    deprecation_replacement_model: OptionalNullable[str] = UNSET
+
     default_model_temperature: OptionalNullable[float] = UNSET

     TYPE: Annotated[
-        Annotated[Optional[Type], AfterValidator(validate_const("base"))],
+        Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))],
         pydantic.Field(alias="type"),
     ] = "base"

@@ -69,6 +69,7 @@ class BaseModelCard(BaseModel):
             "max_context_length",
             "aliases",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
             "type",
         ]
@@ -76,6 +77,7 @@ class BaseModelCard(BaseModel):
             "name",
             "description",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
         ]
         null_default_fields = []
@@ -84,7 +86,7 @@ class BaseModelCard(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
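
Beyond the mechanical serializer change, `BaseModelCard` inlines the module-level `Type` alias and gains a nullable `deprecation_replacement_model` field next to `deprecation`; the file list suggests `ftmodelcard.py` picks up the same field. A sketch of reading it when listing models (attribute names follow the field definitions above):

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

models = client.models.list()
for card in models.data or []:
    # Both fields are nullable: a deprecated model may or may not
    # advertise a suggested replacement.
    if card.deprecation is not None:
        replacement = card.deprecation_replacement_model or "none announced"
        print(f"{card.id} is deprecated; replacement: {replacement}")
```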
mistralai/models/batchjobin.py CHANGED
@@ -39,7 +39,7 @@ class BatchJobIn(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/batchjobout.py CHANGED
@@ -90,7 +90,7 @@ class BatchJobOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/chatcompletionrequest.py CHANGED
@@ -89,6 +89,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -140,6 +141,7 @@ class ChatCompletionRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
@@ -178,7 +180,7 @@ class ChatCompletionRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/chatcompletionstreamrequest.py CHANGED
@@ -92,6 +92,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -142,6 +143,7 @@ class ChatCompletionStreamRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
@@ -180,7 +182,7 @@ class ChatCompletionStreamRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifierdetailedjobout.py CHANGED
@@ -135,7 +135,7 @@ class ClassifierDetailedJobOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifierftmodelout.py CHANGED
@@ -80,7 +80,7 @@ class ClassifierFTModelOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifierjobout.py CHANGED
@@ -144,7 +144,7 @@ class ClassifierJobOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifiertargetin.py CHANGED
@@ -34,7 +34,7 @@ class ClassifierTargetIn(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifiertrainingparameters.py CHANGED
@@ -52,7 +52,7 @@ class ClassifierTrainingParameters(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/classifiertrainingparametersin.py CHANGED
@@ -64,7 +64,7 @@ class ClassifierTrainingParametersIn(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionargs.py CHANGED
@@ -79,7 +79,7 @@ class CompletionArgs(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completiondetailedjobout.py CHANGED
@@ -141,7 +141,7 @@ class CompletionDetailedJobOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionftmodelout.py CHANGED
@@ -76,7 +76,7 @@ class CompletionFTModelOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionjobout.py CHANGED
@@ -154,7 +154,7 @@ class CompletionJobOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completionresponsestreamchoice.py CHANGED
@@ -41,7 +41,7 @@ class CompletionResponseStreamChoice(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completiontrainingparameters.py CHANGED
@@ -57,7 +57,7 @@ class CompletionTrainingParameters(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/completiontrainingparametersin.py CHANGED
@@ -69,7 +69,7 @@ class CompletionTrainingParametersIn(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/contentchunk.py CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
@@ -17,6 +18,7 @@ ContentChunkTypedDict = TypeAliasType(
         TextChunkTypedDict,
         ImageURLChunkTypedDict,
         ReferenceChunkTypedDict,
+        FileChunkTypedDict,
         DocumentURLChunkTypedDict,
     ],
 )
@@ -28,6 +30,7 @@ ContentChunk = Annotated[
         Annotated[DocumentURLChunk, Tag("document_url")],
         Annotated[TextChunk, Tag("text")],
         Annotated[ReferenceChunk, Tag("reference")],
+        Annotated[FileChunk, Tag("file")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
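
With the `Tag("file")` entry added above, a content part whose `type` is `"file"` now routes to the new `FileChunk` model through the same `get_discriminator` callable as the existing chunk types. A sketch of the round trip using Pydantic's `TypeAdapter` directly (the SDK normally performs this validation internally; the file ID is a placeholder):

```python
from pydantic import TypeAdapter

from mistralai.models import ContentChunk, FileChunk

adapter = TypeAdapter(ContentChunk)

# The Discriminator reads the "type" key and picks the matching tagged member.
chunk = adapter.validate_python({"type": "file", "file_id": "file-abc123"})
assert isinstance(chunk, FileChunk)
print(chunk.file_id)  # file-abc123
```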
mistralai/models/conversationrequest.py CHANGED
@@ -112,7 +112,7 @@ class ConversationRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/conversationstreamrequest.py CHANGED
@@ -114,7 +114,7 @@ class ConversationStreamRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/conversationusageinfo.py CHANGED
@@ -42,7 +42,7 @@ class ConversationUsageInfo(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/deltamessage.py CHANGED
@@ -40,7 +40,7 @@ class DeltaMessage(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/documenturlchunk.py CHANGED
@@ -35,7 +35,7 @@ class DocumentURLChunk(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/embeddingrequest.py CHANGED
@@ -51,7 +51,7 @@ class EmbeddingRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/eventout.py CHANGED
@@ -34,7 +34,7 @@ class EventOut(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/filechunk.py ADDED
@@ -0,0 +1,23 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from mistralai.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class FileChunkTypedDict(TypedDict):
+    file_id: str
+    type: Literal["file"]
+
+
+class FileChunk(BaseModel):
+    file_id: str
+
+    TYPE: Annotated[
+        Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))],
+        pydantic.Field(alias="type"),
+    ] = "file"
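
The new file mirrors the other chunk models: a required `file_id` plus a constant `type` discriminator stored as `TYPE` and serialized under the `type` alias. A sketch of constructing one for message content (the file ID is a placeholder; whether a given endpoint accepts `file` parts depends on the model, which this diff alone does not establish):

```python
from mistralai.models import FileChunk, TextChunk

# TYPE defaults to "file", so only file_id is needed at construction time.
chunk = FileChunk(file_id="file-abc123")  # placeholder file ID

# by_alias=True emits the wire-format "type" key.
print(chunk.model_dump(by_alias=True))
# -> {'file_id': 'file-abc123', 'type': 'file'}

content = [TextChunk(text="Summarize the attached file."), chunk]
```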
mistralai/models/files_api_routes_list_filesop.py CHANGED
@@ -75,7 +75,7 @@ class FilesAPIRoutesListFilesRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/fileschema.py CHANGED
@@ -63,7 +63,7 @@ class FileSchema(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/fimcompletionrequest.py CHANGED
@@ -104,7 +104,7 @@ class FIMCompletionRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai/models/fimcompletionstreamrequest.py CHANGED
@@ -102,7 +102,7 @@ class FIMCompletionStreamRequest(BaseModel):

         m = {}

-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)