mistralai 1.8.1__py3-none-any.whl → 1.8.2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
mistralai/_version.py CHANGED
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai"
-__version__: str = "1.8.1"
+__version__: str = "1.8.2"
 __openapi_doc_version__: str = "1.0.0"
 __gen_version__: str = "2.548.6"
-__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.548.6 1.0.0 mistralai"
+__user_agent__: str = "speakeasy-sdk/python 1.8.2 2.548.6 1.0.0 mistralai"
 
 try:
     if __package__ is not None:
mistralai/agents.py CHANGED
@@ -47,6 +47,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -68,6 +69,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -105,6 +107,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
@@ -213,6 +216,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -234,6 +238,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -271,6 +276,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
@@ -379,6 +385,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -402,6 +409,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -439,6 +447,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
@@ -553,6 +562,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -576,6 +586,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -613,6 +624,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
mistralai/beta.py CHANGED
@@ -8,7 +8,7 @@ from mistralai.mistral_agents import MistralAgents
 
 class Beta(BaseSDK):
     conversations: Conversations
-    r"""(beta) Converstations API"""
+    r"""(beta) Conversations API"""
     agents: MistralAgents
     r"""(beta) Agents API"""
 
mistralai/chat.py CHANGED
@@ -123,6 +123,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -147,6 +148,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -186,6 +188,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
@@ -288,6 +291,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -312,6 +316,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -351,6 +356,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
@@ -461,6 +467,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -487,6 +494,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -528,6 +536,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
@@ -644,6 +653,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -670,6 +680,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -711,6 +722,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
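Taken together, the chat.py and agents.py changes add one new optional argument, prompt_mode, to every completion method (sync, async, and both stream variants). A minimal usage sketch follows; the client setup mirrors the SDK's README and the model name is illustrative, so only the prompt_mode parameter itself is what this release introduces:

    import os
    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    # prompt_mode is OptionalNullable: omit it to leave it UNSET,
    # pass "reasoning" to opt in, or pass None explicitly.
    res = client.chat.complete(
        model="magistral-small-latest",  # illustrative model name
        messages=[{"role": "user", "content": "What is 13 * 17?"}],
        prompt_mode="reasoning",
    )
    print(res.choices[0].message.content)
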
mistralai/conversations.py CHANGED
@@ -35,7 +35,7 @@ if typing.TYPE_CHECKING:
 
 
 class Conversations(BaseSDK):
-    r"""(beta) Converstations API"""
+    r"""(beta) Conversations API"""
 
     # region sdk-class-body
     # Custom run code allowing client side execution of code
mistralai/models/__init__.py CHANGED
@@ -591,6 +591,7 @@ from .messageoutputevent import (
     MessageOutputEventTypedDict,
 )
 from .metricout import MetricOut, MetricOutTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
 from .modelconversation import (
     ModelConversation,
@@ -1154,6 +1155,7 @@ __all__ = [
     "MessagesTypedDict",
     "MetricOut",
     "MetricOutTypedDict",
+    "MistralPromptMode",
     "ModelCapabilities",
     "ModelCapabilitiesTypedDict",
     "ModelConversation",
mistralai/models/agentscompletionrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -86,6 +88,7 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
 
 
 class AgentsCompletionRequest(BaseModel):
@@ -126,6 +129,10 @@ class AgentsCompletionRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -141,8 +148,9 @@ class AgentsCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []
 
         serialized = handler(self)
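Note that "prompt_mode" lands in both optional_fields and nullable_fields but not null_default_fields, which in Speakeasy-generated serializers yields three distinguishable wire states. A rough sketch of the expected behavior, assuming that generator pattern and an illustrative agent_id:

    from mistralai import models

    msgs = [{"role": "user", "content": "hi"}]

    # Default: prompt_mode stays UNSET and is omitted from the payload.
    r1 = models.AgentsCompletionRequest(messages=msgs, agent_id="ag_123")
    assert "prompt_mode" not in r1.model_dump()

    # Explicit None: kept and serialized as JSON null (it is a nullable field).
    r2 = models.AgentsCompletionRequest(messages=msgs, agent_id="ag_123", prompt_mode=None)
    assert r2.model_dump()["prompt_mode"] is None

    # Explicit value: serialized as the string "reasoning".
    r3 = models.AgentsCompletionRequest(messages=msgs, agent_id="ag_123", prompt_mode="reasoning")
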
mistralai/models/agentscompletionstreamrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -85,6 +87,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
 
 
 class AgentsCompletionStreamRequest(BaseModel):
@@ -124,6 +127,10 @@ class AgentsCompletionStreamRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -139,8 +146,9 @@ class AgentsCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/chatcompletionrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -86,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -134,6 +137,10 @@ class ChatCompletionRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -154,9 +161,17 @@ class ChatCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/chatcompletionstreamrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -89,6 +91,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -136,6 +139,10 @@ class ChatCompletionStreamRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -156,9 +163,17 @@ class ChatCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/mistralpromptmode.py ADDED
@@ -0,0 +1,8 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr]
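MistralPromptMode is an open enum: "reasoning" is the only value this SDK version knows about, and the PlainValidator(validate_open_enum(False)) annotation on the request models routes unknown strings through as UnrecognizedStr instead of rejecting them, keeping the client forward-compatible with prompt modes added server-side later. A small sketch of that distinction, assuming the open-enum validator behaves as in other Speakeasy-generated SDKs:

    from mistralai import models

    # Known value: validates against Literal["reasoning"].
    ok = models.ChatCompletionRequest(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "hi"}],
        prompt_mode="reasoning",
    )

    # Unknown value: accepted and carried as an UnrecognizedStr
    # rather than raising a pydantic validation error.
    fwd = models.ChatCompletionRequest(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "hi"}],
        prompt_mode="some-future-mode",
    )
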
mistralai-1.8.1.dist-info/METADATA → mistralai-1.8.2.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: mistralai
-Version: 1.8.1
+Version: 1.8.2
 Summary: Python Client SDK for the Mistral AI API.
 Author: Mistral
 Requires-Python: >=3.9
mistralai-1.8.1.dist-info/RECORD → mistralai-1.8.2.dist-info/RECORD RENAMED
@@ -139,16 +139,16 @@ mistralai/_hooks/deprecation_warning.py,sha256=eyEOf7-o9uqqNWJnufD2RXp3dYrGV4in9
 mistralai/_hooks/registration.py,sha256=ML0W-XbE4WYdJ4eGks_XxF2aLCJTaIWjQATFGzFwvyU,861
 mistralai/_hooks/sdkhooks.py,sha256=s-orhdvnV89TmI3QiPC2LWQtYeM9RrsG1CTll-fYZmQ,2559
 mistralai/_hooks/types.py,sha256=z3AUFDpRJHj2m3h5PklvUeEcGohY0cfph4jL6-nGIzs,2812
-mistralai/_version.py,sha256=ov7SFKb_NFEoxMwoKD6j1BNiqtY8jnujwybVOhD60gY,460
-mistralai/agents.py,sha256=o_apyuwiDzxv-U252T84ynAHCb5fn1q7MMXqrZ4oHLo,32652
+mistralai/_version.py,sha256=dj7Yct7sMSL3ozDUgcL3O2a3h43gPQWDexXedEqencU,460
+mistralai/agents.py,sha256=-_Ur2oC2007qM8kXcw1-1gTI5hYjwhPckets08DFi6s,33204
 mistralai/async_client.py,sha256=KUdYxIIqoD6L7vB0EGwUR6lQ0NK5iCTHjnLVR9CVcJY,355
 mistralai/basesdk.py,sha256=GsU5bp8O5fBCl34tKxaYmeYSIIM971eAPeFBBC_BpFo,12191
 mistralai/batch.py,sha256=YN4D0Duwrap9Ysmp_lRpADYp1Znay7THE_z8ERGvDds,501
-mistralai/beta.py,sha256=wiOFmXiXNKeRMOEdgeIwVqAWg6E_3sqHc_ojwU9oZoY,728
-mistralai/chat.py,sha256=1XVVVvDi726bq6HXCur6-dsmFfzQAEpEWbKT_3sTZ4A,40549
+mistralai/beta.py,sha256=zcRqEzVxBsog2vWbX4X7ZraozAvhNyCo1_oXxLbBuAo,727
+mistralai/chat.py,sha256=D3slKAa_1BlV6G8nHvva--Hfj_zPkmBMLjfECO0mPDU,41101
 mistralai/classifiers.py,sha256=Cbrb6X_eq3-Yz5ZhWkOsFDTGbo3hkgh-vtIEQmU_UdI,33776
 mistralai/client.py,sha256=hrPg-LciKMKiascF0WbRRmqQyCv1lb2yDh6j-aaKVNo,509
-mistralai/conversations.py,sha256=53wWkEymYuoiuGpKwdoAVe5drJYTKT3JsuEQSt59uOA,109317
+mistralai/conversations.py,sha256=mLTN8RfsIaykeJYpTOJXHzRNYefPBMgvD4mLceZuN5c,109316
 mistralai/embeddings.py,sha256=SHG4s0yKwCdKhYKGXKdzPXmcD4_WXI9BBnrM7cSI3gw,9258
 mistralai/extra/README.md,sha256=BTS9fy0ijkiUP7ZVoFQ7FVBxHtXIXqucYZyy_ucFjo4,1739
 mistralai/extra/__init__.py,sha256=8DsU_omYYadqcwlmBOoakBwkWKcSohwLmtB8v-jkn2M,392
@@ -177,7 +177,7 @@ mistralai/httpclient.py,sha256=lC-YQ7q4yiJGKElxBeb3aZnr-4aYxjgEpZ6roeXYlyg,4318
 mistralai/jobs.py,sha256=1DZE14ad348Vg82VHhLRyXhu7SIh8_KgWXc_jP2oFIA,46767
 mistralai/mistral_agents.py,sha256=FrIUGExYpwz3EyMOetLKsD-LtcsoB1xrWR86zp1WwzE,45573
 mistralai/mistral_jobs.py,sha256=EQHFFxFkkx6XvPX-9S8TRZvVSOLUL7z91cg56J8pskQ,31114
-mistralai/models/__init__.py,sha256=h-8plap2U1shoIGyNyeNpQmSH5GvIsVZfoT4wC_4Fcc,43395
+mistralai/models/__init__.py,sha256=QNG_8i4IMS1nXu1zRNBCs3jDo80ct4XBaPwJgRkqPrM,43469
 mistralai/models/agent.py,sha256=dx_uXYDNWoENtgK4InGqs1A9eCw1xDphMPCF_Pv43VQ,4185
 mistralai/models/agentconversation.py,sha256=WaPQn3pGiD2c6bNaP7Ya1akVQt54aK7qDtAEnHAjwrk,2086
 mistralai/models/agentcreationrequest.py,sha256=u9WM95ywRmDqnwx0yfEOT1FnJbiPY8lNtju71d3ix4A,3952
@@ -196,8 +196,8 @@ mistralai/models/agents_api_v1_conversations_listop.py,sha256=N7bwRL4VtP3-a9Q-15
 mistralai/models/agents_api_v1_conversations_messagesop.py,sha256=4V1voPx6mGB4Go5uAsYbkOKgHR0HSUYfadDUoFS0sj0,532
 mistralai/models/agents_api_v1_conversations_restart_streamop.py,sha256=7QZ8fEnDPcIRiTqgg68siLZCSPO13_r2-qdQ7bVsxwY,957
 mistralai/models/agents_api_v1_conversations_restartop.py,sha256=QNA8A3ewD3AwxtayUvrCQpqUfqr13-e7-Y9UgrSbp-A,901
-mistralai/models/agentscompletionrequest.py,sha256=gyGoh1KsCGbOpfmaqk9d_hf1CYhWIriH4vaeQoEDfzU,7920
-mistralai/models/agentscompletionstreamrequest.py,sha256=ZI4iFtl6qDJZ5QTIZ7vDIyFQ9n9rqVqN6tJQAdjpQjA,7365
+mistralai/models/agentscompletionrequest.py,sha256=qBBMmyA-Jcmse1zdCtOVARpg1r0weoifJ4tjaMcSwG4,8277
+mistralai/models/agentscompletionstreamrequest.py,sha256=E4xi4062TUk_vE2PW1R_UPPfKfitHWPHAO6vYSU0vg4,7722
 mistralai/models/agentupdaterequest.py,sha256=w3K74yFRqilUDc64bS6ulmnvgY8WMJ0evSbzwu3ukLc,4094
 mistralai/models/apiendpoint.py,sha256=Hvar5leWsJR_FYb0UzRlSw3vRdBZhk_6BR5r2pIb214,400
 mistralai/models/archiveftmodelout.py,sha256=VdppiqIB9JGNB2B0-Y6XQfQgDmB-hOa1Bta3v_StbLs,565
@@ -211,9 +211,9 @@ mistralai/models/batchjobstatus.py,sha256=WlrIl5vWQGfLmgQA91_9CnCMKhWN6Lli458fT-
 mistralai/models/builtinconnectors.py,sha256=cX1M7Q_2tsWeuH-lKWomXED7xN7Du6BJKvYpep1vD30,284
 mistralai/models/chatclassificationrequest.py,sha256=PmU036oOlGqfd75hNESDUJiN4uJNYguACoCt6CzBC2M,534
 mistralai/models/chatcompletionchoice.py,sha256=6iIFLZj2KYx0HFfzS3-E3sNXG6mPEAlDyXxIA5iZI_U,849
-mistralai/models/chatcompletionrequest.py,sha256=6Innwpi7UnKmyauATOJForAVvW0tkSnbjsiQOOp5OKg,9777
+mistralai/models/chatcompletionrequest.py,sha256=NgQ_15kcNqLP87TT6nT6_wM_LdKgmewM6VHLnqn0-D4,10217
 mistralai/models/chatcompletionresponse.py,sha256=px0hjCapAtTP50u36hiQdPcC9X6LU81Nq5aJ3AlofjM,709
-mistralai/models/chatcompletionstreamrequest.py,sha256=0NFa_nMMRmHU66Hsgu1Zm4fggT0AzvW_imrkyZ4sUxc,9465
+mistralai/models/chatcompletionstreamrequest.py,sha256=TKjp0xk9ReNMnT_KVeZ62c_f7n5VRTSeqb0JyY-r5f8,9905
 mistralai/models/chatmoderationrequest.py,sha256=x1eAoxx_GhaxqGRe4wsqNaUi59K39HQakkedLJVUVD8,2236
 mistralai/models/checkpointout.py,sha256=A2kXS8-VT_1lbg3brifVjZD6tXdsET8vLqBm2a-yXgA,1109
 mistralai/models/classificationrequest.py,sha256=FqQfSrGYwLUjVw78Ft7tbmhAkUN0FqolCn4MNArOuR8,922
@@ -318,6 +318,7 @@ mistralai/models/messageoutputcontentchunks.py,sha256=LRvAb-Hn0XSKBBRrBdwW7CtMC_
 mistralai/models/messageoutputentry.py,sha256=Ow-V0HXFMEowBed_T09281m_ysK4Q8jvWYqAHYxVGqI,2936
 mistralai/models/messageoutputevent.py,sha256=64MdFYY92WOWofmRm6amp2yO6W7q-dv3uKZ1npr8ONo,2709
 mistralai/models/metricout.py,sha256=dXQMMU4Nk6-Zr06Jx1TWilFi6cOwiVLjSanCFn0cPxo,2034
+mistralai/models/mistralpromptmode.py,sha256=v0UKuu6N0kcM_gjy3C7pVUWBs9tuMKtbHG6nLF9jtoI,253
 mistralai/models/modelcapabilities.py,sha256=No-Dl09zT1sG4MxsWnx4s8Yo1tUeMQ7k-HR_iQFIMFc,703
 mistralai/models/modelconversation.py,sha256=j8bO1HyDRHp4Dp_Bo68jA11z90CIhdt3L8DjfCscuDo,4437
 mistralai/models/modellist.py,sha256=D4Y784kQkx0ARhofFrpEqGLfxa-jTY8ev0TQMrD_n8I,995
@@ -391,7 +392,7 @@ mistralai/utils/serializers.py,sha256=EGH40Pgp3sSK9uM4PxL7_SYzSHtmo-Uy6QIE5xLVg6
 mistralai/utils/url.py,sha256=BgGPgcTA6MRK4bF8fjP2dUopN3NzEzxWMXPBVg8NQUA,5254
 mistralai/utils/values.py,sha256=CcaCXEa3xHhkUDROyXZocN8f0bdITftv9Y0P9lTf0YM,3517
 mistralai/version.py,sha256=iosXhlXclBwBqlADFKEilxAC2wWKbtuBKi87AmPi7s8,196
-mistralai-1.8.1.dist-info/LICENSE,sha256=rUtQ_9GD0OyLPlb-2uWVdfE87hzudMRmsW-tS-0DK-0,11340
-mistralai-1.8.1.dist-info/METADATA,sha256=zvxBN6ahY13VTUqmneFHFNXCrEK7Fl5nlhKFGkj_PKw,33379
-mistralai-1.8.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-mistralai-1.8.1.dist-info/RECORD,,
+mistralai-1.8.2.dist-info/LICENSE,sha256=rUtQ_9GD0OyLPlb-2uWVdfE87hzudMRmsW-tS-0DK-0,11340
+mistralai-1.8.2.dist-info/METADATA,sha256=JmRYmUZoWAa0p5iWdzL3E52JX3-WT9l-E6j2cXkmycA,33379
+mistralai-1.8.2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+mistralai-1.8.2.dist-info/RECORD,,