mistralai 1.8.0__py3-none-any.whl → 1.8.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +2 -2
- mistralai/agents.py +12 -0
- mistralai/beta.py +2 -0
- mistralai/chat.py +12 -0
- mistralai/conversations.py +5 -2
- mistralai/embeddings.py +12 -0
- mistralai/mistral_agents.py +2 -0
- mistralai/models/__init__.py +4 -0
- mistralai/models/agentscompletionrequest.py +10 -2
- mistralai/models/agentscompletionstreamrequest.py +10 -2
- mistralai/models/chatcompletionrequest.py +17 -2
- mistralai/models/chatcompletionresponse.py +6 -6
- mistralai/models/chatcompletionstreamrequest.py +17 -2
- mistralai/models/embeddingdtype.py +7 -0
- mistralai/models/embeddingrequest.py +43 -3
- mistralai/models/fimcompletionresponse.py +6 -6
- mistralai/models/mistralpromptmode.py +8 -0
- {mistralai-1.8.0.dist-info → mistralai-1.8.2.dist-info}/METADATA +1 -1
- {mistralai-1.8.0.dist-info → mistralai-1.8.2.dist-info}/RECORD +21 -19
- {mistralai-1.8.0.dist-info → mistralai-1.8.2.dist-info}/LICENSE +0 -0
- {mistralai-1.8.0.dist-info → mistralai-1.8.2.dist-info}/WHEEL +0 -0
mistralai/_version.py
CHANGED
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai"
-__version__: str = "1.8.0"
+__version__: str = "1.8.2"
 __openapi_doc_version__: str = "1.0.0"
 __gen_version__: str = "2.548.6"
-__user_agent__: str = "speakeasy-sdk/python 1.8.0 2.548.6 1.0.0 mistralai"
+__user_agent__: str = "speakeasy-sdk/python 1.8.2 2.548.6 1.0.0 mistralai"
 
 try:
     if __package__ is not None:
mistralai/agents.py
CHANGED
@@ -47,6 +47,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -68,6 +69,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -105,6 +107,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
@@ -213,6 +216,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -234,6 +238,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -271,6 +276,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
@@ -379,6 +385,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -402,6 +409,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -439,6 +447,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
@@ -553,6 +562,7 @@ class Agents(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -576,6 +586,7 @@ class Agents(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -613,6 +624,7 @@ class Agents(BaseSDK):
                 prediction, Optional[models.Prediction]
            ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             agent_id=agent_id,
         )
 
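In short: every `agents` completion variant (sync and async, streaming and not) gains a pass-through `prompt_mode` parameter that lands on `AgentsCompletionRequest`. A minimal usage sketch — the API key and agent ID are placeholders, and `"reasoning"` assumes the literal generated into the new `mistralpromptmode.py`:

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

res = client.agents.complete(
    agent_id="ag-your-agent-id",  # placeholder
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    # New in 1.8.2: omit to leave the field out of the request (UNSET),
    # or pass None to send an explicit JSON null.
    prompt_mode="reasoning",  # assumed MistralPromptMode value
)
print(res.choices[0].message.content)
```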
mistralai/beta.py
CHANGED
@@ -8,7 +8,9 @@ from mistralai.mistral_agents import MistralAgents
 
 class Beta(BaseSDK):
     conversations: Conversations
+    r"""(beta) Conversations API"""
     agents: MistralAgents
+    r"""(beta) Agents API"""
 
     def __init__(self, sdk_config: SDKConfiguration) -> None:
         BaseSDK.__init__(self, sdk_config)
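The docstrings are the only change here; they simply surface in IDE hovers on the two beta sub-SDKs, which are reached the same way as before:

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key
client.beta.conversations  # (beta) Conversations API
client.beta.agents         # (beta) Agents API
```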
mistralai/chat.py
CHANGED
@@ -123,6 +123,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -147,6 +148,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -186,6 +188,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
@@ -288,6 +291,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -312,6 +316,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -351,6 +356,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
@@ -461,6 +467,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -487,6 +494,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -528,6 +536,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
@@ -644,6 +653,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -670,6 +680,7 @@ class Chat(BaseSDK):
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
+        :param prompt_mode:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -711,6 +722,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
             safe_prompt=safe_prompt,
         )
 
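Same pattern as `agents.py`, with `prompt_mode` slotted before `safe_prompt` in every variant. A hedged sketch of the non-streaming call (the model ID is illustrative; `"reasoning"` again assumes the generated enum value):

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

res = client.chat.complete(
    model="magistral-medium-latest",  # illustrative reasoning-capable model
    messages=[{"role": "user", "content": "How many primes are below 20?"}],
    prompt_mode="reasoning",  # assumed MistralPromptMode value
    safe_prompt=False,
)
print(res.choices[0].message.content)
```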
mistralai/conversations.py
CHANGED
@@ -34,8 +34,9 @@ if typing.TYPE_CHECKING:
 # endregion imports
 
 
-
 class Conversations(BaseSDK):
+    r"""(beta) Conversations API"""
+
     # region sdk-class-body
     # Custom run code allowing client side execution of code
 
@@ -146,7 +147,9 @@ class Conversations(BaseSDK):
             completion_args=completion_args,
         )
 
-        async def run_generator() -> AsyncGenerator[Union[RunResultEvents, RunResult], None]:
+        async def run_generator() -> (
+            AsyncGenerator[Union[RunResultEvents, RunResult], None]
+        ):
             current_entries = input_entries
             while True:
                 received_event_tracker: defaultdict[
mistralai/embeddings.py
CHANGED
@@ -18,6 +18,8 @@ class Embeddings(BaseSDK):
         inputs: Union[
             models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict
         ],
+        output_dimension: OptionalNullable[int] = UNSET,
+        output_dtype: Optional[models.EmbeddingDtype] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -29,6 +31,8 @@ class Embeddings(BaseSDK):
 
         :param model: ID of the model to use.
         :param inputs: Text to embed.
+        :param output_dimension: The dimension of the output embeddings.
+        :param output_dtype:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -47,6 +51,8 @@ class Embeddings(BaseSDK):
         request = models.EmbeddingRequest(
             model=model,
             inputs=inputs,
+            output_dimension=output_dimension,
+            output_dtype=output_dtype,
         )
 
         req = self._build_request(
@@ -125,6 +131,8 @@ class Embeddings(BaseSDK):
         inputs: Union[
             models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict
         ],
+        output_dimension: OptionalNullable[int] = UNSET,
+        output_dtype: Optional[models.EmbeddingDtype] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -136,6 +144,8 @@ class Embeddings(BaseSDK):
 
         :param model: ID of the model to use.
         :param inputs: Text to embed.
+        :param output_dimension: The dimension of the output embeddings.
+        :param output_dtype:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -154,6 +164,8 @@ class Embeddings(BaseSDK):
         request = models.EmbeddingRequest(
             model=model,
             inputs=inputs,
+            output_dimension=output_dimension,
+            output_dtype=output_dtype,
         )
 
         req = self._build_request_async(
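Both new knobs are forwarded verbatim into `EmbeddingRequest` in the sync and async paths. A sketch — the model ID and dtype string are assumptions (`"int8"` presumes it is among the literals generated into the new `embeddingdtype.py`):

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

res = client.embeddings.create(
    model="codestral-embed",  # assumed model supporting these options
    inputs=["def add(a, b): return a + b"],
    output_dimension=256,  # request smaller vectors; None would send JSON null
    output_dtype="int8",   # assumed EmbeddingDtype value
)
print(len(res.data[0].embedding))  # expected: 256
```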
mistralai/mistral_agents.py
CHANGED
mistralai/models/__init__.py
CHANGED
@@ -347,6 +347,7 @@ from .documenturlchunk import (
     DocumentURLChunkType,
     DocumentURLChunkTypedDict,
 )
+from .embeddingdtype import EmbeddingDtype
 from .embeddingrequest import (
     EmbeddingRequest,
     EmbeddingRequestInputs,
@@ -590,6 +591,7 @@ from .messageoutputevent import (
     MessageOutputEventTypedDict,
 )
 from .metricout import MetricOut, MetricOutTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
 from .modelconversation import (
     ModelConversation,
@@ -964,6 +966,7 @@ __all__ = [
     "DocumentURLChunk",
     "DocumentURLChunkType",
     "DocumentURLChunkTypedDict",
+    "EmbeddingDtype",
     "EmbeddingRequest",
     "EmbeddingRequestInputs",
     "EmbeddingRequestInputsTypedDict",
@@ -1152,6 +1155,7 @@ __all__ = [
     "MessagesTypedDict",
     "MetricOut",
     "MetricOutTypedDict",
+    "MistralPromptMode",
     "ModelCapabilities",
     "ModelCapabilitiesTypedDict",
     "ModelConversation",
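Net effect of these four hunks: two new names join the public surface of `mistralai.models`:

```python
# Both re-exports added in 1.8.2
from mistralai.models import EmbeddingDtype, MistralPromptMode
```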
mistralai/models/agentscompletionrequest.py
CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -86,6 +88,7 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
 
 
 class AgentsCompletionRequest(BaseModel):
@@ -126,6 +129,10 @@ class AgentsCompletionRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -141,8 +148,9 @@ class AgentsCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []
 
         serialized = handler(self)
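The `PlainValidator(validate_open_enum(False))` annotation makes `prompt_mode` an open enum — values outside the generated literal set validate as plain strings rather than raising — and listing it in both `optional_fields` and `nullable_fields` gives the field three wire states. A hedged sketch of the serializer behavior (message dicts are coerced through the generated discriminated union; shapes simplified):

```python
from mistralai import models

msgs = [{"role": "user", "content": "hello"}]

# UNSET (default): the serializer drops the key from the payload entirely.
req = models.AgentsCompletionRequest(agent_id="ag-placeholder", messages=msgs)
print("prompt_mode" in req.model_dump(by_alias=True))  # False

# Explicit None: "prompt_mode" is in nullable_fields, so a JSON null is kept.
req = models.AgentsCompletionRequest(
    agent_id="ag-placeholder", messages=msgs, prompt_mode=None
)
print(req.model_dump(by_alias=True)["prompt_mode"])  # None
```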
mistralai/models/agentscompletionstreamrequest.py
CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -85,6 +87,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
 
 
 class AgentsCompletionStreamRequest(BaseModel):
@@ -124,6 +127,10 @@ class AgentsCompletionStreamRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -139,8 +146,9 @@ class AgentsCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
         ]
-        nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/chatcompletionrequest.py
CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -86,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -134,6 +137,10 @@ class ChatCompletionRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -154,9 +161,17 @@ class ChatCompletionRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/chatcompletionresponse.py
CHANGED
@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai.types import BaseModel
-from typing import List
-from typing_extensions import
+from typing import List
+from typing_extensions import TypedDict
 
 
 class ChatCompletionResponseTypedDict(TypedDict):
@@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict):
     object: str
     model: str
     usage: UsageInfoTypedDict
-    created:
-    choices:
+    created: int
+    choices: List[ChatCompletionChoiceTypedDict]
 
 
 class ChatCompletionResponse(BaseModel):
@@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel):
 
     usage: UsageInfo
 
-    created:
+    created: int
 
-    choices:
+    choices: List[ChatCompletionChoice]
mistralai/models/chatcompletionstreamrequest.py
CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -11,8 +12,9 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -89,6 +91,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -136,6 +139,10 @@ class ChatCompletionStreamRequest(BaseModel):
 
     parallel_tool_calls: Optional[bool] = None
 
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -156,9 +163,17 @@ class ChatCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
             "safe_prompt",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
+        ]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/embeddingrequest.py
CHANGED
@@ -1,10 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from
+from .embeddingdtype import EmbeddingDtype
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 import pydantic
-from
-from
+from pydantic import model_serializer
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
 EmbeddingRequestInputsTypedDict = TypeAliasType(
@@ -22,6 +24,9 @@ class EmbeddingRequestTypedDict(TypedDict):
     r"""ID of the model to use."""
     inputs: EmbeddingRequestInputsTypedDict
     r"""Text to embed."""
+    output_dimension: NotRequired[Nullable[int]]
+    r"""The dimension of the output embeddings."""
+    output_dtype: NotRequired[EmbeddingDtype]
 
 
 class EmbeddingRequest(BaseModel):
@@ -30,3 +35,38 @@ class EmbeddingRequest(BaseModel):
 
     inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")]
     r"""Text to embed."""
+
+    output_dimension: OptionalNullable[int] = UNSET
+    r"""The dimension of the output embeddings."""
+
+    output_dtype: Optional[EmbeddingDtype] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["output_dimension", "output_dtype"]
+        nullable_fields = ["output_dimension"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
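This is the stock Speakeasy wrap-serializer, specialized to the two new fields: `output_dimension` (optional and nullable) is dropped when left `UNSET` but survives as JSON null when set to `None`, while `output_dtype` (optional only) is dropped whenever it is `None`. A quick sketch of the resulting payloads:

```python
from mistralai import models

# Defaults: both optional keys are omitted from the serialized body.
req = models.EmbeddingRequest(model="mistral-embed", inputs="hello")
print(req.model_dump(by_alias=True))
# {'model': 'mistral-embed', 'input': 'hello'}

# Explicit None on the nullable field is preserved as null.
req = models.EmbeddingRequest(
    model="mistral-embed", inputs="hello", output_dimension=None
)
print(req.model_dump(by_alias=True)["output_dimension"])  # None
```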
mistralai/models/fimcompletionresponse.py
CHANGED
@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai.types import BaseModel
-from typing import List
-from typing_extensions import
+from typing import List
+from typing_extensions import TypedDict
 
 
 class FIMCompletionResponseTypedDict(TypedDict):
@@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict):
     object: str
     model: str
     usage: UsageInfoTypedDict
-    created:
-    choices:
+    created: int
+    choices: List[ChatCompletionChoiceTypedDict]
 
 
 class FIMCompletionResponse(BaseModel):
@@ -26,6 +26,6 @@ class FIMCompletionResponse(BaseModel):
 
     usage: UsageInfo
 
-    created:
+    created: int
 
-    choices:
+    choices: List[ChatCompletionChoice]
{mistralai-1.8.0.dist-info → mistralai-1.8.2.dist-info}/RECORD
CHANGED
@@ -139,17 +139,17 @@ mistralai/_hooks/deprecation_warning.py,sha256=eyEOf7-o9uqqNWJnufD2RXp3dYrGV4in9
 mistralai/_hooks/registration.py,sha256=ML0W-XbE4WYdJ4eGks_XxF2aLCJTaIWjQATFGzFwvyU,861
 mistralai/_hooks/sdkhooks.py,sha256=s-orhdvnV89TmI3QiPC2LWQtYeM9RrsG1CTll-fYZmQ,2559
 mistralai/_hooks/types.py,sha256=z3AUFDpRJHj2m3h5PklvUeEcGohY0cfph4jL6-nGIzs,2812
-mistralai/_version.py,sha256=
-mistralai/agents.py,sha256
+mistralai/_version.py,sha256=dj7Yct7sMSL3ozDUgcL3O2a3h43gPQWDexXedEqencU,460
+mistralai/agents.py,sha256=-_Ur2oC2007qM8kXcw1-1gTI5hYjwhPckets08DFi6s,33204
 mistralai/async_client.py,sha256=KUdYxIIqoD6L7vB0EGwUR6lQ0NK5iCTHjnLVR9CVcJY,355
 mistralai/basesdk.py,sha256=GsU5bp8O5fBCl34tKxaYmeYSIIM971eAPeFBBC_BpFo,12191
 mistralai/batch.py,sha256=YN4D0Duwrap9Ysmp_lRpADYp1Znay7THE_z8ERGvDds,501
-mistralai/beta.py,sha256=
-mistralai/chat.py,sha256=
+mistralai/beta.py,sha256=zcRqEzVxBsog2vWbX4X7ZraozAvhNyCo1_oXxLbBuAo,727
+mistralai/chat.py,sha256=D3slKAa_1BlV6G8nHvva--Hfj_zPkmBMLjfECO0mPDU,41101
 mistralai/classifiers.py,sha256=Cbrb6X_eq3-Yz5ZhWkOsFDTGbo3hkgh-vtIEQmU_UdI,33776
 mistralai/client.py,sha256=hrPg-LciKMKiascF0WbRRmqQyCv1lb2yDh6j-aaKVNo,509
-mistralai/conversations.py,sha256=
-mistralai/embeddings.py,sha256=
+mistralai/conversations.py,sha256=mLTN8RfsIaykeJYpTOJXHzRNYefPBMgvD4mLceZuN5c,109316
+mistralai/embeddings.py,sha256=SHG4s0yKwCdKhYKGXKdzPXmcD4_WXI9BBnrM7cSI3gw,9258
 mistralai/extra/README.md,sha256=BTS9fy0ijkiUP7ZVoFQ7FVBxHtXIXqucYZyy_ucFjo4,1739
 mistralai/extra/__init__.py,sha256=8DsU_omYYadqcwlmBOoakBwkWKcSohwLmtB8v-jkn2M,392
 mistralai/extra/exceptions.py,sha256=4EEygCfdsniYiroHEFVSVDqerQZkpRG027mlJXvMqns,428
@@ -175,9 +175,9 @@ mistralai/fim.py,sha256=UMx-bFYbaSyANZug8VrCabHsqePdiHoYQO1YMKB2MvY,27935
 mistralai/fine_tuning.py,sha256=UENQqfE054VEsAYxdruV-TBLFIFfO-joXNznH08GUvE,477
 mistralai/httpclient.py,sha256=lC-YQ7q4yiJGKElxBeb3aZnr-4aYxjgEpZ6roeXYlyg,4318
 mistralai/jobs.py,sha256=1DZE14ad348Vg82VHhLRyXhu7SIh8_KgWXc_jP2oFIA,46767
-mistralai/mistral_agents.py,sha256=
+mistralai/mistral_agents.py,sha256=FrIUGExYpwz3EyMOetLKsD-LtcsoB1xrWR86zp1WwzE,45573
 mistralai/mistral_jobs.py,sha256=EQHFFxFkkx6XvPX-9S8TRZvVSOLUL7z91cg56J8pskQ,31114
-mistralai/models/__init__.py,sha256=
+mistralai/models/__init__.py,sha256=QNG_8i4IMS1nXu1zRNBCs3jDo80ct4XBaPwJgRkqPrM,43469
 mistralai/models/agent.py,sha256=dx_uXYDNWoENtgK4InGqs1A9eCw1xDphMPCF_Pv43VQ,4185
 mistralai/models/agentconversation.py,sha256=WaPQn3pGiD2c6bNaP7Ya1akVQt54aK7qDtAEnHAjwrk,2086
 mistralai/models/agentcreationrequest.py,sha256=u9WM95ywRmDqnwx0yfEOT1FnJbiPY8lNtju71d3ix4A,3952
@@ -196,8 +196,8 @@ mistralai/models/agents_api_v1_conversations_listop.py,sha256=N7bwRL4VtP3-a9Q-15
 mistralai/models/agents_api_v1_conversations_messagesop.py,sha256=4V1voPx6mGB4Go5uAsYbkOKgHR0HSUYfadDUoFS0sj0,532
 mistralai/models/agents_api_v1_conversations_restart_streamop.py,sha256=7QZ8fEnDPcIRiTqgg68siLZCSPO13_r2-qdQ7bVsxwY,957
 mistralai/models/agents_api_v1_conversations_restartop.py,sha256=QNA8A3ewD3AwxtayUvrCQpqUfqr13-e7-Y9UgrSbp-A,901
-mistralai/models/agentscompletionrequest.py,sha256=
-mistralai/models/agentscompletionstreamrequest.py,sha256=
+mistralai/models/agentscompletionrequest.py,sha256=qBBMmyA-Jcmse1zdCtOVARpg1r0weoifJ4tjaMcSwG4,8277
+mistralai/models/agentscompletionstreamrequest.py,sha256=E4xi4062TUk_vE2PW1R_UPPfKfitHWPHAO6vYSU0vg4,7722
 mistralai/models/agentupdaterequest.py,sha256=w3K74yFRqilUDc64bS6ulmnvgY8WMJ0evSbzwu3ukLc,4094
 mistralai/models/apiendpoint.py,sha256=Hvar5leWsJR_FYb0UzRlSw3vRdBZhk_6BR5r2pIb214,400
 mistralai/models/archiveftmodelout.py,sha256=VdppiqIB9JGNB2B0-Y6XQfQgDmB-hOa1Bta3v_StbLs,565
@@ -211,9 +211,9 @@ mistralai/models/batchjobstatus.py,sha256=WlrIl5vWQGfLmgQA91_9CnCMKhWN6Lli458fT-
 mistralai/models/builtinconnectors.py,sha256=cX1M7Q_2tsWeuH-lKWomXED7xN7Du6BJKvYpep1vD30,284
 mistralai/models/chatclassificationrequest.py,sha256=PmU036oOlGqfd75hNESDUJiN4uJNYguACoCt6CzBC2M,534
 mistralai/models/chatcompletionchoice.py,sha256=6iIFLZj2KYx0HFfzS3-E3sNXG6mPEAlDyXxIA5iZI_U,849
-mistralai/models/chatcompletionrequest.py,sha256=
-mistralai/models/chatcompletionresponse.py,sha256=
-mistralai/models/chatcompletionstreamrequest.py,sha256=
+mistralai/models/chatcompletionrequest.py,sha256=NgQ_15kcNqLP87TT6nT6_wM_LdKgmewM6VHLnqn0-D4,10217
+mistralai/models/chatcompletionresponse.py,sha256=px0hjCapAtTP50u36hiQdPcC9X6LU81Nq5aJ3AlofjM,709
+mistralai/models/chatcompletionstreamrequest.py,sha256=TKjp0xk9ReNMnT_KVeZ62c_f7n5VRTSeqb0JyY-r5f8,9905
 mistralai/models/chatmoderationrequest.py,sha256=x1eAoxx_GhaxqGRe4wsqNaUi59K39HQakkedLJVUVD8,2236
 mistralai/models/checkpointout.py,sha256=A2kXS8-VT_1lbg3brifVjZD6tXdsET8vLqBm2a-yXgA,1109
 mistralai/models/classificationrequest.py,sha256=FqQfSrGYwLUjVw78Ft7tbmhAkUN0FqolCn4MNArOuR8,922
@@ -256,7 +256,8 @@ mistralai/models/deletemodelout.py,sha256=W_crO0WtksoKUgq5s9Yh8zS8RxSuyKYQCBt1i8
 mistralai/models/deltamessage.py,sha256=7NtvEjdmBOl86rwOx7x2fcCCJSzIF8K6-eu-G9Wr9PI,1939
 mistralai/models/documentlibrarytool.py,sha256=EN50sX8wgfAw7mF6W8BkOwKyqRvEzGvHgINn-evuCcg,654
 mistralai/models/documenturlchunk.py,sha256=j3JB_Cy1eIRY7fTJe8AvQrdrLEA6xsJcM1l9_a1Sh68,1704
-mistralai/models/
+mistralai/models/embeddingdtype.py,sha256=c7L-PKhBgPVPZeMGuMub0ZOs0MdxMbpW2ebE0t7oEpU,209
+mistralai/models/embeddingrequest.py,sha256=BE3L0ImjkdmzwOSUQnC_vCsnvyeQqWmqRw4xgzX6vJc,2258
 mistralai/models/embeddingresponse.py,sha256=te6E_LYEzRjHJ9QREmsFp5PeNP2J_8ALVjyb1T20pNA,663
 mistralai/models/embeddingresponsedata.py,sha256=fJ3mrZqyBBBE40a6iegOJX3DVDfgyMRq23ByeGSTLFk,534
 mistralai/models/eventout.py,sha256=TouRJeISBLphMTPHfgSOpuoOmbGDVohPOrdgHyExMpw,1633
@@ -270,7 +271,7 @@ mistralai/models/files_api_routes_upload_fileop.py,sha256=gIGH5xcPryWYkj1FmNv_0-
 mistralai/models/fileschema.py,sha256=n_IjCdNOrC2fuzkv75wJn01XvqGTmPK3JqAFSHaOiMA,2597
 mistralai/models/filesignedurl.py,sha256=VwvuhzhJulAB99Qxz6zr-2F1aINosAfaSxU0IhytDSU,289
 mistralai/models/fimcompletionrequest.py,sha256=wWDCkQ_PMnjB8DrIuIvVJlPGqQtTpVDHt4p7xJ204Ug,6565
-mistralai/models/fimcompletionresponse.py,sha256=
+mistralai/models/fimcompletionresponse.py,sha256=qNgb2WFVgkaW7Isfkk2Aol1gTV9UkhQomcDAhoPDeYw,707
 mistralai/models/fimcompletionstreamrequest.py,sha256=fxuR8FDOWMwIqlYU9ttAfGeRdVgTz4l2k26_OEfxelg,5944
 mistralai/models/finetuneablemodeltype.py,sha256=XmTpXeQU8AINnn1kVmXldFUauCaEnRtJNFAXUTVb6RQ,197
 mistralai/models/ftclassifierlossfunction.py,sha256=ApQB8ssAh2yE26-CljxPO7Jc5lxq3OoBPR4rUp-Td9U,203
@@ -317,6 +318,7 @@ mistralai/models/messageoutputcontentchunks.py,sha256=LRvAb-Hn0XSKBBRrBdwW7CtMC_
 mistralai/models/messageoutputentry.py,sha256=Ow-V0HXFMEowBed_T09281m_ysK4Q8jvWYqAHYxVGqI,2936
 mistralai/models/messageoutputevent.py,sha256=64MdFYY92WOWofmRm6amp2yO6W7q-dv3uKZ1npr8ONo,2709
 mistralai/models/metricout.py,sha256=dXQMMU4Nk6-Zr06Jx1TWilFi6cOwiVLjSanCFn0cPxo,2034
+mistralai/models/mistralpromptmode.py,sha256=v0UKuu6N0kcM_gjy3C7pVUWBs9tuMKtbHG6nLF9jtoI,253
 mistralai/models/modelcapabilities.py,sha256=No-Dl09zT1sG4MxsWnx4s8Yo1tUeMQ7k-HR_iQFIMFc,703
 mistralai/models/modelconversation.py,sha256=j8bO1HyDRHp4Dp_Bo68jA11z90CIhdt3L8DjfCscuDo,4437
 mistralai/models/modellist.py,sha256=D4Y784kQkx0ARhofFrpEqGLfxa-jTY8ev0TQMrD_n8I,995
@@ -390,7 +392,7 @@ mistralai/utils/serializers.py,sha256=EGH40Pgp3sSK9uM4PxL7_SYzSHtmo-Uy6QIE5xLVg6
 mistralai/utils/url.py,sha256=BgGPgcTA6MRK4bF8fjP2dUopN3NzEzxWMXPBVg8NQUA,5254
 mistralai/utils/values.py,sha256=CcaCXEa3xHhkUDROyXZocN8f0bdITftv9Y0P9lTf0YM,3517
 mistralai/version.py,sha256=iosXhlXclBwBqlADFKEilxAC2wWKbtuBKi87AmPi7s8,196
-mistralai-1.8.
-mistralai-1.8.
-mistralai-1.8.
-mistralai-1.8.
+mistralai-1.8.2.dist-info/LICENSE,sha256=rUtQ_9GD0OyLPlb-2uWVdfE87hzudMRmsW-tS-0DK-0,11340
+mistralai-1.8.2.dist-info/METADATA,sha256=JmRYmUZoWAa0p5iWdzL3E52JX3-WT9l-E6j2cXkmycA,33379
+mistralai-1.8.2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+mistralai-1.8.2.dist-info/RECORD,,
{mistralai-1.8.0.dist-info → mistralai-1.8.2.dist-info}/LICENSE
File without changes
{mistralai-1.8.0.dist-info → mistralai-1.8.2.dist-info}/WHEEL
File without changes