mistralai 1.5.2rc1__py3-none-any.whl → 1.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +2 -2
- mistralai/agents.py +12 -0
- mistralai/chat.py +12 -0
- mistralai/classifiers.py +4 -4
- mistralai/models/agentscompletionrequest.py +4 -0
- mistralai/models/agentscompletionstreamrequest.py +4 -0
- mistralai/models/chatcompletionrequest.py +4 -0
- mistralai/models/chatcompletionstreamrequest.py +4 -0
- mistralai/models/function.py +2 -2
- mistralai/models/jsonschema.py +1 -1
- {mistralai-1.5.2rc1.dist-info → mistralai-1.6.0.dist-info}/METADATA +2 -2
- {mistralai-1.5.2rc1.dist-info → mistralai-1.6.0.dist-info}/RECORD +34 -30
- mistralai_azure/_version.py +2 -2
- mistralai_azure/chat.py +12 -0
- mistralai_azure/models/__init__.py +15 -0
- mistralai_azure/models/chatcompletionrequest.py +4 -0
- mistralai_azure/models/chatcompletionstreamrequest.py +4 -0
- mistralai_azure/models/contentchunk.py +6 -2
- mistralai_azure/models/function.py +2 -2
- mistralai_azure/models/imageurl.py +53 -0
- mistralai_azure/models/imageurlchunk.py +33 -0
- mistralai_azure/models/jsonschema.py +1 -1
- mistralai_gcp/_version.py +2 -2
- mistralai_gcp/chat.py +12 -0
- mistralai_gcp/models/__init__.py +15 -0
- mistralai_gcp/models/chatcompletionrequest.py +4 -0
- mistralai_gcp/models/chatcompletionstreamrequest.py +4 -0
- mistralai_gcp/models/contentchunk.py +6 -2
- mistralai_gcp/models/function.py +2 -2
- mistralai_gcp/models/imageurl.py +53 -0
- mistralai_gcp/models/imageurlchunk.py +33 -0
- mistralai_gcp/models/jsonschema.py +1 -1
- {mistralai-1.5.2rc1.dist-info → mistralai-1.6.0.dist-info}/LICENSE +0 -0
- {mistralai-1.5.2rc1.dist-info → mistralai-1.6.0.dist-info}/WHEEL +0 -0
mistralai/_version.py
CHANGED
|
@@ -3,10 +3,10 @@
|
|
|
3
3
|
import importlib.metadata
|
|
4
4
|
|
|
5
5
|
__title__: str = "mistralai"
|
|
6
|
-
__version__: str = "1.5.2rc1"
|
|
6
|
+
__version__: str = "1.6.0"
|
|
7
7
|
__openapi_doc_version__: str = "0.0.2"
|
|
8
8
|
__gen_version__: str = "2.548.6"
|
|
9
|
-
__user_agent__: str = "speakeasy-sdk/python 1.5.2rc1 2.548.6 0.0.2 mistralai"
|
|
9
|
+
__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai"
|
|
10
10
|
|
|
11
11
|
try:
|
|
12
12
|
if __package__ is not None:
|
mistralai/agents.py
CHANGED
|
@@ -46,6 +46,7 @@ class Agents(BaseSDK):
|
|
|
46
46
|
prediction: Optional[
|
|
47
47
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
48
48
|
] = None,
|
|
49
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
49
50
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
50
51
|
server_url: Optional[str] = None,
|
|
51
52
|
timeout_ms: Optional[int] = None,
|
|
@@ -66,6 +67,7 @@ class Agents(BaseSDK):
|
|
|
66
67
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
67
68
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
68
69
|
:param prediction:
|
|
70
|
+
:param parallel_tool_calls:
|
|
69
71
|
:param retries: Override the default retry configuration for this method
|
|
70
72
|
:param server_url: Override the default server URL for this method
|
|
71
73
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -102,6 +104,7 @@ class Agents(BaseSDK):
|
|
|
102
104
|
prediction=utils.get_pydantic_model(
|
|
103
105
|
prediction, Optional[models.Prediction]
|
|
104
106
|
),
|
|
107
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
105
108
|
agent_id=agent_id,
|
|
106
109
|
)
|
|
107
110
|
|
|
@@ -209,6 +212,7 @@ class Agents(BaseSDK):
|
|
|
209
212
|
prediction: Optional[
|
|
210
213
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
211
214
|
] = None,
|
|
215
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
212
216
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
213
217
|
server_url: Optional[str] = None,
|
|
214
218
|
timeout_ms: Optional[int] = None,
|
|
@@ -229,6 +233,7 @@ class Agents(BaseSDK):
|
|
|
229
233
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
230
234
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
231
235
|
:param prediction:
|
|
236
|
+
:param parallel_tool_calls:
|
|
232
237
|
:param retries: Override the default retry configuration for this method
|
|
233
238
|
:param server_url: Override the default server URL for this method
|
|
234
239
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -265,6 +270,7 @@ class Agents(BaseSDK):
|
|
|
265
270
|
prediction=utils.get_pydantic_model(
|
|
266
271
|
prediction, Optional[models.Prediction]
|
|
267
272
|
),
|
|
273
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
268
274
|
agent_id=agent_id,
|
|
269
275
|
)
|
|
270
276
|
|
|
@@ -372,6 +378,7 @@ class Agents(BaseSDK):
|
|
|
372
378
|
prediction: Optional[
|
|
373
379
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
374
380
|
] = None,
|
|
381
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
375
382
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
376
383
|
server_url: Optional[str] = None,
|
|
377
384
|
timeout_ms: Optional[int] = None,
|
|
@@ -394,6 +401,7 @@ class Agents(BaseSDK):
|
|
|
394
401
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
395
402
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
396
403
|
:param prediction:
|
|
404
|
+
:param parallel_tool_calls:
|
|
397
405
|
:param retries: Override the default retry configuration for this method
|
|
398
406
|
:param server_url: Override the default server URL for this method
|
|
399
407
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -430,6 +438,7 @@ class Agents(BaseSDK):
|
|
|
430
438
|
prediction=utils.get_pydantic_model(
|
|
431
439
|
prediction, Optional[models.Prediction]
|
|
432
440
|
),
|
|
441
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
433
442
|
agent_id=agent_id,
|
|
434
443
|
)
|
|
435
444
|
|
|
@@ -543,6 +552,7 @@ class Agents(BaseSDK):
|
|
|
543
552
|
prediction: Optional[
|
|
544
553
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
545
554
|
] = None,
|
|
555
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
546
556
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
547
557
|
server_url: Optional[str] = None,
|
|
548
558
|
timeout_ms: Optional[int] = None,
|
|
@@ -565,6 +575,7 @@ class Agents(BaseSDK):
|
|
|
565
575
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
566
576
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
567
577
|
:param prediction:
|
|
578
|
+
:param parallel_tool_calls:
|
|
568
579
|
:param retries: Override the default retry configuration for this method
|
|
569
580
|
:param server_url: Override the default server URL for this method
|
|
570
581
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -601,6 +612,7 @@ class Agents(BaseSDK):
|
|
|
601
612
|
prediction=utils.get_pydantic_model(
|
|
602
613
|
prediction, Optional[models.Prediction]
|
|
603
614
|
),
|
|
615
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
604
616
|
agent_id=agent_id,
|
|
605
617
|
)
|
|
606
618
|
|
mistralai/chat.py
CHANGED
|
@@ -122,6 +122,7 @@ class Chat(BaseSDK):
|
|
|
122
122
|
prediction: Optional[
|
|
123
123
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
124
124
|
] = None,
|
|
125
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
125
126
|
safe_prompt: Optional[bool] = None,
|
|
126
127
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
127
128
|
server_url: Optional[str] = None,
|
|
@@ -145,6 +146,7 @@ class Chat(BaseSDK):
|
|
|
145
146
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
146
147
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
147
148
|
:param prediction:
|
|
149
|
+
:param parallel_tool_calls:
|
|
148
150
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
149
151
|
:param retries: Override the default retry configuration for this method
|
|
150
152
|
:param server_url: Override the default server URL for this method
|
|
@@ -183,6 +185,7 @@ class Chat(BaseSDK):
|
|
|
183
185
|
prediction=utils.get_pydantic_model(
|
|
184
186
|
prediction, Optional[models.Prediction]
|
|
185
187
|
),
|
|
188
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
186
189
|
safe_prompt=safe_prompt,
|
|
187
190
|
)
|
|
188
191
|
|
|
@@ -284,6 +287,7 @@ class Chat(BaseSDK):
|
|
|
284
287
|
prediction: Optional[
|
|
285
288
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
286
289
|
] = None,
|
|
290
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
287
291
|
safe_prompt: Optional[bool] = None,
|
|
288
292
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
289
293
|
server_url: Optional[str] = None,
|
|
@@ -307,6 +311,7 @@ class Chat(BaseSDK):
|
|
|
307
311
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
308
312
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
309
313
|
:param prediction:
|
|
314
|
+
:param parallel_tool_calls:
|
|
310
315
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
311
316
|
:param retries: Override the default retry configuration for this method
|
|
312
317
|
:param server_url: Override the default server URL for this method
|
|
@@ -345,6 +350,7 @@ class Chat(BaseSDK):
|
|
|
345
350
|
prediction=utils.get_pydantic_model(
|
|
346
351
|
prediction, Optional[models.Prediction]
|
|
347
352
|
),
|
|
353
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
348
354
|
safe_prompt=safe_prompt,
|
|
349
355
|
)
|
|
350
356
|
|
|
@@ -454,6 +460,7 @@ class Chat(BaseSDK):
|
|
|
454
460
|
prediction: Optional[
|
|
455
461
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
456
462
|
] = None,
|
|
463
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
457
464
|
safe_prompt: Optional[bool] = None,
|
|
458
465
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
459
466
|
server_url: Optional[str] = None,
|
|
@@ -479,6 +486,7 @@ class Chat(BaseSDK):
|
|
|
479
486
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
480
487
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
481
488
|
:param prediction:
|
|
489
|
+
:param parallel_tool_calls:
|
|
482
490
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
483
491
|
:param retries: Override the default retry configuration for this method
|
|
484
492
|
:param server_url: Override the default server URL for this method
|
|
@@ -519,6 +527,7 @@ class Chat(BaseSDK):
|
|
|
519
527
|
prediction=utils.get_pydantic_model(
|
|
520
528
|
prediction, Optional[models.Prediction]
|
|
521
529
|
),
|
|
530
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
522
531
|
safe_prompt=safe_prompt,
|
|
523
532
|
)
|
|
524
533
|
|
|
@@ -634,6 +643,7 @@ class Chat(BaseSDK):
|
|
|
634
643
|
prediction: Optional[
|
|
635
644
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
636
645
|
] = None,
|
|
646
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
637
647
|
safe_prompt: Optional[bool] = None,
|
|
638
648
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
639
649
|
server_url: Optional[str] = None,
|
|
@@ -659,6 +669,7 @@ class Chat(BaseSDK):
|
|
|
659
669
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
660
670
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
661
671
|
:param prediction:
|
|
672
|
+
:param parallel_tool_calls:
|
|
662
673
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
663
674
|
:param retries: Override the default retry configuration for this method
|
|
664
675
|
:param server_url: Override the default server URL for this method
|
|
@@ -699,6 +710,7 @@ class Chat(BaseSDK):
|
|
|
699
710
|
prediction=utils.get_pydantic_model(
|
|
700
711
|
prediction, Optional[models.Prediction]
|
|
701
712
|
),
|
|
713
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
702
714
|
safe_prompt=safe_prompt,
|
|
703
715
|
)
|
|
704
716
|
|
mistralai/classifiers.py
CHANGED
|
@@ -237,7 +237,7 @@ class Classifiers(BaseSDK):
|
|
|
237
237
|
timeout_ms: Optional[int] = None,
|
|
238
238
|
http_headers: Optional[Mapping[str, str]] = None,
|
|
239
239
|
) -> models.ClassificationResponse:
|
|
240
|
-
r"""Moderations
|
|
240
|
+
r"""Chat Moderations
|
|
241
241
|
|
|
242
242
|
:param model:
|
|
243
243
|
:param inputs: Chat to classify
|
|
@@ -293,7 +293,7 @@ class Classifiers(BaseSDK):
|
|
|
293
293
|
http_res = self.do_request(
|
|
294
294
|
hook_ctx=HookContext(
|
|
295
295
|
base_url=base_url or "",
|
|
296
|
-
operation_id="
|
|
296
|
+
operation_id="chat_moderations_v1_chat_moderations_post",
|
|
297
297
|
oauth2_scopes=[],
|
|
298
298
|
security_source=get_security_from_env(
|
|
299
299
|
self.sdk_configuration.security, models.Security
|
|
@@ -346,7 +346,7 @@ class Classifiers(BaseSDK):
|
|
|
346
346
|
timeout_ms: Optional[int] = None,
|
|
347
347
|
http_headers: Optional[Mapping[str, str]] = None,
|
|
348
348
|
) -> models.ClassificationResponse:
|
|
349
|
-
r"""Moderations
|
|
349
|
+
r"""Chat Moderations
|
|
350
350
|
|
|
351
351
|
:param model:
|
|
352
352
|
:param inputs: Chat to classify
|
|
@@ -402,7 +402,7 @@ class Classifiers(BaseSDK):
|
|
|
402
402
|
http_res = await self.do_request_async(
|
|
403
403
|
hook_ctx=HookContext(
|
|
404
404
|
base_url=base_url or "",
|
|
405
|
-
operation_id="
|
|
405
|
+
operation_id="chat_moderations_v1_chat_moderations_post",
|
|
406
406
|
oauth2_scopes=[],
|
|
407
407
|
security_source=get_security_from_env(
|
|
408
408
|
self.sdk_configuration.security, models.Security
|
|
@@ -85,6 +85,7 @@ class AgentsCompletionRequestTypedDict(TypedDict):
|
|
|
85
85
|
n: NotRequired[Nullable[int]]
|
|
86
86
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
87
87
|
prediction: NotRequired[PredictionTypedDict]
|
|
88
|
+
parallel_tool_calls: NotRequired[bool]
|
|
88
89
|
|
|
89
90
|
|
|
90
91
|
class AgentsCompletionRequest(BaseModel):
|
|
@@ -123,6 +124,8 @@ class AgentsCompletionRequest(BaseModel):
|
|
|
123
124
|
|
|
124
125
|
prediction: Optional[Prediction] = None
|
|
125
126
|
|
|
127
|
+
parallel_tool_calls: Optional[bool] = None
|
|
128
|
+
|
|
126
129
|
@model_serializer(mode="wrap")
|
|
127
130
|
def serialize_model(self, handler):
|
|
128
131
|
optional_fields = [
|
|
@@ -137,6 +140,7 @@ class AgentsCompletionRequest(BaseModel):
|
|
|
137
140
|
"frequency_penalty",
|
|
138
141
|
"n",
|
|
139
142
|
"prediction",
|
|
143
|
+
"parallel_tool_calls",
|
|
140
144
|
]
|
|
141
145
|
nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
|
|
142
146
|
null_default_fields = []
|
|
@@ -84,6 +84,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
|
|
|
84
84
|
n: NotRequired[Nullable[int]]
|
|
85
85
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
86
86
|
prediction: NotRequired[PredictionTypedDict]
|
|
87
|
+
parallel_tool_calls: NotRequired[bool]
|
|
87
88
|
|
|
88
89
|
|
|
89
90
|
class AgentsCompletionStreamRequest(BaseModel):
|
|
@@ -121,6 +122,8 @@ class AgentsCompletionStreamRequest(BaseModel):
|
|
|
121
122
|
|
|
122
123
|
prediction: Optional[Prediction] = None
|
|
123
124
|
|
|
125
|
+
parallel_tool_calls: Optional[bool] = None
|
|
126
|
+
|
|
124
127
|
@model_serializer(mode="wrap")
|
|
125
128
|
def serialize_model(self, handler):
|
|
126
129
|
optional_fields = [
|
|
@@ -135,6 +138,7 @@ class AgentsCompletionStreamRequest(BaseModel):
|
|
|
135
138
|
"frequency_penalty",
|
|
136
139
|
"n",
|
|
137
140
|
"prediction",
|
|
141
|
+
"parallel_tool_calls",
|
|
138
142
|
]
|
|
139
143
|
nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
|
|
140
144
|
null_default_fields = []
|
|
@@ -85,6 +85,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
|
|
|
85
85
|
n: NotRequired[Nullable[int]]
|
|
86
86
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
87
87
|
prediction: NotRequired[PredictionTypedDict]
|
|
88
|
+
parallel_tool_calls: NotRequired[bool]
|
|
88
89
|
safe_prompt: NotRequired[bool]
|
|
89
90
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
90
91
|
|
|
@@ -131,6 +132,8 @@ class ChatCompletionRequest(BaseModel):
|
|
|
131
132
|
|
|
132
133
|
prediction: Optional[Prediction] = None
|
|
133
134
|
|
|
135
|
+
parallel_tool_calls: Optional[bool] = None
|
|
136
|
+
|
|
134
137
|
safe_prompt: Optional[bool] = None
|
|
135
138
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
136
139
|
|
|
@@ -150,6 +153,7 @@ class ChatCompletionRequest(BaseModel):
|
|
|
150
153
|
"frequency_penalty",
|
|
151
154
|
"n",
|
|
152
155
|
"prediction",
|
|
156
|
+
"parallel_tool_calls",
|
|
153
157
|
"safe_prompt",
|
|
154
158
|
]
|
|
155
159
|
nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
|
|
@@ -88,6 +88,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
|
|
|
88
88
|
n: NotRequired[Nullable[int]]
|
|
89
89
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
90
90
|
prediction: NotRequired[PredictionTypedDict]
|
|
91
|
+
parallel_tool_calls: NotRequired[bool]
|
|
91
92
|
safe_prompt: NotRequired[bool]
|
|
92
93
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
93
94
|
|
|
@@ -133,6 +134,8 @@ class ChatCompletionStreamRequest(BaseModel):
|
|
|
133
134
|
|
|
134
135
|
prediction: Optional[Prediction] = None
|
|
135
136
|
|
|
137
|
+
parallel_tool_calls: Optional[bool] = None
|
|
138
|
+
|
|
136
139
|
safe_prompt: Optional[bool] = None
|
|
137
140
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
138
141
|
|
|
@@ -152,6 +155,7 @@ class ChatCompletionStreamRequest(BaseModel):
|
|
|
152
155
|
"frequency_penalty",
|
|
153
156
|
"n",
|
|
154
157
|
"prediction",
|
|
158
|
+
"parallel_tool_calls",
|
|
155
159
|
"safe_prompt",
|
|
156
160
|
]
|
|
157
161
|
nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
|
mistralai/models/function.py
CHANGED
mistralai/models/jsonschema.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.3
|
|
2
2
|
Name: mistralai
|
|
3
|
-
Version: 1.5.2rc1
|
|
3
|
+
Version: 1.6.0
|
|
4
4
|
Summary: Python Client SDK for the Mistral AI API.
|
|
5
5
|
Author: Mistral
|
|
6
6
|
Requires-Python: >=3.9
|
|
@@ -463,7 +463,7 @@ The documentation for the GCP SDK is available [here](https://github.com/mistral
|
|
|
463
463
|
### [classifiers](https://github.com/mistralai/client-python/blob/master/docs/sdks/classifiers/README.md)
|
|
464
464
|
|
|
465
465
|
* [moderate](https://github.com/mistralai/client-python/blob/master/docs/sdks/classifiers/README.md#moderate) - Moderations
|
|
466
|
-
* [moderate_chat](https://github.com/mistralai/client-python/blob/master/docs/sdks/classifiers/README.md#moderate_chat) - Moderations
|
|
466
|
+
* [moderate_chat](https://github.com/mistralai/client-python/blob/master/docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations
|
|
467
467
|
|
|
468
468
|
### [embeddings](https://github.com/mistralai/client-python/blob/master/docs/sdks/embeddings/README.md)
|
|
469
469
|
|
|
@@ -4,26 +4,28 @@ mistralai_azure/_hooks/custom_user_agent.py,sha256=0m-1JzJxOT42rvRTEuCiFLqbOMriO
|
|
|
4
4
|
mistralai_azure/_hooks/registration.py,sha256=5BN-U92pwP5kUaN7EOso2vWrwZlLvRcU5Coccibqp20,741
|
|
5
5
|
mistralai_azure/_hooks/sdkhooks.py,sha256=urOhVMYX_n5KgMoNDNmGs4fsgUWoeSG6_GarhPxH-YU,2565
|
|
6
6
|
mistralai_azure/_hooks/types.py,sha256=5lbjAtBy4DcEmoFjepuZA4l3vjE73G_NW5izQHi3DK0,2818
|
|
7
|
-
mistralai_azure/_version.py,sha256=
|
|
7
|
+
mistralai_azure/_version.py,sha256=qa1zEa5rDcZbGrfBSLYlvEl14a0nG1VfMvKuRuXKSwQ,472
|
|
8
8
|
mistralai_azure/basesdk.py,sha256=95JOT11O1oU74EcPvwu3lj9o32a7IPInYIveTVjV8pE,12136
|
|
9
|
-
mistralai_azure/chat.py,sha256=
|
|
9
|
+
mistralai_azure/chat.py,sha256=6zTDL9fbR7v4n23Awgu7P_9nPKeum2W5IoX7ubumn44,35910
|
|
10
10
|
mistralai_azure/httpclient.py,sha256=lC-YQ7q4yiJGKElxBeb3aZnr-4aYxjgEpZ6roeXYlyg,4318
|
|
11
|
-
mistralai_azure/models/__init__.py,sha256=
|
|
11
|
+
mistralai_azure/models/__init__.py,sha256=zByHopg2jAg0Sfj7cfw2tD7ioV6bjK7s61KqZkiDnfA,6075
|
|
12
12
|
mistralai_azure/models/assistantmessage.py,sha256=OmHqIM8Cnp4gW6_NbEGMam-_-XBDqMOdskb4BejEBZY,2655
|
|
13
13
|
mistralai_azure/models/chatcompletionchoice.py,sha256=-JE13p36mWnyc3zxnHLJp1Q43QVgj5QRurnZslXdJc0,935
|
|
14
|
-
mistralai_azure/models/chatcompletionrequest.py,sha256=
|
|
14
|
+
mistralai_azure/models/chatcompletionrequest.py,sha256=ixQP91BFZv1Zebxq0ephBmHyI5W0_yeEQoNR13Z5QdU,9763
|
|
15
15
|
mistralai_azure/models/chatcompletionresponse.py,sha256=sPmb4kih2DpE3r8Xem_HYj6o3E3i-6PyVROvm7Ysrfs,798
|
|
16
|
-
mistralai_azure/models/chatcompletionstreamrequest.py,sha256=
|
|
16
|
+
mistralai_azure/models/chatcompletionstreamrequest.py,sha256=o8cbpKDS0scF_B3dfojOUhnLTd7D2G6AT-anOVe9ZFo,8899
|
|
17
17
|
mistralai_azure/models/completionchunk.py,sha256=yoA0tYoyK5RChQPbEvYUi1BVmuyH-QT5IYwEYJNtsXM,877
|
|
18
18
|
mistralai_azure/models/completionevent.py,sha256=8wkRAMMpDFfhFSm7OEmli80lsK98Tir7R6IxW-KxeuE,405
|
|
19
19
|
mistralai_azure/models/completionresponsestreamchoice.py,sha256=c6BncIEgKnK4HUPCeIhLfVc3RgxXKNcxp2JrlObUu9E,1834
|
|
20
|
-
mistralai_azure/models/contentchunk.py,sha256=
|
|
20
|
+
mistralai_azure/models/contentchunk.py,sha256=a7A9ymr1Qvg4am-uqrGxqrmTf9NBMPiGbVncuOevchE,881
|
|
21
21
|
mistralai_azure/models/deltamessage.py,sha256=DvXCMs-P1i3QlUjCjJv4en2d04ydTrH6AjECpX9L2aw,1970
|
|
22
|
-
mistralai_azure/models/function.py,sha256=
|
|
22
|
+
mistralai_azure/models/function.py,sha256=VKcPB1oJ8_jvfXRfqufa2Y9to5WdxS-hi9OLu78GNpM,540
|
|
23
23
|
mistralai_azure/models/functioncall.py,sha256=H2eemkzk2Zm1LEm11atVh6PGvr6XJn9SWqNUziT_WK8,562
|
|
24
24
|
mistralai_azure/models/functionname.py,sha256=4rGsO-FYjvLMRGDBbdZ3cLyiiwml_voRQQ924K2_S1M,473
|
|
25
25
|
mistralai_azure/models/httpvalidationerror.py,sha256=tcUK2zfyCZ1TJjmvF93E9G2Ah-S2UUSpM-ZJBbR4hgc,616
|
|
26
|
-
mistralai_azure/models/
|
|
26
|
+
mistralai_azure/models/imageurl.py,sha256=Dm3S96XCb-F11vx3HYWnKG5GOm246q21vDJ81ywVDVQ,1396
|
|
27
|
+
mistralai_azure/models/imageurlchunk.py,sha256=JWfOtcxm-AEzRdNny-KWAWXV275hSnWFfn_Ux6OjrYA,1000
|
|
28
|
+
mistralai_azure/models/jsonschema.py,sha256=Hg6iOf3AiR55dX_-4nb0DMcA4TFJQac-51QtjmrcTBE,1683
|
|
27
29
|
mistralai_azure/models/prediction.py,sha256=GERxBI8NoS9Fc14FD4ityVfJfXNts1dxjoK3XIVHHc0,730
|
|
28
30
|
mistralai_azure/models/referencechunk.py,sha256=uiouhIPrWpVEhpY_Cea1Som9XapC4mM3R82hhND-j-s,525
|
|
29
31
|
mistralai_azure/models/responseformat.py,sha256=n0aKQE1girltBvrih5w4bbfp_C7_ban4KTrGpS4bAFM,2256
|
|
@@ -67,30 +69,32 @@ mistralai_gcp/_hooks/custom_user_agent.py,sha256=0m-1JzJxOT42rvRTEuCiFLqbOMriOls
|
|
|
67
69
|
mistralai_gcp/_hooks/registration.py,sha256=5BN-U92pwP5kUaN7EOso2vWrwZlLvRcU5Coccibqp20,741
|
|
68
70
|
mistralai_gcp/_hooks/sdkhooks.py,sha256=nr_ACx8Rn5xvTkmZP6_EI-f_0hw8wMyPqPHNvjAWAxI,2563
|
|
69
71
|
mistralai_gcp/_hooks/types.py,sha256=NzfRMdihvcNazbqJkcbjWcGttNkUi9upj4QDk9IN_Wg,2816
|
|
70
|
-
mistralai_gcp/_version.py,sha256=
|
|
72
|
+
mistralai_gcp/_version.py,sha256=JBRQmuMZFOEg82Gq5TChrV73wHfnblPO4GnleLdGZ6I,468
|
|
71
73
|
mistralai_gcp/basesdk.py,sha256=1qQQeCnhkPR4JYRQ3GGpW8TwbTSCWT4RjfXiJTGWvrU,12130
|
|
72
|
-
mistralai_gcp/chat.py,sha256=
|
|
74
|
+
mistralai_gcp/chat.py,sha256=bbz3SzLyNO6Pnct7Mqtgk3aunPvPXET9CUGY8SlJ78U,35812
|
|
73
75
|
mistralai_gcp/fim.py,sha256=zOcVDvQzFzPNy6xxV_yfW2wJNHQhrxhPb4utNrIVJXk,27718
|
|
74
76
|
mistralai_gcp/httpclient.py,sha256=lC-YQ7q4yiJGKElxBeb3aZnr-4aYxjgEpZ6roeXYlyg,4318
|
|
75
|
-
mistralai_gcp/models/__init__.py,sha256=
|
|
77
|
+
mistralai_gcp/models/__init__.py,sha256=AztbrrgcEdLp7b7TyBzJPpZV-48R9ysK25HHp66X4qY,6897
|
|
76
78
|
mistralai_gcp/models/assistantmessage.py,sha256=DQEkGoA288mFwGN29q1E3r5uT_vUfkeTRjliT4aHWdw,2653
|
|
77
79
|
mistralai_gcp/models/chatcompletionchoice.py,sha256=1t3Sb_IICDH7gyyEMX-WuxHnSVV-PZTLfpUjkUVp3do,931
|
|
78
|
-
mistralai_gcp/models/chatcompletionrequest.py,sha256=
|
|
80
|
+
mistralai_gcp/models/chatcompletionrequest.py,sha256=nBk41aPENmT2mwmRpkVpeZMCAvCCSUGOAmPag7sMq3M,9809
|
|
79
81
|
mistralai_gcp/models/chatcompletionresponse.py,sha256=Ctvqs2ZjvWTycozqXn-fvucgqOn0dm4cOjUZ2BjD4BM,796
|
|
80
|
-
mistralai_gcp/models/chatcompletionstreamrequest.py,sha256=
|
|
82
|
+
mistralai_gcp/models/chatcompletionstreamrequest.py,sha256=KTikDhadXgyYc0go-5ZN1CyzFOxbZWr7syTaiqnbZBs,8945
|
|
81
83
|
mistralai_gcp/models/completionchunk.py,sha256=0DBDcrqVWrUskHA3hHYtuWk2E4JcJy_zc_LiGyLHBlA,875
|
|
82
84
|
mistralai_gcp/models/completionevent.py,sha256=cP7Q5dN4Z46FQTlyCYeIwvqt7pgN-22jNPD2bi7Eals,403
|
|
83
85
|
mistralai_gcp/models/completionresponsestreamchoice.py,sha256=MdZaPMSqFbIbenEAdPyYMFemsFSZdPglEEt5ssZ3x7E,1830
|
|
84
|
-
mistralai_gcp/models/contentchunk.py,sha256=
|
|
86
|
+
mistralai_gcp/models/contentchunk.py,sha256=YnkuzJSAJGvNsmRLQWscl43INmRVDAbidtLMOwYipM4,879
|
|
85
87
|
mistralai_gcp/models/deltamessage.py,sha256=6AcVFRWaW4mLFAyd7yOIJfKVroFe0htdclMlbv_R_iM,1968
|
|
86
88
|
mistralai_gcp/models/fimcompletionrequest.py,sha256=fmOlJENpPYggcJPZEa6u1pezZMUG9XufDn98RptNIPE,6594
|
|
87
89
|
mistralai_gcp/models/fimcompletionresponse.py,sha256=zUG83S6DchgEYsSG1dkOSuoOFHvlAR62gCoN9UzF06A,794
|
|
88
90
|
mistralai_gcp/models/fimcompletionstreamrequest.py,sha256=VjYBNv9aa2hRHZd7ogHtxFkpqHs4EhymHdrmn1lrRd8,5973
|
|
89
|
-
mistralai_gcp/models/function.py,sha256=
|
|
91
|
+
mistralai_gcp/models/function.py,sha256=FKnuRp-z4lQxq43iDzFaGtledj6zuXf8bHk5erTs62Q,538
|
|
90
92
|
mistralai_gcp/models/functioncall.py,sha256=iIeo1sJUi1DJmASNUuqMq6iYwGLgM1fxC-mWgEiluQ4,560
|
|
91
93
|
mistralai_gcp/models/functionname.py,sha256=Rp4TPQA1IvhnBZx-GwBF1fFyAd6w5Ys5A84waQ9fYKg,471
|
|
92
94
|
mistralai_gcp/models/httpvalidationerror.py,sha256=wGmVyH_T7APhs_mCpOkumZ3x15FQ95cL-GH5M2iLst8,612
|
|
93
|
-
mistralai_gcp/models/
|
|
95
|
+
mistralai_gcp/models/imageurl.py,sha256=McP_wQQvlV_0LirWXiDnOWoR5c6CNKPB79dmyS1KYqc,1394
|
|
96
|
+
mistralai_gcp/models/imageurlchunk.py,sha256=FWe88MyC-AFko2SGFmwkkihuOZduFzneCcgNailGUzI,998
|
|
97
|
+
mistralai_gcp/models/jsonschema.py,sha256=CcBseBHz7VGgMbvC-jGI4KZ5DuIi79cJLGrRlAs9OKs,1681
|
|
94
98
|
mistralai_gcp/models/prediction.py,sha256=B96QIAqMDDbF_uEzcL3XMisXg-AaMzHCSRUvaop2ktI,726
|
|
95
99
|
mistralai_gcp/models/referencechunk.py,sha256=NmajuCeC5caD70iUPL8P6DlTO44oivRnFaOhfLGBiE8,523
|
|
96
100
|
mistralai_gcp/models/responseformat.py,sha256=0aI9IEpq6p4iIz1MMt_uBQtDh0CoW3fVHAjfamTgZ7U,2254
|
|
@@ -135,13 +139,13 @@ mistralai/_hooks/deprecation_warning.py,sha256=eyEOf7-o9uqqNWJnufD2RXp3dYrGV4in9
|
|
|
135
139
|
mistralai/_hooks/registration.py,sha256=ML0W-XbE4WYdJ4eGks_XxF2aLCJTaIWjQATFGzFwvyU,861
|
|
136
140
|
mistralai/_hooks/sdkhooks.py,sha256=s-orhdvnV89TmI3QiPC2LWQtYeM9RrsG1CTll-fYZmQ,2559
|
|
137
141
|
mistralai/_hooks/types.py,sha256=z3AUFDpRJHj2m3h5PklvUeEcGohY0cfph4jL6-nGIzs,2812
|
|
138
|
-
mistralai/_version.py,sha256=
|
|
139
|
-
mistralai/agents.py,sha256=
|
|
142
|
+
mistralai/_version.py,sha256=eFkb-GJRzmbd4tr1xl66OXRTR1qnI6sLGg_C5o8P-DY,460
|
|
143
|
+
mistralai/agents.py,sha256=o_apyuwiDzxv-U252T84ynAHCb5fn1q7MMXqrZ4oHLo,32652
|
|
140
144
|
mistralai/async_client.py,sha256=KUdYxIIqoD6L7vB0EGwUR6lQ0NK5iCTHjnLVR9CVcJY,355
|
|
141
145
|
mistralai/basesdk.py,sha256=GsU5bp8O5fBCl34tKxaYmeYSIIM971eAPeFBBC_BpFo,12191
|
|
142
146
|
mistralai/batch.py,sha256=YN4D0Duwrap9Ysmp_lRpADYp1Znay7THE_z8ERGvDds,501
|
|
143
|
-
mistralai/chat.py,sha256=
|
|
144
|
-
mistralai/classifiers.py,sha256=
|
|
147
|
+
mistralai/chat.py,sha256=1XVVVvDi726bq6HXCur6-dsmFfzQAEpEWbKT_3sTZ4A,40549
|
|
148
|
+
mistralai/classifiers.py,sha256=zVJreCGyw_F2-9p8uyYbxlQA5RFZ0pvE690Holw5SDk,17504
|
|
145
149
|
mistralai/client.py,sha256=hrPg-LciKMKiascF0WbRRmqQyCv1lb2yDh6j-aaKVNo,509
|
|
146
150
|
mistralai/embeddings.py,sha256=5tTfvz5g9QYqEYPp785bPm88HvsJC9Ha_NcEuOKfiww,8536
|
|
147
151
|
mistralai/extra/README.md,sha256=BTS9fy0ijkiUP7ZVoFQ7FVBxHtXIXqucYZyy_ucFjo4,1739
|
|
@@ -160,8 +164,8 @@ mistralai/httpclient.py,sha256=lC-YQ7q4yiJGKElxBeb3aZnr-4aYxjgEpZ6roeXYlyg,4318
|
|
|
160
164
|
mistralai/jobs.py,sha256=WmyPoGpvCV0QSbxM6PGE1qs_b2t44wn6yoM7eL1GJD8,44651
|
|
161
165
|
mistralai/mistral_jobs.py,sha256=NdlPFopM1YzsLqHciZilF2D5Lsezqk4TviYR_mpNZUA,31102
|
|
162
166
|
mistralai/models/__init__.py,sha256=vkNm0A_E435ipGQcXBZ2K_WhEi7nEVjVQPR3SdLvJ28,22838
|
|
163
|
-
mistralai/models/agentscompletionrequest.py,sha256=
|
|
164
|
-
mistralai/models/agentscompletionstreamrequest.py,sha256=
|
|
167
|
+
mistralai/models/agentscompletionrequest.py,sha256=gyGoh1KsCGbOpfmaqk9d_hf1CYhWIriH4vaeQoEDfzU,7920
|
|
168
|
+
mistralai/models/agentscompletionstreamrequest.py,sha256=ZI4iFtl6qDJZ5QTIZ7vDIyFQ9n9rqVqN6tJQAdjpQjA,7365
|
|
165
169
|
mistralai/models/apiendpoint.py,sha256=Hvar5leWsJR_FYb0UzRlSw3vRdBZhk_6BR5r2pIb214,400
|
|
166
170
|
mistralai/models/archiveftmodelout.py,sha256=dQx1J91UA06pjk2r7okhKMyBBePmHal7SPpn6Y_wEsY,820
|
|
167
171
|
mistralai/models/assistantmessage.py,sha256=pmOhSINRB8sJ11lNtfKEL0k6-JnTEJ7cjlWW9D0pIMM,2624
|
|
@@ -172,9 +176,9 @@ mistralai/models/batchjobout.py,sha256=AXFSgDFGY_1LFVHtVdp66y08DUZCxsZtZ_NzTAJYD
|
|
|
172
176
|
mistralai/models/batchjobsout.py,sha256=Tq6bcb4_-fcW8C9AOnfJN8_sTy1NQhDn82qFOKdFPcg,868
|
|
173
177
|
mistralai/models/batchjobstatus.py,sha256=WlrIl5vWQGfLmgQA91_9CnCMKhWN6Lli458fT-4Asj4,294
|
|
174
178
|
mistralai/models/chatcompletionchoice.py,sha256=6iIFLZj2KYx0HFfzS3-E3sNXG6mPEAlDyXxIA5iZI_U,849
|
|
175
|
-
mistralai/models/chatcompletionrequest.py,sha256=
|
|
179
|
+
mistralai/models/chatcompletionrequest.py,sha256=6Innwpi7UnKmyauATOJForAVvW0tkSnbjsiQOOp5OKg,9777
|
|
176
180
|
mistralai/models/chatcompletionresponse.py,sha256=sLE-_Bx9W5rH2-HE2fBWPVbJbmBWx_jSY2mJ3KBEn6w,792
|
|
177
|
-
mistralai/models/chatcompletionstreamrequest.py,sha256=
|
|
181
|
+
mistralai/models/chatcompletionstreamrequest.py,sha256=0NFa_nMMRmHU66Hsgu1Zm4fggT0AzvW_imrkyZ4sUxc,9465
|
|
178
182
|
mistralai/models/chatmoderationrequest.py,sha256=LX-dhlYxecEzChSTt4jo4DA8lC4DEp5brgaiksTGF-o,2367
|
|
179
183
|
mistralai/models/checkpointout.py,sha256=A2kXS8-VT_1lbg3brifVjZD6tXdsET8vLqBm2a-yXgA,1109
|
|
180
184
|
mistralai/models/classificationobject.py,sha256=JqaKo3AQD4t5X12ZnHjJ6K3Y6LXUn94uGdLJSoGr8vY,665
|
|
@@ -209,7 +213,7 @@ mistralai/models/fimcompletionstreamrequest.py,sha256=fxuR8FDOWMwIqlYU9ttAfGeRdV
|
|
|
209
213
|
mistralai/models/ftmodelcapabilitiesout.py,sha256=H1kKEChUPgYT31ZQUz0tn9NRa7Z3hRZlh-sFfDYvBos,648
|
|
210
214
|
mistralai/models/ftmodelcard.py,sha256=G3dioHDMOhbI5HIw5gCaxZDb1sCthBzkXIAYMNHwya8,3300
|
|
211
215
|
mistralai/models/ftmodelout.py,sha256=dw-y8KKT_7rzW6tu10gfc1YKB8-Kpw4e4zy23z_U1d4,2530
|
|
212
|
-
mistralai/models/function.py,sha256=
|
|
216
|
+
mistralai/models/function.py,sha256=QaQriwBCCIS65IHO5Ge2OnMW6L1dS-o8JS8zlGYKSRU,534
|
|
213
217
|
mistralai/models/functioncall.py,sha256=VvvBe4bVq1Irqo5t4_n1iq60UF7hLf8tE_GjkbyM8iE,556
|
|
214
218
|
mistralai/models/functionname.py,sha256=jgd0moI9eORQtEAQI4ROiMSKpWSbCLmK6IhDn7uppKY,467
|
|
215
219
|
mistralai/models/githubrepositoryin.py,sha256=kHU3QnOEJsdsyAt-74jrY2ztEao3aXcNdtj7aOgASRg,1956
|
|
@@ -232,7 +236,7 @@ mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py,sha256=h
|
|
|
232
236
|
mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py,sha256=_pkyhD7OzG-59fgcajI9NmSLTLDktkCxXo_IuvWeyfs,636
|
|
233
237
|
mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py,sha256=s-EYS-Hw0NExYeIyN-3JlHbKmnTmtyB8ljVSfOylqYk,907
|
|
234
238
|
mistralai/models/jobsout.py,sha256=uCKt0aw7yXzI4oLDGeAAEhsRjdRg3g7lPopg0__czTA,818
|
|
235
|
-
mistralai/models/jsonschema.py,sha256=
|
|
239
|
+
mistralai/models/jsonschema.py,sha256=Itbk3BS9M9nnEPwShGyyOCVmqfbP6y44XsIUn6d7cDY,1652
|
|
236
240
|
mistralai/models/legacyjobmetadataout.py,sha256=08zAGNTSrICsK8u2SFFUXiNWF7MCQvezmFQeMQzxsys,4762
|
|
237
241
|
mistralai/models/listfilesout.py,sha256=tW2fNabLKcftc5kytkjwVaChlOzWRL4FKtNzDak9MNs,468
|
|
238
242
|
mistralai/models/metricout.py,sha256=dXQMMU4Nk6-Zr06Jx1TWilFi6cOwiVLjSanCFn0cPxo,2034
|
|
@@ -296,7 +300,7 @@ mistralai/utils/serializers.py,sha256=EGH40Pgp3sSK9uM4PxL7_SYzSHtmo-Uy6QIE5xLVg6
|
|
|
296
300
|
mistralai/utils/url.py,sha256=BgGPgcTA6MRK4bF8fjP2dUopN3NzEzxWMXPBVg8NQUA,5254
|
|
297
301
|
mistralai/utils/values.py,sha256=CcaCXEa3xHhkUDROyXZocN8f0bdITftv9Y0P9lTf0YM,3517
|
|
298
302
|
mistralai/version.py,sha256=iosXhlXclBwBqlADFKEilxAC2wWKbtuBKi87AmPi7s8,196
|
|
299
|
-
mistralai-1.
|
|
300
|
-
mistralai-1.
|
|
301
|
-
mistralai-1.
|
|
302
|
-
mistralai-1.
|
|
303
|
+
mistralai-1.6.0.dist-info/LICENSE,sha256=rUtQ_9GD0OyLPlb-2uWVdfE87hzudMRmsW-tS-0DK-0,11340
|
|
304
|
+
mistralai-1.6.0.dist-info/METADATA,sha256=xKI7FL5iu6kbvVeVKe9lUfsd_itWzOp-x511W3CO0W0,30057
|
|
305
|
+
mistralai-1.6.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
|
|
306
|
+
mistralai-1.6.0.dist-info/RECORD,,
|
mistralai_azure/_version.py
CHANGED
|
@@ -3,10 +3,10 @@
|
|
|
3
3
|
import importlib.metadata
|
|
4
4
|
|
|
5
5
|
__title__: str = "mistralai_azure"
|
|
6
|
-
__version__: str = "1.
|
|
6
|
+
__version__: str = "1.6.0"
|
|
7
7
|
__openapi_doc_version__: str = "0.0.2"
|
|
8
8
|
__gen_version__: str = "2.548.6"
|
|
9
|
-
__user_agent__: str = "speakeasy-sdk/python 1.
|
|
9
|
+
__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai_azure"
|
|
10
10
|
|
|
11
11
|
try:
|
|
12
12
|
if __package__ is not None:
|
mistralai_azure/chat.py
CHANGED
|
@@ -40,6 +40,7 @@ class Chat(BaseSDK):
|
|
|
40
40
|
prediction: Optional[
|
|
41
41
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
42
42
|
] = None,
|
|
43
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
43
44
|
safe_prompt: Optional[bool] = None,
|
|
44
45
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
45
46
|
server_url: Optional[str] = None,
|
|
@@ -65,6 +66,7 @@ class Chat(BaseSDK):
|
|
|
65
66
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
66
67
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
67
68
|
:param prediction:
|
|
69
|
+
:param parallel_tool_calls:
|
|
68
70
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
69
71
|
:param retries: Override the default retry configuration for this method
|
|
70
72
|
:param server_url: Override the default server URL for this method
|
|
@@ -103,6 +105,7 @@ class Chat(BaseSDK):
|
|
|
103
105
|
prediction=utils.get_pydantic_model(
|
|
104
106
|
prediction, Optional[models.Prediction]
|
|
105
107
|
),
|
|
108
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
106
109
|
safe_prompt=safe_prompt,
|
|
107
110
|
)
|
|
108
111
|
|
|
@@ -208,6 +211,7 @@ class Chat(BaseSDK):
|
|
|
208
211
|
prediction: Optional[
|
|
209
212
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
210
213
|
] = None,
|
|
214
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
211
215
|
safe_prompt: Optional[bool] = None,
|
|
212
216
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
213
217
|
server_url: Optional[str] = None,
|
|
@@ -233,6 +237,7 @@ class Chat(BaseSDK):
|
|
|
233
237
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
234
238
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
235
239
|
:param prediction:
|
|
240
|
+
:param parallel_tool_calls:
|
|
236
241
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
237
242
|
:param retries: Override the default retry configuration for this method
|
|
238
243
|
:param server_url: Override the default server URL for this method
|
|
@@ -271,6 +276,7 @@ class Chat(BaseSDK):
|
|
|
271
276
|
prediction=utils.get_pydantic_model(
|
|
272
277
|
prediction, Optional[models.Prediction]
|
|
273
278
|
),
|
|
279
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
274
280
|
safe_prompt=safe_prompt,
|
|
275
281
|
)
|
|
276
282
|
|
|
@@ -384,6 +390,7 @@ class Chat(BaseSDK):
|
|
|
384
390
|
prediction: Optional[
|
|
385
391
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
386
392
|
] = None,
|
|
393
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
387
394
|
safe_prompt: Optional[bool] = None,
|
|
388
395
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
389
396
|
server_url: Optional[str] = None,
|
|
@@ -407,6 +414,7 @@ class Chat(BaseSDK):
|
|
|
407
414
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
408
415
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
409
416
|
:param prediction:
|
|
417
|
+
:param parallel_tool_calls:
|
|
410
418
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
411
419
|
:param retries: Override the default retry configuration for this method
|
|
412
420
|
:param server_url: Override the default server URL for this method
|
|
@@ -447,6 +455,7 @@ class Chat(BaseSDK):
|
|
|
447
455
|
prediction=utils.get_pydantic_model(
|
|
448
456
|
prediction, Optional[models.Prediction]
|
|
449
457
|
),
|
|
458
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
450
459
|
safe_prompt=safe_prompt,
|
|
451
460
|
)
|
|
452
461
|
|
|
@@ -556,6 +565,7 @@ class Chat(BaseSDK):
|
|
|
556
565
|
prediction: Optional[
|
|
557
566
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
558
567
|
] = None,
|
|
568
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
559
569
|
safe_prompt: Optional[bool] = None,
|
|
560
570
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
561
571
|
server_url: Optional[str] = None,
|
|
@@ -579,6 +589,7 @@ class Chat(BaseSDK):
|
|
|
579
589
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
580
590
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
581
591
|
:param prediction:
|
|
592
|
+
:param parallel_tool_calls:
|
|
582
593
|
:param safe_prompt: Whether to inject a safety prompt before all conversations.
|
|
583
594
|
:param retries: Override the default retry configuration for this method
|
|
584
595
|
:param server_url: Override the default server URL for this method
|
|
@@ -619,6 +630,7 @@ class Chat(BaseSDK):
|
|
|
619
630
|
prediction=utils.get_pydantic_model(
|
|
620
631
|
prediction, Optional[models.Prediction]
|
|
621
632
|
),
|
|
633
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
622
634
|
safe_prompt=safe_prompt,
|
|
623
635
|
)
|
|
624
636
|
|
|
@@ -54,6 +54,14 @@ from .functioncall import (
|
|
|
54
54
|
)
|
|
55
55
|
from .functionname import FunctionName, FunctionNameTypedDict
|
|
56
56
|
from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
|
|
57
|
+
from .imageurl import ImageURL, ImageURLTypedDict
|
|
58
|
+
from .imageurlchunk import (
|
|
59
|
+
ImageURLChunk,
|
|
60
|
+
ImageURLChunkImageURL,
|
|
61
|
+
ImageURLChunkImageURLTypedDict,
|
|
62
|
+
ImageURLChunkType,
|
|
63
|
+
ImageURLChunkTypedDict,
|
|
64
|
+
)
|
|
57
65
|
from .jsonschema import JSONSchema, JSONSchemaTypedDict
|
|
58
66
|
from .prediction import Prediction, PredictionTypedDict
|
|
59
67
|
from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
|
|
@@ -143,6 +151,13 @@ __all__ = [
|
|
|
143
151
|
"FunctionTypedDict",
|
|
144
152
|
"HTTPValidationError",
|
|
145
153
|
"HTTPValidationErrorData",
|
|
154
|
+
"ImageURL",
|
|
155
|
+
"ImageURLChunk",
|
|
156
|
+
"ImageURLChunkImageURL",
|
|
157
|
+
"ImageURLChunkImageURLTypedDict",
|
|
158
|
+
"ImageURLChunkType",
|
|
159
|
+
"ImageURLChunkTypedDict",
|
|
160
|
+
"ImageURLTypedDict",
|
|
146
161
|
"JSONSchema",
|
|
147
162
|
"JSONSchemaTypedDict",
|
|
148
163
|
"Loc",
|
|
@@ -95,6 +95,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
|
|
|
95
95
|
n: NotRequired[Nullable[int]]
|
|
96
96
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
97
97
|
prediction: NotRequired[PredictionTypedDict]
|
|
98
|
+
parallel_tool_calls: NotRequired[bool]
|
|
98
99
|
safe_prompt: NotRequired[bool]
|
|
99
100
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
100
101
|
|
|
@@ -141,6 +142,8 @@ class ChatCompletionRequest(BaseModel):
|
|
|
141
142
|
|
|
142
143
|
prediction: Optional[Prediction] = None
|
|
143
144
|
|
|
145
|
+
parallel_tool_calls: Optional[bool] = None
|
|
146
|
+
|
|
144
147
|
safe_prompt: Optional[bool] = None
|
|
145
148
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
146
149
|
|
|
@@ -161,6 +164,7 @@ class ChatCompletionRequest(BaseModel):
|
|
|
161
164
|
"frequency_penalty",
|
|
162
165
|
"n",
|
|
163
166
|
"prediction",
|
|
167
|
+
"parallel_tool_calls",
|
|
164
168
|
"safe_prompt",
|
|
165
169
|
]
|
|
166
170
|
nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
|
|
@@ -90,6 +90,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
|
|
|
90
90
|
n: NotRequired[Nullable[int]]
|
|
91
91
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
92
92
|
prediction: NotRequired[PredictionTypedDict]
|
|
93
|
+
parallel_tool_calls: NotRequired[bool]
|
|
93
94
|
safe_prompt: NotRequired[bool]
|
|
94
95
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
95
96
|
|
|
@@ -135,6 +136,8 @@ class ChatCompletionStreamRequest(BaseModel):
|
|
|
135
136
|
|
|
136
137
|
prediction: Optional[Prediction] = None
|
|
137
138
|
|
|
139
|
+
parallel_tool_calls: Optional[bool] = None
|
|
140
|
+
|
|
138
141
|
safe_prompt: Optional[bool] = None
|
|
139
142
|
r"""Whether to inject a safety prompt before all conversations."""
|
|
140
143
|
|
|
@@ -155,6 +158,7 @@ class ChatCompletionStreamRequest(BaseModel):
|
|
|
155
158
|
"frequency_penalty",
|
|
156
159
|
"n",
|
|
157
160
|
"prediction",
|
|
161
|
+
"parallel_tool_calls",
|
|
158
162
|
"safe_prompt",
|
|
159
163
|
]
|
|
160
164
|
nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
2
|
|
|
3
3
|
from __future__ import annotations
|
|
4
|
+
from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
|
|
4
5
|
from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
|
|
5
6
|
from .textchunk import TextChunk, TextChunkTypedDict
|
|
6
7
|
from mistralai_azure.utils import get_discriminator
|
|
@@ -10,13 +11,16 @@ from typing_extensions import Annotated, TypeAliasType
|
|
|
10
11
|
|
|
11
12
|
|
|
12
13
|
ContentChunkTypedDict = TypeAliasType(
|
|
13
|
-
"ContentChunkTypedDict",
|
|
14
|
+
"ContentChunkTypedDict",
|
|
15
|
+
Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict],
|
|
14
16
|
)
|
|
15
17
|
|
|
16
18
|
|
|
17
19
|
ContentChunk = Annotated[
|
|
18
20
|
Union[
|
|
19
|
-
Annotated[
|
|
21
|
+
Annotated[ImageURLChunk, Tag("image_url")],
|
|
22
|
+
Annotated[TextChunk, Tag("text")],
|
|
23
|
+
Annotated[ReferenceChunk, Tag("reference")],
|
|
20
24
|
],
|
|
21
25
|
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
22
26
|
]
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
from mistralai_azure.types import (
|
|
5
|
+
BaseModel,
|
|
6
|
+
Nullable,
|
|
7
|
+
OptionalNullable,
|
|
8
|
+
UNSET,
|
|
9
|
+
UNSET_SENTINEL,
|
|
10
|
+
)
|
|
11
|
+
from pydantic import model_serializer
|
|
12
|
+
from typing_extensions import NotRequired, TypedDict
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ImageURLTypedDict(TypedDict):
|
|
16
|
+
url: str
|
|
17
|
+
detail: NotRequired[Nullable[str]]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ImageURL(BaseModel):
|
|
21
|
+
url: str
|
|
22
|
+
|
|
23
|
+
detail: OptionalNullable[str] = UNSET
|
|
24
|
+
|
|
25
|
+
@model_serializer(mode="wrap")
|
|
26
|
+
def serialize_model(self, handler):
|
|
27
|
+
optional_fields = ["detail"]
|
|
28
|
+
nullable_fields = ["detail"]
|
|
29
|
+
null_default_fields = []
|
|
30
|
+
|
|
31
|
+
serialized = handler(self)
|
|
32
|
+
|
|
33
|
+
m = {}
|
|
34
|
+
|
|
35
|
+
for n, f in self.model_fields.items():
|
|
36
|
+
k = f.alias or n
|
|
37
|
+
val = serialized.get(k)
|
|
38
|
+
serialized.pop(k, None)
|
|
39
|
+
|
|
40
|
+
optional_nullable = k in optional_fields and k in nullable_fields
|
|
41
|
+
is_set = (
|
|
42
|
+
self.__pydantic_fields_set__.intersection({n})
|
|
43
|
+
or k in null_default_fields
|
|
44
|
+
) # pylint: disable=no-member
|
|
45
|
+
|
|
46
|
+
if val is not None and val != UNSET_SENTINEL:
|
|
47
|
+
m[k] = val
|
|
48
|
+
elif val != UNSET_SENTINEL and (
|
|
49
|
+
not k in optional_fields or (optional_nullable and is_set)
|
|
50
|
+
):
|
|
51
|
+
m[k] = val
|
|
52
|
+
|
|
53
|
+
return m
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
from .imageurl import ImageURL, ImageURLTypedDict
|
|
5
|
+
from mistralai_azure.types import BaseModel
|
|
6
|
+
from typing import Literal, Optional, Union
|
|
7
|
+
from typing_extensions import NotRequired, TypeAliasType, TypedDict
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
ImageURLChunkImageURLTypedDict = TypeAliasType(
|
|
11
|
+
"ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str]
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
ImageURLChunkType = Literal["image_url"]
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ImageURLChunkTypedDict(TypedDict):
|
|
22
|
+
r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
|
|
23
|
+
|
|
24
|
+
image_url: ImageURLChunkImageURLTypedDict
|
|
25
|
+
type: NotRequired[ImageURLChunkType]
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class ImageURLChunk(BaseModel):
|
|
29
|
+
r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
|
|
30
|
+
|
|
31
|
+
image_url: ImageURLChunkImageURL
|
|
32
|
+
|
|
33
|
+
type: Optional[ImageURLChunkType] = "image_url"
|
mistralai_gcp/_version.py
CHANGED
|
@@ -3,10 +3,10 @@
|
|
|
3
3
|
import importlib.metadata
|
|
4
4
|
|
|
5
5
|
__title__: str = "mistralai-gcp"
|
|
6
|
-
__version__: str = "1.
|
|
6
|
+
__version__: str = "1.6.0"
|
|
7
7
|
__openapi_doc_version__: str = "0.0.2"
|
|
8
8
|
__gen_version__: str = "2.548.6"
|
|
9
|
-
__user_agent__: str = "speakeasy-sdk/python 1.
|
|
9
|
+
__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp"
|
|
10
10
|
|
|
11
11
|
try:
|
|
12
12
|
if __package__ is not None:
|
mistralai_gcp/chat.py
CHANGED
|
@@ -40,6 +40,7 @@ class Chat(BaseSDK):
|
|
|
40
40
|
prediction: Optional[
|
|
41
41
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
42
42
|
] = None,
|
|
43
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
43
44
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
44
45
|
server_url: Optional[str] = None,
|
|
45
46
|
timeout_ms: Optional[int] = None,
|
|
@@ -64,6 +65,7 @@ class Chat(BaseSDK):
|
|
|
64
65
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
65
66
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
66
67
|
:param prediction:
|
|
68
|
+
:param parallel_tool_calls:
|
|
67
69
|
:param retries: Override the default retry configuration for this method
|
|
68
70
|
:param server_url: Override the default server URL for this method
|
|
69
71
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -101,6 +103,7 @@ class Chat(BaseSDK):
|
|
|
101
103
|
prediction=utils.get_pydantic_model(
|
|
102
104
|
prediction, Optional[models.Prediction]
|
|
103
105
|
),
|
|
106
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
104
107
|
)
|
|
105
108
|
|
|
106
109
|
req = self._build_request(
|
|
@@ -205,6 +208,7 @@ class Chat(BaseSDK):
|
|
|
205
208
|
prediction: Optional[
|
|
206
209
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
207
210
|
] = None,
|
|
211
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
208
212
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
209
213
|
server_url: Optional[str] = None,
|
|
210
214
|
timeout_ms: Optional[int] = None,
|
|
@@ -229,6 +233,7 @@ class Chat(BaseSDK):
|
|
|
229
233
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
230
234
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
231
235
|
:param prediction:
|
|
236
|
+
:param parallel_tool_calls:
|
|
232
237
|
:param retries: Override the default retry configuration for this method
|
|
233
238
|
:param server_url: Override the default server URL for this method
|
|
234
239
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -266,6 +271,7 @@ class Chat(BaseSDK):
|
|
|
266
271
|
prediction=utils.get_pydantic_model(
|
|
267
272
|
prediction, Optional[models.Prediction]
|
|
268
273
|
),
|
|
274
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
269
275
|
)
|
|
270
276
|
|
|
271
277
|
req = self._build_request_async(
|
|
@@ -378,6 +384,7 @@ class Chat(BaseSDK):
|
|
|
378
384
|
prediction: Optional[
|
|
379
385
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
380
386
|
] = None,
|
|
387
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
381
388
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
382
389
|
server_url: Optional[str] = None,
|
|
383
390
|
timeout_ms: Optional[int] = None,
|
|
@@ -400,6 +407,7 @@ class Chat(BaseSDK):
|
|
|
400
407
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
401
408
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
402
409
|
:param prediction:
|
|
410
|
+
:param parallel_tool_calls:
|
|
403
411
|
:param retries: Override the default retry configuration for this method
|
|
404
412
|
:param server_url: Override the default server URL for this method
|
|
405
413
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -439,6 +447,7 @@ class Chat(BaseSDK):
|
|
|
439
447
|
prediction=utils.get_pydantic_model(
|
|
440
448
|
prediction, Optional[models.Prediction]
|
|
441
449
|
),
|
|
450
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
442
451
|
)
|
|
443
452
|
|
|
444
453
|
req = self._build_request(
|
|
@@ -547,6 +556,7 @@ class Chat(BaseSDK):
|
|
|
547
556
|
prediction: Optional[
|
|
548
557
|
Union[models.Prediction, models.PredictionTypedDict]
|
|
549
558
|
] = None,
|
|
559
|
+
parallel_tool_calls: Optional[bool] = None,
|
|
550
560
|
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
551
561
|
server_url: Optional[str] = None,
|
|
552
562
|
timeout_ms: Optional[int] = None,
|
|
@@ -569,6 +579,7 @@ class Chat(BaseSDK):
|
|
|
569
579
|
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
|
|
570
580
|
:param n: Number of completions to return for each request, input tokens are only billed once.
|
|
571
581
|
:param prediction:
|
|
582
|
+
:param parallel_tool_calls:
|
|
572
583
|
:param retries: Override the default retry configuration for this method
|
|
573
584
|
:param server_url: Override the default server URL for this method
|
|
574
585
|
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
|
|
@@ -608,6 +619,7 @@ class Chat(BaseSDK):
|
|
|
608
619
|
prediction=utils.get_pydantic_model(
|
|
609
620
|
prediction, Optional[models.Prediction]
|
|
610
621
|
),
|
|
622
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
611
623
|
)
|
|
612
624
|
|
|
613
625
|
req = self._build_request_async(
|
mistralai_gcp/models/__init__.py
CHANGED
|
@@ -67,6 +67,14 @@ from .functioncall import (
|
|
|
67
67
|
)
|
|
68
68
|
from .functionname import FunctionName, FunctionNameTypedDict
|
|
69
69
|
from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
|
|
70
|
+
from .imageurl import ImageURL, ImageURLTypedDict
|
|
71
|
+
from .imageurlchunk import (
|
|
72
|
+
ImageURLChunk,
|
|
73
|
+
ImageURLChunkImageURL,
|
|
74
|
+
ImageURLChunkImageURLTypedDict,
|
|
75
|
+
ImageURLChunkType,
|
|
76
|
+
ImageURLChunkTypedDict,
|
|
77
|
+
)
|
|
70
78
|
from .jsonschema import JSONSchema, JSONSchemaTypedDict
|
|
71
79
|
from .prediction import Prediction, PredictionTypedDict
|
|
72
80
|
from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
|
|
@@ -166,6 +174,13 @@ __all__ = [
|
|
|
166
174
|
"FunctionTypedDict",
|
|
167
175
|
"HTTPValidationError",
|
|
168
176
|
"HTTPValidationErrorData",
|
|
177
|
+
"ImageURL",
|
|
178
|
+
"ImageURLChunk",
|
|
179
|
+
"ImageURLChunkImageURL",
|
|
180
|
+
"ImageURLChunkImageURLTypedDict",
|
|
181
|
+
"ImageURLChunkType",
|
|
182
|
+
"ImageURLChunkTypedDict",
|
|
183
|
+
"ImageURLTypedDict",
|
|
169
184
|
"JSONSchema",
|
|
170
185
|
"JSONSchemaTypedDict",
|
|
171
186
|
"Loc",
|
|
@@ -95,6 +95,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
|
|
|
95
95
|
n: NotRequired[Nullable[int]]
|
|
96
96
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
97
97
|
prediction: NotRequired[PredictionTypedDict]
|
|
98
|
+
parallel_tool_calls: NotRequired[bool]
|
|
98
99
|
|
|
99
100
|
|
|
100
101
|
class ChatCompletionRequest(BaseModel):
|
|
@@ -139,6 +140,8 @@ class ChatCompletionRequest(BaseModel):
|
|
|
139
140
|
|
|
140
141
|
prediction: Optional[Prediction] = None
|
|
141
142
|
|
|
143
|
+
parallel_tool_calls: Optional[bool] = None
|
|
144
|
+
|
|
142
145
|
@model_serializer(mode="wrap")
|
|
143
146
|
def serialize_model(self, handler):
|
|
144
147
|
optional_fields = [
|
|
@@ -155,6 +158,7 @@ class ChatCompletionRequest(BaseModel):
|
|
|
155
158
|
"frequency_penalty",
|
|
156
159
|
"n",
|
|
157
160
|
"prediction",
|
|
161
|
+
"parallel_tool_calls",
|
|
158
162
|
]
|
|
159
163
|
nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
|
|
160
164
|
null_default_fields = []
|
|
@@ -90,6 +90,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
|
|
|
90
90
|
n: NotRequired[Nullable[int]]
|
|
91
91
|
r"""Number of completions to return for each request, input tokens are only billed once."""
|
|
92
92
|
prediction: NotRequired[PredictionTypedDict]
|
|
93
|
+
parallel_tool_calls: NotRequired[bool]
|
|
93
94
|
|
|
94
95
|
|
|
95
96
|
class ChatCompletionStreamRequest(BaseModel):
|
|
@@ -133,6 +134,8 @@ class ChatCompletionStreamRequest(BaseModel):
|
|
|
133
134
|
|
|
134
135
|
prediction: Optional[Prediction] = None
|
|
135
136
|
|
|
137
|
+
parallel_tool_calls: Optional[bool] = None
|
|
138
|
+
|
|
136
139
|
@model_serializer(mode="wrap")
|
|
137
140
|
def serialize_model(self, handler):
|
|
138
141
|
optional_fields = [
|
|
@@ -149,6 +152,7 @@ class ChatCompletionStreamRequest(BaseModel):
|
|
|
149
152
|
"frequency_penalty",
|
|
150
153
|
"n",
|
|
151
154
|
"prediction",
|
|
155
|
+
"parallel_tool_calls",
|
|
152
156
|
]
|
|
153
157
|
nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
|
|
154
158
|
null_default_fields = []
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
2
|
|
|
3
3
|
from __future__ import annotations
|
|
4
|
+
from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
|
|
4
5
|
from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
|
|
5
6
|
from .textchunk import TextChunk, TextChunkTypedDict
|
|
6
7
|
from mistralai_gcp.utils import get_discriminator
|
|
@@ -10,13 +11,16 @@ from typing_extensions import Annotated, TypeAliasType
|
|
|
10
11
|
|
|
11
12
|
|
|
12
13
|
ContentChunkTypedDict = TypeAliasType(
|
|
13
|
-
"ContentChunkTypedDict",
|
|
14
|
+
"ContentChunkTypedDict",
|
|
15
|
+
Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict],
|
|
14
16
|
)
|
|
15
17
|
|
|
16
18
|
|
|
17
19
|
ContentChunk = Annotated[
|
|
18
20
|
Union[
|
|
19
|
-
Annotated[
|
|
21
|
+
Annotated[ImageURLChunk, Tag("image_url")],
|
|
22
|
+
Annotated[TextChunk, Tag("text")],
|
|
23
|
+
Annotated[ReferenceChunk, Tag("reference")],
|
|
20
24
|
],
|
|
21
25
|
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
22
26
|
]
|
mistralai_gcp/models/function.py
CHANGED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
from mistralai_gcp.types import (
|
|
5
|
+
BaseModel,
|
|
6
|
+
Nullable,
|
|
7
|
+
OptionalNullable,
|
|
8
|
+
UNSET,
|
|
9
|
+
UNSET_SENTINEL,
|
|
10
|
+
)
|
|
11
|
+
from pydantic import model_serializer
|
|
12
|
+
from typing_extensions import NotRequired, TypedDict
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ImageURLTypedDict(TypedDict):
|
|
16
|
+
url: str
|
|
17
|
+
detail: NotRequired[Nullable[str]]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ImageURL(BaseModel):
|
|
21
|
+
url: str
|
|
22
|
+
|
|
23
|
+
detail: OptionalNullable[str] = UNSET
|
|
24
|
+
|
|
25
|
+
@model_serializer(mode="wrap")
|
|
26
|
+
def serialize_model(self, handler):
|
|
27
|
+
optional_fields = ["detail"]
|
|
28
|
+
nullable_fields = ["detail"]
|
|
29
|
+
null_default_fields = []
|
|
30
|
+
|
|
31
|
+
serialized = handler(self)
|
|
32
|
+
|
|
33
|
+
m = {}
|
|
34
|
+
|
|
35
|
+
for n, f in self.model_fields.items():
|
|
36
|
+
k = f.alias or n
|
|
37
|
+
val = serialized.get(k)
|
|
38
|
+
serialized.pop(k, None)
|
|
39
|
+
|
|
40
|
+
optional_nullable = k in optional_fields and k in nullable_fields
|
|
41
|
+
is_set = (
|
|
42
|
+
self.__pydantic_fields_set__.intersection({n})
|
|
43
|
+
or k in null_default_fields
|
|
44
|
+
) # pylint: disable=no-member
|
|
45
|
+
|
|
46
|
+
if val is not None and val != UNSET_SENTINEL:
|
|
47
|
+
m[k] = val
|
|
48
|
+
elif val != UNSET_SENTINEL and (
|
|
49
|
+
not k in optional_fields or (optional_nullable and is_set)
|
|
50
|
+
):
|
|
51
|
+
m[k] = val
|
|
52
|
+
|
|
53
|
+
return m
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
from .imageurl import ImageURL, ImageURLTypedDict
|
|
5
|
+
from mistralai_gcp.types import BaseModel
|
|
6
|
+
from typing import Literal, Optional, Union
|
|
7
|
+
from typing_extensions import NotRequired, TypeAliasType, TypedDict
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
ImageURLChunkImageURLTypedDict = TypeAliasType(
|
|
11
|
+
"ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str]
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
ImageURLChunkType = Literal["image_url"]
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ImageURLChunkTypedDict(TypedDict):
|
|
22
|
+
r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
|
|
23
|
+
|
|
24
|
+
image_url: ImageURLChunkImageURLTypedDict
|
|
25
|
+
type: NotRequired[ImageURLChunkType]
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class ImageURLChunk(BaseModel):
|
|
29
|
+
r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
|
|
30
|
+
|
|
31
|
+
image_url: ImageURLChunkImageURL
|
|
32
|
+
|
|
33
|
+
type: Optional[ImageURLChunkType] = "image_url"
|
|
File without changes
|
|
File without changes
|