mistralai 1.9.11__py3-none-any.whl → 1.10.1__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
- mistralai/_hooks/registration.py +5 -0
- mistralai/_hooks/tracing.py +75 -0
- mistralai/_version.py +2 -2
- mistralai/accesses.py +8 -8
- mistralai/agents.py +29 -17
- mistralai/chat.py +41 -29
- mistralai/classifiers.py +13 -1
- mistralai/conversations.py +294 -62
- mistralai/documents.py +19 -3
- mistralai/embeddings.py +13 -7
- mistralai/extra/README.md +1 -1
- mistralai/extra/mcp/auth.py +10 -11
- mistralai/extra/mcp/base.py +17 -16
- mistralai/extra/mcp/sse.py +13 -15
- mistralai/extra/mcp/stdio.py +5 -6
- mistralai/extra/observability/__init__.py +15 -0
- mistralai/extra/observability/otel.py +372 -0
- mistralai/extra/run/context.py +33 -43
- mistralai/extra/run/result.py +29 -30
- mistralai/extra/run/tools.py +34 -23
- mistralai/extra/struct_chat.py +15 -8
- mistralai/extra/utils/response_format.py +5 -3
- mistralai/files.py +6 -0
- mistralai/fim.py +17 -5
- mistralai/mistral_agents.py +229 -1
- mistralai/mistral_jobs.py +39 -13
- mistralai/models/__init__.py +99 -3
- mistralai/models/agent.py +15 -2
- mistralai/models/agentconversation.py +11 -3
- mistralai/models/agentcreationrequest.py +6 -2
- mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
- mistralai/models/agents_api_v1_agents_getop.py +40 -3
- mistralai/models/agents_api_v1_agents_listop.py +72 -2
- mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
- mistralai/models/agents_api_v1_conversations_listop.py +39 -2
- mistralai/models/agentscompletionrequest.py +21 -6
- mistralai/models/agentscompletionstreamrequest.py +21 -6
- mistralai/models/agentupdaterequest.py +18 -2
- mistralai/models/audioencoding.py +13 -0
- mistralai/models/audioformat.py +19 -0
- mistralai/models/audiotranscriptionrequest.py +2 -0
- mistralai/models/batchjobin.py +26 -5
- mistralai/models/batchjobout.py +5 -0
- mistralai/models/batchrequest.py +48 -0
- mistralai/models/chatcompletionrequest.py +22 -5
- mistralai/models/chatcompletionstreamrequest.py +22 -5
- mistralai/models/classificationrequest.py +37 -3
- mistralai/models/conversationrequest.py +15 -4
- mistralai/models/conversationrestartrequest.py +50 -2
- mistralai/models/conversationrestartstreamrequest.py +50 -2
- mistralai/models/conversationstreamrequest.py +15 -4
- mistralai/models/documentout.py +26 -10
- mistralai/models/documentupdatein.py +24 -3
- mistralai/models/embeddingrequest.py +19 -11
- mistralai/models/files_api_routes_list_filesop.py +7 -0
- mistralai/models/fimcompletionrequest.py +8 -9
- mistralai/models/fimcompletionstreamrequest.py +8 -9
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
- mistralai/models/libraries_documents_list_v1op.py +15 -2
- mistralai/models/libraryout.py +10 -7
- mistralai/models/listfilesout.py +35 -4
- mistralai/models/modelcapabilities.py +13 -4
- mistralai/models/modelconversation.py +8 -2
- mistralai/models/ocrpageobject.py +26 -5
- mistralai/models/ocrrequest.py +17 -1
- mistralai/models/ocrtableobject.py +31 -0
- mistralai/models/prediction.py +4 -0
- mistralai/models/requestsource.py +7 -0
- mistralai/models/responseformat.py +4 -2
- mistralai/models/responseformats.py +0 -1
- mistralai/models/sharingdelete.py +36 -5
- mistralai/models/sharingin.py +36 -5
- mistralai/models/sharingout.py +3 -3
- mistralai/models/toolexecutiondeltaevent.py +13 -4
- mistralai/models/toolexecutiondoneevent.py +13 -4
- mistralai/models/toolexecutionentry.py +9 -4
- mistralai/models/toolexecutionstartedevent.py +13 -4
- mistralai/models/toolfilechunk.py +11 -4
- mistralai/models/toolreferencechunk.py +13 -4
- mistralai/models_.py +2 -14
- mistralai/ocr.py +18 -0
- mistralai/transcriptions.py +4 -4
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/METADATA +162 -152
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/RECORD +168 -144
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +15 -5
- mistralai_azure/chat.py +59 -98
- mistralai_azure/models/__init__.py +50 -3
- mistralai_azure/models/chatcompletionrequest.py +16 -4
- mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
- mistralai_azure/models/httpvalidationerror.py +11 -6
- mistralai_azure/models/mistralazureerror.py +26 -0
- mistralai_azure/models/no_response_error.py +13 -0
- mistralai_azure/models/prediction.py +4 -0
- mistralai_azure/models/responseformat.py +4 -2
- mistralai_azure/models/responseformats.py +0 -1
- mistralai_azure/models/responsevalidationerror.py +25 -0
- mistralai_azure/models/sdkerror.py +30 -14
- mistralai_azure/models/systemmessage.py +7 -3
- mistralai_azure/models/systemmessagecontentchunks.py +21 -0
- mistralai_azure/models/thinkchunk.py +35 -0
- mistralai_azure/ocr.py +15 -36
- mistralai_azure/utils/__init__.py +18 -5
- mistralai_azure/utils/eventstreaming.py +10 -0
- mistralai_azure/utils/serializers.py +3 -2
- mistralai_azure/utils/unmarshal_json_response.py +24 -0
- mistralai_gcp/_hooks/types.py +7 -0
- mistralai_gcp/_version.py +4 -4
- mistralai_gcp/basesdk.py +27 -25
- mistralai_gcp/chat.py +75 -98
- mistralai_gcp/fim.py +39 -74
- mistralai_gcp/httpclient.py +6 -16
- mistralai_gcp/models/__init__.py +321 -116
- mistralai_gcp/models/assistantmessage.py +1 -1
- mistralai_gcp/models/chatcompletionrequest.py +36 -7
- mistralai_gcp/models/chatcompletionresponse.py +6 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
- mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
- mistralai_gcp/models/deltamessage.py +1 -1
- mistralai_gcp/models/fimcompletionrequest.py +3 -9
- mistralai_gcp/models/fimcompletionresponse.py +6 -6
- mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
- mistralai_gcp/models/httpvalidationerror.py +11 -6
- mistralai_gcp/models/imageurl.py +1 -1
- mistralai_gcp/models/jsonschema.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +26 -0
- mistralai_gcp/models/mistralpromptmode.py +8 -0
- mistralai_gcp/models/no_response_error.py +13 -0
- mistralai_gcp/models/prediction.py +4 -0
- mistralai_gcp/models/responseformat.py +5 -3
- mistralai_gcp/models/responseformats.py +0 -1
- mistralai_gcp/models/responsevalidationerror.py +25 -0
- mistralai_gcp/models/sdkerror.py +30 -14
- mistralai_gcp/models/systemmessage.py +7 -3
- mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
- mistralai_gcp/models/thinkchunk.py +35 -0
- mistralai_gcp/models/toolmessage.py +1 -1
- mistralai_gcp/models/usageinfo.py +71 -8
- mistralai_gcp/models/usermessage.py +1 -1
- mistralai_gcp/sdk.py +12 -10
- mistralai_gcp/sdkconfiguration.py +0 -7
- mistralai_gcp/types/basemodel.py +3 -3
- mistralai_gcp/utils/__init__.py +143 -45
- mistralai_gcp/utils/datetimes.py +23 -0
- mistralai_gcp/utils/enums.py +67 -27
- mistralai_gcp/utils/eventstreaming.py +10 -0
- mistralai_gcp/utils/forms.py +49 -28
- mistralai_gcp/utils/serializers.py +33 -3
- mistralai_gcp/utils/unmarshal_json_response.py +24 -0
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
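Several entries above change how failures surface: `no_response_error.py` and `responsevalidationerror.py` are new, and `sdkerror.py` is reworked so the raw HTTP response travels with the exception (the diffs below show the new constructor calls). Below is a hedged sketch of how calling code might adapt; the error class names come from this diff, while the client construction, model id, and message are illustrative placeholders.

```python
# Hedged sketch: handling the error types added/reworked in this release.
# NoResponseError, HTTPValidationError, and SDKError appear in the diffs
# below; the model id and message are placeholders, not values from the diff.
from mistralai_gcp import MistralGoogleCloud, models


def safe_complete(sdk: MistralGoogleCloud):
    try:
        return sdk.chat.complete(
            model="mistral-large",  # placeholder model id
            messages=[{"role": "user", "content": "ping"}],
        )
    except models.NoResponseError:
        # New in this release: raised when no HTTP response was received at
        # all (previously a bare SDKError("No response received")).
        return None
    except models.HTTPValidationError:
        # 422s: per the diff, the exception is now constructed with the raw
        # httpx response alongside the parsed validation data.
        raise
    except models.SDKError:
        # Other 4XX/5XX: now always constructed with the response attached.
        raise
```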
mistralai_gcp/basesdk.py CHANGED

```diff
@@ -15,9 +15,19 @@ from urllib.parse import parse_qs, urlparse
 
 class BaseSDK:
     sdk_configuration: SDKConfiguration
+    parent_ref: Optional[object] = None
+    """
+    Reference to the root SDK instance, if any. This will prevent it from
+    being garbage collected while there are active streams.
+    """
 
-    def __init__(self, sdk_config: SDKConfiguration) -> None:
+    def __init__(
+        self,
+        sdk_config: SDKConfiguration,
+        parent_ref: Optional[object] = None,
+    ) -> None:
         self.sdk_configuration = sdk_config
+        self.parent_ref = parent_ref
 
     def _get_url(self, base_url, url_variables):
         sdk_url, sdk_variables = self.sdk_configuration.get_server_details()
@@ -218,12 +228,12 @@ class BaseSDK:
         client = self.sdk_configuration.client
         logger = self.sdk_configuration.debug_logger
 
+        hooks = self.sdk_configuration.__dict__["_hooks"]
+
         def do():
             http_res = None
             try:
-                req = self.sdk_configuration.get_hooks().before_request(
-                    BeforeRequestContext(hook_ctx), request
-                )
+                req = hooks.before_request(BeforeRequestContext(hook_ctx), request)
                 logger.debug(
                     "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
                     req.method,
@@ -237,16 +247,14 @@ class BaseSDK:
 
                 http_res = client.send(req, stream=stream)
             except Exception as e:
-                _, e = self.sdk_configuration.get_hooks().after_error(
-                    AfterErrorContext(hook_ctx), None, e
-                )
+                _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e)
                 if e is not None:
                     logger.debug("Request Exception", exc_info=True)
                     raise e
 
             if http_res is None:
                 logger.debug("Raising no response SDK error")
-                raise models.SDKError("No response received")
+                raise models.NoResponseError("No response received")
 
             logger.debug(
                 "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
@@ -257,7 +265,7 @@ class BaseSDK:
             )
 
             if utils.match_status_codes(error_status_codes, http_res.status_code):
-                result, err = self.sdk_configuration.get_hooks().after_error(
+                result, err = hooks.after_error(
                     AfterErrorContext(hook_ctx), http_res, None
                 )
                 if err is not None:
@@ -267,7 +275,7 @@ class BaseSDK:
                     http_res = result
                 else:
                     logger.debug("Raising unexpected SDK error")
-                    raise models.SDKError("Unexpected error occurred")
+                    raise models.SDKError("Unexpected error occurred", http_res)
 
             return http_res
 
@@ -277,9 +285,7 @@ class BaseSDK:
         http_res = do()
 
         if not utils.match_status_codes(error_status_codes, http_res.status_code):
-            http_res = self.sdk_configuration.get_hooks().after_success(
-                AfterSuccessContext(hook_ctx), http_res
-            )
+            http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res)
 
         return http_res
 
@@ -294,12 +300,12 @@ class BaseSDK:
         client = self.sdk_configuration.async_client
         logger = self.sdk_configuration.debug_logger
 
+        hooks = self.sdk_configuration.__dict__["_hooks"]
+
         async def do():
             http_res = None
             try:
-                req = self.sdk_configuration.get_hooks().before_request(
-                    BeforeRequestContext(hook_ctx), request
-                )
+                req = hooks.before_request(BeforeRequestContext(hook_ctx), request)
                 logger.debug(
                     "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
                     req.method,
@@ -313,16 +319,14 @@ class BaseSDK:
 
                 http_res = await client.send(req, stream=stream)
             except Exception as e:
-                _, e = self.sdk_configuration.get_hooks().after_error(
-                    AfterErrorContext(hook_ctx), None, e
-                )
+                _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e)
                 if e is not None:
                     logger.debug("Request Exception", exc_info=True)
                     raise e
 
             if http_res is None:
                 logger.debug("Raising no response SDK error")
-                raise models.SDKError("No response received")
+                raise models.NoResponseError("No response received")
 
             logger.debug(
                 "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
@@ -333,7 +337,7 @@ class BaseSDK:
             )
 
             if utils.match_status_codes(error_status_codes, http_res.status_code):
-                result, err = self.sdk_configuration.get_hooks().after_error(
+                result, err = hooks.after_error(
                     AfterErrorContext(hook_ctx), http_res, None
                 )
                 if err is not None:
@@ -343,7 +347,7 @@ class BaseSDK:
                     http_res = result
                 else:
                     logger.debug("Raising unexpected SDK error")
-                    raise models.SDKError("Unexpected error occurred")
+                    raise models.SDKError("Unexpected error occurred", http_res)
 
             return http_res
 
@@ -355,8 +359,6 @@ class BaseSDK:
         http_res = await do()
 
         if not utils.match_status_codes(error_status_codes, http_res.status_code):
-            http_res = self.sdk_configuration.get_hooks().after_success(
-                AfterSuccessContext(hook_ctx), http_res
-            )
+            http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res)
 
         return http_res
```
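The `parent_ref` attribute added above is a lifetime-management device: a sub-SDK keeps a strong reference to the root SDK so the root (and the HTTP client it owns) cannot be garbage collected while a stream handed out by the sub-SDK is still being consumed. A minimal sketch of the mechanism, using illustrative stand-in classes rather than SDK code:

```python
# Minimal sketch of the parent_ref idea: a child keeps its root alive.
# Root and Child are illustrative stand-ins, not SDK classes.
import gc
import weakref


class Root:  # stands in for the root SDK that owns the HTTP client
    pass


class Child:  # stands in for a sub-SDK such as Chat(BaseSDK)
    def __init__(self, root: Root, keep_alive: bool) -> None:
        self.parent_ref = root if keep_alive else None  # strong ref, as in BaseSDK
        self._probe = weakref.ref(root)  # weak ref, only to observe collection


root = Root()
child = Child(root, keep_alive=True)
del root  # the caller drops its only reference to the root
gc.collect()
assert child._probe() is not None  # parent_ref kept the root alive

root = Root()
orphan = Child(root, keep_alive=False)
del root
gc.collect()
assert orphan._probe() is None  # without parent_ref the root was collected
```

The `client_ref=self` passed to the event stream in chat.py below closes the chain: stream → sub-SDK → root.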
mistralai_gcp/chat.py CHANGED

```diff
@@ -5,6 +5,7 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
+from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
 from typing import Any, List, Mapping, Optional, Union
 
 
@@ -41,6 +42,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -58,14 +60,15 @@ class Chat(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param response_format:
-        :param tools:
-        :param tool_choice:
-        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
-        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
-        :param prediction:
-        :param parallel_tool_calls:
+        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -104,6 +107,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
         )
 
         req = self._build_request(
@@ -135,6 +139,7 @@ class Chat(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_chat",
                 oauth2_scopes=[],
@@ -152,32 +157,23 @@ class Chat(BaseSDK):
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = utils.stream_to_text(http_res)
-            response_data = utils.unmarshal_json(
-                http_res_text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
         http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
 
     async def stream_async(
         self,
@@ -209,6 +205,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -226,14 +223,15 @@ class Chat(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param response_format:
-        :param tools:
-        :param tool_choice:
-        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
-        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
-        :param prediction:
-        :param parallel_tool_calls:
+        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -272,6 +270,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
         )
 
         req = self._build_request_async(
@@ -303,6 +302,7 @@ class Chat(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_chat",
                 oauth2_scopes=[],
@@ -320,32 +320,23 @@ class Chat(BaseSDK):
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            response_data = utils.unmarshal_json(
-                http_res_text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
            )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
         http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
 
     def complete(
         self,
@@ -385,6 +376,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -400,14 +392,15 @@ class Chat(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param response_format:
-        :param tools:
-        :param tool_choice:
-        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
-        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
-        :param prediction:
-        :param parallel_tool_calls:
+        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -448,6 +441,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
             ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
         )
 
         req = self._build_request(
@@ -479,6 +473,7 @@ class Chat(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_completion_v1_chat_completions_post",
                 oauth2_scopes=[],
@@ -491,33 +486,22 @@ class Chat(BaseSDK):
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(
-                http_res.text, Optional[models.ChatCompletionResponse]
+            return unmarshal_json_response(
+                Optional[models.ChatCompletionResponse], http_res
             )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
 
     async def complete_async(
         self,
@@ -557,6 +541,7 @@ class Chat(BaseSDK):
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
+        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -572,14 +557,15 @@ class Chat(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param response_format:
-        :param tools:
-        :param tool_choice:
-        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
-        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+        :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+        :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
-        :param prediction:
-        :param parallel_tool_calls:
+        :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+        :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -620,6 +606,7 @@ class Chat(BaseSDK):
                 prediction, Optional[models.Prediction]
            ),
             parallel_tool_calls=parallel_tool_calls,
+            prompt_mode=prompt_mode,
         )
 
         req = self._build_request_async(
@@ -651,6 +638,7 @@ class Chat(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_completion_v1_chat_completions_post",
                 oauth2_scopes=[],
@@ -663,30 +651,19 @@ class Chat(BaseSDK):
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(
-                http_res.text, Optional[models.ChatCompletionResponse]
+            return unmarshal_json_response(
+                Optional[models.ChatCompletionResponse], http_res
             )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
```