mistralai 1.9.11__py3-none-any.whl → 1.10.0__py3-none-any.whl
This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- mistralai/_hooks/registration.py +5 -0
- mistralai/_hooks/tracing.py +50 -0
- mistralai/_version.py +2 -2
- mistralai/accesses.py +8 -8
- mistralai/agents.py +29 -17
- mistralai/chat.py +41 -29
- mistralai/conversations.py +294 -62
- mistralai/documents.py +19 -3
- mistralai/embeddings.py +6 -6
- mistralai/extra/observability/__init__.py +15 -0
- mistralai/extra/observability/otel.py +393 -0
- mistralai/extra/run/tools.py +28 -16
- mistralai/files.py +6 -0
- mistralai/fim.py +17 -5
- mistralai/mistral_agents.py +229 -1
- mistralai/mistral_jobs.py +10 -10
- mistralai/models/__init__.py +69 -2
- mistralai/models/agent.py +15 -2
- mistralai/models/agentconversation.py +11 -3
- mistralai/models/agentcreationrequest.py +6 -2
- mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
- mistralai/models/agents_api_v1_agents_getop.py +40 -3
- mistralai/models/agents_api_v1_agents_listop.py +72 -2
- mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
- mistralai/models/agents_api_v1_conversations_listop.py +39 -2
- mistralai/models/agentscompletionrequest.py +21 -6
- mistralai/models/agentscompletionstreamrequest.py +21 -6
- mistralai/models/agentupdaterequest.py +18 -2
- mistralai/models/audiotranscriptionrequest.py +2 -0
- mistralai/models/batchjobin.py +10 -0
- mistralai/models/chatcompletionrequest.py +22 -5
- mistralai/models/chatcompletionstreamrequest.py +22 -5
- mistralai/models/conversationrequest.py +15 -4
- mistralai/models/conversationrestartrequest.py +50 -2
- mistralai/models/conversationrestartstreamrequest.py +50 -2
- mistralai/models/conversationstreamrequest.py +15 -4
- mistralai/models/documentout.py +26 -10
- mistralai/models/documentupdatein.py +24 -3
- mistralai/models/embeddingrequest.py +8 -8
- mistralai/models/files_api_routes_list_filesop.py +7 -0
- mistralai/models/fimcompletionrequest.py +8 -9
- mistralai/models/fimcompletionstreamrequest.py +8 -9
- mistralai/models/libraries_documents_list_v1op.py +15 -2
- mistralai/models/libraryout.py +10 -7
- mistralai/models/listfilesout.py +35 -4
- mistralai/models/modelcapabilities.py +13 -4
- mistralai/models/modelconversation.py +8 -2
- mistralai/models/ocrpageobject.py +26 -5
- mistralai/models/ocrrequest.py +17 -1
- mistralai/models/ocrtableobject.py +31 -0
- mistralai/models/prediction.py +4 -0
- mistralai/models/requestsource.py +7 -0
- mistralai/models/responseformat.py +4 -2
- mistralai/models/responseformats.py +0 -1
- mistralai/models/sharingdelete.py +36 -5
- mistralai/models/sharingin.py +36 -5
- mistralai/models/sharingout.py +3 -3
- mistralai/models/toolexecutiondeltaevent.py +13 -4
- mistralai/models/toolexecutiondoneevent.py +13 -4
- mistralai/models/toolexecutionentry.py +9 -4
- mistralai/models/toolexecutionstartedevent.py +13 -4
- mistralai/models_.py +2 -14
- mistralai/ocr.py +18 -0
- mistralai/transcriptions.py +4 -4
- {mistralai-1.9.11.dist-info → mistralai-1.10.0.dist-info}/METADATA +30 -12
- {mistralai-1.9.11.dist-info → mistralai-1.10.0.dist-info}/RECORD +68 -61
- {mistralai-1.9.11.dist-info → mistralai-1.10.0.dist-info}/WHEEL +0 -0
- {mistralai-1.9.11.dist-info → mistralai-1.10.0.dist-info}/licenses/LICENSE +0 -0
mistralai/fim.py
CHANGED
@@ -6,7 +6,7 @@ from mistralai._hooks import HookContext
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from mistralai.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Any, Mapping, Optional, Union
+from typing import Any, Dict, Mapping, Optional, Union
 
 
 class Fim(BaseSDK):
@@ -28,6 +28,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -39,7 +40,7 @@ class Fim(BaseSDK):
 
         FIM completion.
 
-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -47,6 +48,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -72,6 +74,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -152,6 +155,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -163,7 +167,7 @@ class Fim(BaseSDK):
 
         FIM completion.
 
-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -171,6 +175,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -196,6 +201,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -276,6 +282,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -287,7 +294,7 @@ class Fim(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -295,6 +302,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -320,6 +328,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -408,6 +417,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -419,7 +429,7 @@ class Fim(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -427,6 +437,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -452,6 +463,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
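The practical effect of this file's change is a new optional `metadata` argument on all four FIM entry points (`complete`, `complete_async`, `stream`, `stream_async`), typed `Dict[str, Any]`. A minimal usage sketch; the model name and metadata keys are illustrative assumptions, not taken from the diff:

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")

res = client.fim.complete(
    model="codestral-latest",          # any FIM-capable model
    prompt="def fibonacci(n):",
    suffix="return result",
    metadata={"trace_id": "abc-123"},  # new in 1.10.0: free-form Dict[str, Any]
)
print(res.choices[0].message.content)
```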
mistralai/mistral_agents.py
CHANGED
@@ -6,7 +6,7 @@ from mistralai._hooks import HookContext
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import get_security_from_env
 from mistralai.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Any, List, Mapping, Optional, Union
+from typing import Any, Dict, List, Mapping, Optional, Union
 
 
 class MistralAgents(BaseSDK):
@@ -29,6 +29,7 @@ class MistralAgents(BaseSDK):
         ] = None,
         description: OptionalNullable[str] = UNSET,
         handoffs: OptionalNullable[List[str]] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -45,6 +46,7 @@ class MistralAgents(BaseSDK):
         :param completion_args: White-listed arguments from the completion API
         :param description:
         :param handoffs:
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -72,6 +74,7 @@ class MistralAgents(BaseSDK):
             name=name,
             description=description,
             handoffs=handoffs,
+            metadata=metadata,
         )
 
         req = self._build_request(
@@ -150,6 +153,7 @@ class MistralAgents(BaseSDK):
         ] = None,
         description: OptionalNullable[str] = UNSET,
         handoffs: OptionalNullable[List[str]] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -166,6 +170,7 @@ class MistralAgents(BaseSDK):
         :param completion_args: White-listed arguments from the completion API
         :param description:
         :param handoffs:
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -193,6 +198,7 @@ class MistralAgents(BaseSDK):
             name=name,
             description=description,
             handoffs=handoffs,
+            metadata=metadata,
         )
 
         req = self._build_request_async(
@@ -259,6 +265,11 @@ class MistralAgents(BaseSDK):
         *,
         page: Optional[int] = 0,
         page_size: Optional[int] = 20,
+        deployment_chat: OptionalNullable[bool] = UNSET,
+        sources: OptionalNullable[List[models.RequestSource]] = UNSET,
+        name: OptionalNullable[str] = UNSET,
+        id: OptionalNullable[str] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -270,6 +281,11 @@ class MistralAgents(BaseSDK):
 
         :param page:
         :param page_size:
+        :param deployment_chat:
+        :param sources:
+        :param name:
+        :param id:
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -288,6 +304,11 @@ class MistralAgents(BaseSDK):
         request = models.AgentsAPIV1AgentsListRequest(
             page=page,
             page_size=page_size,
+            deployment_chat=deployment_chat,
+            sources=sources,
+            name=name,
+            id=id,
+            metadata=metadata,
         )
 
         req = self._build_request(
@@ -351,6 +372,11 @@ class MistralAgents(BaseSDK):
         *,
         page: Optional[int] = 0,
         page_size: Optional[int] = 20,
+        deployment_chat: OptionalNullable[bool] = UNSET,
+        sources: OptionalNullable[List[models.RequestSource]] = UNSET,
+        name: OptionalNullable[str] = UNSET,
+        id: OptionalNullable[str] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -362,6 +388,11 @@ class MistralAgents(BaseSDK):
 
         :param page:
         :param page_size:
+        :param deployment_chat:
+        :param sources:
+        :param name:
+        :param id:
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -380,6 +411,11 @@ class MistralAgents(BaseSDK):
         request = models.AgentsAPIV1AgentsListRequest(
             page=page,
             page_size=page_size,
+            deployment_chat=deployment_chat,
+            sources=sources,
+            name=name,
+            id=id,
+            metadata=metadata,
         )
 
         req = self._build_request_async(
@@ -442,6 +478,7 @@ class MistralAgents(BaseSDK):
         self,
         *,
         agent_id: str,
+        agent_version: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -452,6 +489,7 @@ class MistralAgents(BaseSDK):
         Given an agent retrieve an agent entity with its attributes.
 
         :param agent_id:
+        :param agent_version:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -469,6 +507,7 @@ class MistralAgents(BaseSDK):
 
         request = models.AgentsAPIV1AgentsGetRequest(
             agent_id=agent_id,
+            agent_version=agent_version,
         )
 
         req = self._build_request(
@@ -531,6 +570,7 @@ class MistralAgents(BaseSDK):
         self,
         *,
         agent_id: str,
+        agent_version: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -541,6 +581,7 @@ class MistralAgents(BaseSDK):
         Given an agent retrieve an agent entity with its attributes.
 
         :param agent_id:
+        :param agent_version:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -558,6 +599,7 @@ class MistralAgents(BaseSDK):
 
         request = models.AgentsAPIV1AgentsGetRequest(
             agent_id=agent_id,
+            agent_version=agent_version,
         )
 
         req = self._build_request_async(
@@ -634,6 +676,8 @@ class MistralAgents(BaseSDK):
         name: OptionalNullable[str] = UNSET,
         description: OptionalNullable[str] = UNSET,
         handoffs: OptionalNullable[List[str]] = UNSET,
+        deployment_chat: OptionalNullable[bool] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -651,6 +695,8 @@ class MistralAgents(BaseSDK):
         :param name:
         :param description:
         :param handoffs:
+        :param deployment_chat:
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -680,6 +726,8 @@ class MistralAgents(BaseSDK):
                 name=name,
                 description=description,
                 handoffs=handoffs,
+                deployment_chat=deployment_chat,
+                metadata=metadata,
             ),
         )
 
@@ -764,6 +812,8 @@ class MistralAgents(BaseSDK):
         name: OptionalNullable[str] = UNSET,
         description: OptionalNullable[str] = UNSET,
         handoffs: OptionalNullable[List[str]] = UNSET,
+        deployment_chat: OptionalNullable[bool] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -781,6 +831,8 @@ class MistralAgents(BaseSDK):
         :param name:
         :param description:
         :param handoffs:
+        :param deployment_chat:
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -810,6 +862,8 @@ class MistralAgents(BaseSDK):
                 name=name,
                 description=description,
                 handoffs=handoffs,
+                deployment_chat=deployment_chat,
+                metadata=metadata,
             ),
         )
 
@@ -876,6 +930,180 @@ class MistralAgents(BaseSDK):
 
         raise models.SDKError("Unexpected response received", http_res)
 
+    def delete(
+        self,
+        *,
+        agent_id: str,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ):
+        r"""Delete an agent entity.
+
+        :param agent_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1AgentsDeleteRequest(
+            agent_id=agent_id,
+        )
+
+        req = self._build_request(
+            method="DELETE",
+            path="/v1/agents/{agent_id}",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="agents_api_v1_agents_delete",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "204", "*"):
+            return
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
+            )
+            raise models.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    async def delete_async(
+        self,
+        *,
+        agent_id: str,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ):
+        r"""Delete an agent entity.
+
+        :param agent_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1AgentsDeleteRequest(
+            agent_id=agent_id,
+        )
+
+        req = self._build_request_async(
+            method="DELETE",
+            path="/v1/agents/{agent_id}",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="agents_api_v1_agents_delete",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "204", "*"):
+            return
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
+            )
+            raise models.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
     def update_version(
         self,
         *,
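Taken together, this file gains server-side list filters (`deployment_chat`, `sources`, `name`, `id`, `metadata`), version pinning on `get`, `deployment_chat`/`metadata` on create and update, and a new `delete`/`delete_async` pair. A sketch of the new surface, assuming the class is exposed as `client.beta.agents` as in earlier 1.x releases (agent IDs and filter values are illustrative):

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")

# New list filters added in 1.10.0 (values here are illustrative).
agents = client.beta.agents.list(
    page=0,
    page_size=20,
    name="support-bot",
    metadata={"env": "staging"},
)

# `get` can now pin a specific agent version.
agent = client.beta.agents.get(agent_id="ag_0123", agent_version=2)

# New in 1.10.0: delete an agent entity. Returns None on HTTP 204 and
# raises HTTPValidationError on 422, per the generated method above.
client.beta.agents.delete(agent_id="ag_0123")
```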
mistralai/mistral_jobs.py
CHANGED
@@ -237,12 +237,12 @@ class MistralJobs(BaseSDK):
 
         Create a new batch job, it will be queued for processing.
 
-        :param input_files:
+        :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```
         :param endpoint:
-        :param model:
-        :param agent_id:
-        :param metadata:
-        :param timeout_hours:
+        :param model: The model to be used for batch inference.
+        :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.
+        :param metadata: The metadata of your choice to be associated with the batch inference job.
+        :param timeout_hours: The timeout in hours for the batch inference job.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -338,12 +338,12 @@ class MistralJobs(BaseSDK):
 
         Create a new batch job, it will be queued for processing.
 
-        :param input_files:
+        :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```
         :param endpoint:
-        :param model:
-        :param agent_id:
-        :param metadata:
-        :param timeout_hours:
+        :param model: The model to be used for batch inference.
+        :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.
+        :param metadata: The metadata of your choice to be associated with the batch inference job.
+        :param timeout_hours: The timeout in hours for the batch inference job.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds