mistralai 1.1.0__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff shows the content of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.
- mistralai/__init__.py +4 -0
- mistralai/_version.py +12 -0
- mistralai/agents.py +56 -22
- mistralai/batch.py +17 -0
- mistralai/chat.py +64 -30
- mistralai/classifiers.py +396 -0
- mistralai/embeddings.py +10 -6
- mistralai/files.py +252 -19
- mistralai/fim.py +40 -30
- mistralai/jobs.py +40 -20
- mistralai/mistral_jobs.py +733 -0
- mistralai/models/__init__.py +108 -18
- mistralai/models/agentscompletionrequest.py +27 -10
- mistralai/models/agentscompletionstreamrequest.py +27 -10
- mistralai/models/apiendpoint.py +9 -0
- mistralai/models/archiveftmodelout.py +11 -5
- mistralai/models/assistantmessage.py +11 -6
- mistralai/models/basemodelcard.py +22 -6
- mistralai/models/batcherror.py +17 -0
- mistralai/models/batchjobin.py +58 -0
- mistralai/models/batchjobout.py +117 -0
- mistralai/models/batchjobsout.py +30 -0
- mistralai/models/batchjobstatus.py +15 -0
- mistralai/models/chatclassificationrequest.py +104 -0
- mistralai/models/chatcompletionchoice.py +9 -4
- mistralai/models/chatcompletionrequest.py +32 -13
- mistralai/models/chatcompletionresponse.py +2 -2
- mistralai/models/chatcompletionstreamrequest.py +32 -13
- mistralai/models/checkpointout.py +1 -1
- mistralai/models/classificationobject.py +21 -0
- mistralai/models/classificationrequest.py +59 -0
- mistralai/models/classificationresponse.py +21 -0
- mistralai/models/completionchunk.py +2 -2
- mistralai/models/completionevent.py +1 -1
- mistralai/models/completionresponsestreamchoice.py +11 -5
- mistralai/models/delete_model_v1_models_model_id_deleteop.py +1 -2
- mistralai/models/deletefileout.py +1 -1
- mistralai/models/deletemodelout.py +2 -2
- mistralai/models/deltamessage.py +14 -7
- mistralai/models/detailedjobout.py +11 -5
- mistralai/models/embeddingrequest.py +5 -5
- mistralai/models/embeddingresponse.py +2 -1
- mistralai/models/embeddingresponsedata.py +2 -2
- mistralai/models/eventout.py +2 -2
- mistralai/models/filepurpose.py +8 -0
- mistralai/models/files_api_routes_delete_fileop.py +1 -2
- mistralai/models/files_api_routes_download_fileop.py +16 -0
- mistralai/models/files_api_routes_list_filesop.py +96 -0
- mistralai/models/files_api_routes_retrieve_fileop.py +1 -2
- mistralai/models/files_api_routes_upload_fileop.py +9 -9
- mistralai/models/fileschema.py +7 -21
- mistralai/models/fimcompletionrequest.py +20 -13
- mistralai/models/fimcompletionresponse.py +2 -2
- mistralai/models/fimcompletionstreamrequest.py +20 -13
- mistralai/models/ftmodelcapabilitiesout.py +2 -2
- mistralai/models/ftmodelcard.py +24 -6
- mistralai/models/ftmodelout.py +9 -5
- mistralai/models/function.py +2 -2
- mistralai/models/functioncall.py +2 -1
- mistralai/models/functionname.py +1 -1
- mistralai/models/githubrepositoryin.py +11 -5
- mistralai/models/githubrepositoryout.py +11 -5
- mistralai/models/httpvalidationerror.py +0 -2
- mistralai/models/imageurl.py +1 -2
- mistralai/models/imageurlchunk.py +11 -5
- mistralai/models/jobin.py +2 -2
- mistralai/models/jobmetadataout.py +1 -2
- mistralai/models/jobout.py +10 -5
- mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +16 -0
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +16 -0
- mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +95 -0
- mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +2 -2
- mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +1 -2
- mistralai/models/jobsout.py +9 -5
- mistralai/models/legacyjobmetadataout.py +12 -5
- mistralai/models/listfilesout.py +5 -1
- mistralai/models/metricout.py +1 -2
- mistralai/models/modelcapabilities.py +2 -2
- mistralai/models/modellist.py +2 -2
- mistralai/models/responseformat.py +2 -2
- mistralai/models/retrieve_model_v1_models_model_id_getop.py +2 -2
- mistralai/models/retrievefileout.py +10 -21
- mistralai/models/sampletype.py +6 -2
- mistralai/models/security.py +2 -2
- mistralai/models/source.py +3 -2
- mistralai/models/systemmessage.py +6 -6
- mistralai/models/textchunk.py +9 -5
- mistralai/models/tool.py +2 -2
- mistralai/models/toolcall.py +2 -2
- mistralai/models/toolchoice.py +2 -2
- mistralai/models/toolmessage.py +2 -2
- mistralai/models/trainingfile.py +2 -2
- mistralai/models/trainingparameters.py +7 -2
- mistralai/models/trainingparametersin.py +7 -2
- mistralai/models/unarchiveftmodelout.py +11 -5
- mistralai/models/updateftmodelin.py +1 -2
- mistralai/models/uploadfileout.py +7 -21
- mistralai/models/usageinfo.py +1 -1
- mistralai/models/usermessage.py +36 -5
- mistralai/models/validationerror.py +2 -1
- mistralai/models/wandbintegration.py +11 -5
- mistralai/models/wandbintegrationout.py +12 -6
- mistralai/models_.py +48 -24
- mistralai/sdk.py +7 -0
- mistralai/sdkconfiguration.py +7 -7
- mistralai/utils/__init__.py +8 -0
- mistralai/utils/annotations.py +13 -2
- mistralai/utils/serializers.py +25 -0
- {mistralai-1.1.0.dist-info → mistralai-1.2.1.dist-info}/METADATA +90 -14
- mistralai-1.2.1.dist-info/RECORD +276 -0
- {mistralai-1.1.0.dist-info → mistralai-1.2.1.dist-info}/WHEEL +1 -1
- mistralai_azure/__init__.py +4 -0
- mistralai_azure/_version.py +12 -0
- mistralai_azure/chat.py +64 -30
- mistralai_azure/models/__init__.py +9 -3
- mistralai_azure/models/assistantmessage.py +11 -6
- mistralai_azure/models/chatcompletionchoice.py +10 -5
- mistralai_azure/models/chatcompletionrequest.py +32 -13
- mistralai_azure/models/chatcompletionresponse.py +2 -2
- mistralai_azure/models/chatcompletionstreamrequest.py +32 -13
- mistralai_azure/models/completionchunk.py +2 -2
- mistralai_azure/models/completionevent.py +1 -1
- mistralai_azure/models/completionresponsestreamchoice.py +9 -4
- mistralai_azure/models/deltamessage.py +14 -7
- mistralai_azure/models/function.py +2 -2
- mistralai_azure/models/functioncall.py +2 -1
- mistralai_azure/models/functionname.py +1 -1
- mistralai_azure/models/httpvalidationerror.py +0 -2
- mistralai_azure/models/responseformat.py +2 -2
- mistralai_azure/models/security.py +1 -2
- mistralai_azure/models/systemmessage.py +6 -6
- mistralai_azure/models/textchunk.py +9 -5
- mistralai_azure/models/tool.py +2 -2
- mistralai_azure/models/toolcall.py +2 -2
- mistralai_azure/models/toolchoice.py +2 -2
- mistralai_azure/models/toolmessage.py +2 -2
- mistralai_azure/models/usageinfo.py +1 -1
- mistralai_azure/models/usermessage.py +36 -5
- mistralai_azure/models/validationerror.py +2 -1
- mistralai_azure/sdkconfiguration.py +7 -7
- mistralai_azure/utils/__init__.py +8 -0
- mistralai_azure/utils/annotations.py +13 -2
- mistralai_azure/utils/serializers.py +25 -0
- mistralai_gcp/__init__.py +4 -0
- mistralai_gcp/_version.py +12 -0
- mistralai_gcp/chat.py +64 -30
- mistralai_gcp/fim.py +40 -30
- mistralai_gcp/models/__init__.py +9 -3
- mistralai_gcp/models/assistantmessage.py +11 -6
- mistralai_gcp/models/chatcompletionchoice.py +10 -5
- mistralai_gcp/models/chatcompletionrequest.py +32 -13
- mistralai_gcp/models/chatcompletionresponse.py +2 -2
- mistralai_gcp/models/chatcompletionstreamrequest.py +32 -13
- mistralai_gcp/models/completionchunk.py +2 -2
- mistralai_gcp/models/completionevent.py +1 -1
- mistralai_gcp/models/completionresponsestreamchoice.py +9 -4
- mistralai_gcp/models/deltamessage.py +14 -7
- mistralai_gcp/models/fimcompletionrequest.py +20 -13
- mistralai_gcp/models/fimcompletionresponse.py +2 -2
- mistralai_gcp/models/fimcompletionstreamrequest.py +20 -13
- mistralai_gcp/models/function.py +2 -2
- mistralai_gcp/models/functioncall.py +2 -1
- mistralai_gcp/models/functionname.py +1 -1
- mistralai_gcp/models/httpvalidationerror.py +0 -2
- mistralai_gcp/models/responseformat.py +2 -2
- mistralai_gcp/models/security.py +1 -2
- mistralai_gcp/models/systemmessage.py +6 -6
- mistralai_gcp/models/textchunk.py +9 -5
- mistralai_gcp/models/tool.py +2 -2
- mistralai_gcp/models/toolcall.py +2 -2
- mistralai_gcp/models/toolchoice.py +2 -2
- mistralai_gcp/models/toolmessage.py +2 -2
- mistralai_gcp/models/usageinfo.py +1 -1
- mistralai_gcp/models/usermessage.py +36 -5
- mistralai_gcp/models/validationerror.py +2 -1
- mistralai_gcp/sdk.py +20 -11
- mistralai_gcp/sdkconfiguration.py +7 -7
- mistralai_gcp/utils/__init__.py +8 -0
- mistralai_gcp/utils/annotations.py +13 -2
- mistralai_gcp/utils/serializers.py +25 -0
- mistralai-1.1.0.dist-info/RECORD +0 -254
- {mistralai-1.1.0.dist-info → mistralai-1.2.1.dist-info}/LICENSE +0 -0
mistralai/__init__.py
CHANGED
mistralai/_version.py
ADDED
```diff
@@ -0,0 +1,12 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import importlib.metadata
+
+__title__: str = "mistralai"
+__version__: str = "1.2.1"
+
+try:
+    if __package__ is not None:
+        __version__ = importlib.metadata.version(__package__)
+except importlib.metadata.PackageNotFoundError:
+    pass
```
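The new module means the version reported by the SDK tracks whatever distribution is actually installed, falling back to the hard-coded string when package metadata is unavailable. A minimal sketch of the runtime behavior, assuming the 1.2.1 wheel is installed:

```python
# Illustrative sketch only: how the generated _version module resolves the version.
from mistralai import _version

print(_version.__title__)    # "mistralai"
print(_version.__version__)  # installed distribution version, e.g. "1.2.1";
                             # falls back to the hard-coded string if metadata is missing
```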
mistralai/agents.py
CHANGED
```diff
@@ -20,7 +20,6 @@ class Agents(BaseSDK):
         ],
         agent_id: str,
         max_tokens: OptionalNullable[int] = UNSET,
-        min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
         stop: Optional[
             Union[
@@ -41,6 +40,9 @@
                 models.AgentsCompletionRequestToolChoiceTypedDict,
             ]
         ] = None,
+        presence_penalty: Optional[float] = 0,
+        frequency_penalty: Optional[float] = 0,
+        n: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -50,13 +52,15 @@
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param agent_id: The ID of the agent to use for this completion.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
-        :param min_tokens: The minimum number of tokens to generate in the completion.
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
         :param response_format:
         :param tools:
         :param tool_choice:
+        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param n: Number of completions to return for each request, input tokens are only billed once.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -71,7 +75,6 @@

         request = models.AgentsCompletionRequest(
             max_tokens=max_tokens,
-            min_tokens=min_tokens,
             stream=stream,
             stop=stop,
             random_seed=random_seed,
@@ -85,6 +88,9 @@
             tool_choice=utils.get_pydantic_model(
                 tool_choice, Optional[models.AgentsCompletionRequestToolChoice]
             ),
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            n=n,
             agent_id=agent_id,
         )

@@ -136,15 +142,17 @@
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
-                "API error occurred", http_res.status_code,
+                "API error occurred", http_res.status_code, http_res_text, http_res
             )

         content_type = http_res.headers.get("Content-Type")
+        http_res_text = utils.stream_to_text(http_res)
         raise models.SDKError(
             f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
             http_res.status_code,
-
+            http_res_text,
             http_res,
         )

@@ -157,7 +165,6 @@
         ],
         agent_id: str,
         max_tokens: OptionalNullable[int] = UNSET,
-        min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
         stop: Optional[
             Union[
@@ -178,6 +185,9 @@
                 models.AgentsCompletionRequestToolChoiceTypedDict,
             ]
         ] = None,
+        presence_penalty: Optional[float] = 0,
+        frequency_penalty: Optional[float] = 0,
+        n: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -187,13 +197,15 @@
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param agent_id: The ID of the agent to use for this completion.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
-        :param min_tokens: The minimum number of tokens to generate in the completion.
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
         :param response_format:
         :param tools:
         :param tool_choice:
+        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param n: Number of completions to return for each request, input tokens are only billed once.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -208,7 +220,6 @@

         request = models.AgentsCompletionRequest(
             max_tokens=max_tokens,
-            min_tokens=min_tokens,
             stream=stream,
             stop=stop,
             random_seed=random_seed,
@@ -222,6 +233,9 @@
             tool_choice=utils.get_pydantic_model(
                 tool_choice, Optional[models.AgentsCompletionRequestToolChoice]
             ),
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            n=n,
             agent_id=agent_id,
         )

@@ -273,15 +287,17 @@
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
-                "API error occurred", http_res.status_code,
+                "API error occurred", http_res.status_code, http_res_text, http_res
             )

         content_type = http_res.headers.get("Content-Type")
+        http_res_text = await utils.stream_to_text_async(http_res)
         raise models.SDKError(
             f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
             http_res.status_code,
-
+            http_res_text,
             http_res,
         )

@@ -294,7 +310,6 @@
         ],
         agent_id: str,
         max_tokens: OptionalNullable[int] = UNSET,
-        min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[
             Union[
@@ -315,6 +330,9 @@
                 models.AgentsCompletionStreamRequestToolChoiceTypedDict,
             ]
         ] = None,
+        presence_penalty: Optional[float] = 0,
+        frequency_penalty: Optional[float] = 0,
+        n: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -326,13 +344,15 @@
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param agent_id: The ID of the agent to use for this completion.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
-        :param min_tokens: The minimum number of tokens to generate in the completion.
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
         :param response_format:
         :param tools:
         :param tool_choice:
+        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param n: Number of completions to return for each request, input tokens are only billed once.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -347,7 +367,6 @@

         request = models.AgentsCompletionStreamRequest(
             max_tokens=max_tokens,
-            min_tokens=min_tokens,
             stream=stream,
             stop=stop,
             random_seed=random_seed,
@@ -361,6 +380,9 @@
             tool_choice=utils.get_pydantic_model(
                 tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice]
             ),
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            n=n,
             agent_id=agent_id,
         )

@@ -412,18 +434,21 @@
                 sentinel="[DONE]",
             )
         if utils.match_response(http_res, "422", "application/json"):
-
+            http_res_text = utils.stream_to_text(http_res)
+            data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
-                "API error occurred", http_res.status_code,
+                "API error occurred", http_res.status_code, http_res_text, http_res
             )

         content_type = http_res.headers.get("Content-Type")
+        http_res_text = utils.stream_to_text(http_res)
         raise models.SDKError(
             f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
             http_res.status_code,
-
+            http_res_text,
             http_res,
         )

@@ -436,7 +461,6 @@
         ],
         agent_id: str,
         max_tokens: OptionalNullable[int] = UNSET,
-        min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[
             Union[
@@ -457,6 +481,9 @@
                 models.AgentsCompletionStreamRequestToolChoiceTypedDict,
             ]
         ] = None,
+        presence_penalty: Optional[float] = 0,
+        frequency_penalty: Optional[float] = 0,
+        n: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -468,13 +495,15 @@
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param agent_id: The ID of the agent to use for this completion.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
-        :param min_tokens: The minimum number of tokens to generate in the completion.
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
         :param response_format:
         :param tools:
         :param tool_choice:
+        :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+        :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+        :param n: Number of completions to return for each request, input tokens are only billed once.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -489,7 +518,6 @@

         request = models.AgentsCompletionStreamRequest(
             max_tokens=max_tokens,
-            min_tokens=min_tokens,
             stream=stream,
             stop=stop,
             random_seed=random_seed,
@@ -503,6 +531,9 @@
             tool_choice=utils.get_pydantic_model(
                 tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice]
             ),
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            n=n,
             agent_id=agent_id,
         )

@@ -554,17 +585,20 @@
                 sentinel="[DONE]",
             )
         if utils.match_response(http_res, "422", "application/json"):
-
+            http_res_text = await utils.stream_to_text_async(http_res)
+            data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
-                "API error occurred", http_res.status_code,
+                "API error occurred", http_res.status_code, http_res_text, http_res
             )

         content_type = http_res.headers.get("Content-Type")
+        http_res_text = await utils.stream_to_text_async(http_res)
         raise models.SDKError(
             f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
             http_res.status_code,
-
+            http_res_text,
             http_res,
         )
```
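In practice, the change to `Agents.complete()` and its async/stream variants means callers drop `min_tokens` and gain `presence_penalty`, `frequency_penalty`, and `n`; the error-handling hunks also show `SDKError` now being raised with the response body text and the raw response attached. A minimal usage sketch against the 1.2.1 surface; the agent ID and API key below are placeholders, not real values:

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.agents.complete(
    agent_id="ag-xxxxxxxx",  # placeholder agent ID
    messages=[{"role": "user", "content": "Draft a short status update."}],
    max_tokens=256,
    presence_penalty=0.3,    # new in 1.2.1: discourages reusing words/phrases
    frequency_penalty=0.3,   # new in 1.2.1: penalizes frequent repetitions
    n=2,                     # new in 1.2.1: number of completions (input tokens billed once)
    # min_tokens=...         # removed in 1.2.1; no longer a valid keyword argument
)
```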
mistralai/batch.py
ADDED
```diff
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from .sdkconfiguration import SDKConfiguration
+from mistralai.mistral_jobs import MistralJobs
+
+
+class Batch(BaseSDK):
+    jobs: MistralJobs
+
+    def __init__(self, sdk_config: SDKConfiguration) -> None:
+        BaseSDK.__init__(self, sdk_config)
+        self.sdk_configuration = sdk_config
+        self._init_sdks()
+
+    def _init_sdks(self):
+        self.jobs = MistralJobs(self.sdk_configuration)
```
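The new `Batch` namespace simply wires a `MistralJobs` helper (from the new `mistralai/mistral_jobs.py`) onto the SDK configuration. A hedged sketch of how it is reached from the client, assuming the +7 change to `mistralai/sdk.py` registers it as `client.batch`; the specific job methods are assumptions based on the new `jobs_api_routes_batch_*` models in this release, not verified signatures:

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# The Batch wrapper exposes its jobs sub-SDK via _init_sdks(), as shown above.
jobs = client.batch.jobs

# Assumed, illustrative calls (names not confirmed by this diff):
# listing = jobs.list()          # enumerate batch jobs
# job = jobs.get(job_id="...")   # fetch a single batch job
# jobs.cancel(job_id="...")      # cancel a running batch job
```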