mistralai-1.0.1-py3-none-any.whl → mistralai-1.0.2-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (59)
  1. mistralai/agents.py +31 -31
  2. mistralai/chat.py +4 -4
  3. mistralai/jobs.py +8 -8
  4. mistralai/models/__init__.py +20 -20
  5. mistralai/models/agentscompletionstreamrequest.py +33 -31
  6. mistralai/models/archiveftmodelout.py +4 -2
  7. mistralai/models/chatcompletionchoice.py +3 -4
  8. mistralai/models/chatcompletionrequest.py +2 -2
  9. mistralai/models/chatcompletionstreamrequest.py +2 -2
  10. mistralai/models/deltamessage.py +3 -3
  11. mistralai/models/detailedjobout.py +19 -5
  12. mistralai/models/files_api_routes_upload_fileop.py +7 -4
  13. mistralai/models/fileschema.py +8 -3
  14. mistralai/models/ftmodelout.py +4 -2
  15. mistralai/models/githubrepositoryin.py +4 -2
  16. mistralai/models/githubrepositoryout.py +4 -2
  17. mistralai/models/jobin.py +16 -4
  18. mistralai/models/jobout.py +20 -5
  19. mistralai/models/jobsout.py +4 -2
  20. mistralai/models/legacyjobmetadataout.py +4 -2
  21. mistralai/models/retrievefileout.py +8 -3
  22. mistralai/models/tool.py +9 -5
  23. mistralai/models/toolcall.py +8 -4
  24. mistralai/models/trainingparameters.py +6 -2
  25. mistralai/models/trainingparametersin.py +10 -2
  26. mistralai/models/unarchiveftmodelout.py +4 -2
  27. mistralai/models/uploadfileout.py +8 -3
  28. mistralai/models/wandbintegration.py +4 -2
  29. mistralai/models/wandbintegrationout.py +4 -2
  30. mistralai/sdk.py +2 -2
  31. mistralai/sdkconfiguration.py +3 -3
  32. mistralai/utils/__init__.py +2 -2
  33. mistralai/utils/forms.py +10 -9
  34. mistralai/utils/headers.py +8 -8
  35. mistralai/utils/logger.py +8 -0
  36. mistralai/utils/queryparams.py +16 -14
  37. mistralai/utils/serializers.py +17 -8
  38. mistralai/utils/url.py +13 -8
  39. mistralai/utils/values.py +6 -0
  40. mistralai/version.py +7 -0
  41. {mistralai-1.0.1.dist-info → mistralai-1.0.2.dist-info}/METADATA +5 -2
  42. {mistralai-1.0.1.dist-info → mistralai-1.0.2.dist-info}/RECORD +59 -58
  43. mistralai_azure/models/__init__.py +3 -3
  44. mistralai_azure/models/chatcompletionchoice.py +3 -4
  45. mistralai_azure/models/deltamessage.py +3 -3
  46. mistralai_azure/models/tool.py +9 -5
  47. mistralai_azure/models/toolcall.py +8 -4
  48. mistralai_azure/sdkconfiguration.py +3 -3
  49. mistralai_gcp/chat.py +4 -4
  50. mistralai_gcp/models/__init__.py +3 -3
  51. mistralai_gcp/models/chatcompletionchoice.py +3 -4
  52. mistralai_gcp/models/chatcompletionrequest.py +2 -2
  53. mistralai_gcp/models/chatcompletionstreamrequest.py +2 -2
  54. mistralai_gcp/models/deltamessage.py +3 -3
  55. mistralai_gcp/models/tool.py +9 -5
  56. mistralai_gcp/models/toolcall.py +8 -4
  57. mistralai_gcp/sdkconfiguration.py +3 -3
  58. {mistralai-1.0.1.dist-info → mistralai-1.0.2.dist-info}/LICENSE +0 -0
  59. {mistralai-1.0.1.dist-info → mistralai-1.0.2.dist-info}/WHEEL +0 -0
mistralai/agents.py CHANGED
@@ -3,7 +3,7 @@
 from .basesdk import BaseSDK
 from mistralai import models, utils
 from mistralai._hooks import HookContext
-from mistralai.types import Nullable, OptionalNullable, UNSET
+from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from typing import Any, AsyncGenerator, Generator, List, Optional, Union
 
@@ -221,16 +221,16 @@ class Agents(BaseSDK):
 
     def stream(
         self, *,
-        model: Nullable[str],
-        prompt: str,
-        temperature: Optional[float] = 0.7,
-        top_p: Optional[float] = 1,
+        messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]],
+        agent_id: str,
         max_tokens: OptionalNullable[int] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None,
         random_seed: OptionalNullable[int] = UNSET,
-        suffix: OptionalNullable[str] = UNSET,
+        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
+        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
+        tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto",
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -239,16 +239,16 @@ class Agents(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
-        :param prompt: The text/code to complete.
-        :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
-        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
+        :param agent_id: The ID of the agent to use for this completion.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
+        :param response_format:
+        :param tools:
+        :param tool_choice:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -262,16 +262,16 @@ class Agents(BaseSDK):
             base_url = server_url
 
         request = models.AgentsCompletionStreamRequest(
-            model=model,
-            temperature=temperature,
-            top_p=top_p,
             max_tokens=max_tokens,
             min_tokens=min_tokens,
             stream=stream,
             stop=stop,
             random_seed=random_seed,
-            prompt=prompt,
-            suffix=suffix,
+            messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]),
+            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
+            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
+            tool_choice=tool_choice,
+            agent_id=agent_id,
         )
 
         req = self.build_request(
@@ -328,16 +328,16 @@ class Agents(BaseSDK):
 
     async def stream_async(
         self, *,
-        model: Nullable[str],
-        prompt: str,
-        temperature: Optional[float] = 0.7,
-        top_p: Optional[float] = 1,
+        messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]],
+        agent_id: str,
         max_tokens: OptionalNullable[int] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None,
         random_seed: OptionalNullable[int] = UNSET,
-        suffix: OptionalNullable[str] = UNSET,
+        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
+        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
+        tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto",
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -346,16 +346,16 @@ class Agents(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
-        :param prompt: The text/code to complete.
-        :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
-        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
+        :param agent_id: The ID of the agent to use for this completion.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
+        :param response_format:
+        :param tools:
+        :param tool_choice:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -369,16 +369,16 @@ class Agents(BaseSDK):
             base_url = server_url
 
         request = models.AgentsCompletionStreamRequest(
-            model=model,
-            temperature=temperature,
-            top_p=top_p,
             max_tokens=max_tokens,
             min_tokens=min_tokens,
             stream=stream,
             stop=stop,
             random_seed=random_seed,
-            prompt=prompt,
-            suffix=suffix,
+            messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]),
+            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
+            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
+            tool_choice=tool_choice,
+            agent_id=agent_id,
        )
 
        req = self.build_request(
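
Note: the practical upshot of the agents.py change is that in 1.0.1 `Agents.stream` mistakenly carried the FIM completion signature (`model`/`prompt`/`suffix`), while 1.0.2 gives it the agents signature (`messages`/`agent_id`, plus `tools`, `response_format`, and `tool_choice`). A minimal sketch of a 1.0.2-style call follows; the API key and agent id are placeholders, not real values.

from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

# Streams an agent completion; the agent_id below is a placeholder for an
# agent created in La Plateforme.
res = client.agents.stream(
    agent_id="ag-your-agent-id",
    messages=[{"role": "user", "content": "Summarize the 1.0.2 changes."}],
    max_tokens=128,
)

for event in res:
    delta = event.data.choices[0].delta
    if delta.content:  # content may now be null on a delta (see deltamessage.py below)
        print(delta.content, end="")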
mistralai/chat.py CHANGED
@@ -32,7 +32,7 @@ class Chat(BaseSDK):
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion
 
-        :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -145,7 +145,7 @@ class Chat(BaseSDK):
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion
 
-        :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -260,7 +260,7 @@ class Chat(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -376,7 +376,7 @@ class Chat(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
mistralai/jobs.py CHANGED
@@ -214,8 +214,8 @@ class Jobs(BaseSDK):
         training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None,
         validation_files: OptionalNullable[List[str]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
-        integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET,
-        repositories: Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None,
+        integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET,
+        repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None,
         auto_start: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -251,8 +251,8 @@ class Jobs(BaseSDK):
            validation_files=validation_files,
            hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn),
            suffix=suffix,
-            integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]),
-            repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]),
+            integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]),
+            repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]),
            auto_start=auto_start,
        )
 
@@ -310,8 +310,8 @@ class Jobs(BaseSDK):
         training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None,
         validation_files: OptionalNullable[List[str]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
-        integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET,
-        repositories: Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None,
+        integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET,
+        repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None,
         auto_start: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -347,8 +347,8 @@ class Jobs(BaseSDK):
            validation_files=validation_files,
            hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn),
            suffix=suffix,
-            integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]),
-            repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]),
+            integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]),
+            repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]),
            auto_start=auto_start,
        )
 
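Note: the jobs.py change renames the parameter annotations, not the wire format. By the same aliasing pattern visible in detailedjobout.py below (where DetailedJobOutIntegrations = WandbIntegrationOut), JobInIntegrations and JobInRepositories appear to point at the existing WandbIntegration and GithubRepositoryIn shapes, so call sites passing plain dicts should be unaffected. A hedged sketch; the `client.fine_tuning.jobs` accessor path and the file id are assumptions/placeholders.

from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

# Plain dicts still validate against the renamed aliases.
job = client.fine_tuning.jobs.create(
    model="open-mistral-7b",
    training_files=[{"file_id": "file-abc123", "weight": 1}],  # placeholder file id
    integrations=[{"project": "my-wandb-project", "api_key": "WANDB_KEY"}],  # wandb integration
    auto_start=False,
)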
mistralai/models/__init__.py CHANGED
@@ -1,8 +1,8 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestTypedDict
-from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestTypedDict
-from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict
+from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestMessages, AgentsCompletionStreamRequestMessagesTypedDict, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestToolChoice, AgentsCompletionStreamRequestTypedDict
+from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutObject, ArchiveFTModelOutTypedDict
 from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason
 from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice
@@ -17,29 +17,29 @@ from .delete_model_v1_models_model_id_deleteop import DeleteModelV1ModelsModelID
 from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict
 from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
-from .detailedjobout import DetailedJobOut, DetailedJobOutStatus, DetailedJobOutTypedDict
+from .detailedjobout import DetailedJobOut, DetailedJobOutIntegrations, DetailedJobOutIntegrationsTypedDict, DetailedJobOutObject, DetailedJobOutRepositories, DetailedJobOutRepositoriesTypedDict, DetailedJobOutStatus, DetailedJobOutTypedDict
 from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict, Inputs, InputsTypedDict
 from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict
 from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict
 from .eventout import EventOut, EventOutTypedDict
 from .files_api_routes_delete_fileop import FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict
 from .files_api_routes_retrieve_fileop import FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict
-from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict
-from .fileschema import FileSchema, FileSchemaTypedDict
+from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, FilesAPIRoutesUploadFilePurpose
+from .fileschema import FileSchema, FileSchemaPurpose, FileSchemaTypedDict
 from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict
 from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
 from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict
 from .finetuneablemodel import FineTuneableModel
 from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict
-from .ftmodelout import FTModelOut, FTModelOutTypedDict
+from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict
 from .function import Function, FunctionTypedDict
 from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict
-from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict
-from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict
+from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInType, GithubRepositoryInTypedDict
+from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutType, GithubRepositoryOutTypedDict
 from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
-from .jobin import JobIn, JobInTypedDict
+from .jobin import JobIn, JobInIntegrations, JobInIntegrationsTypedDict, JobInRepositories, JobInRepositoriesTypedDict, JobInTypedDict
 from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict
-from .jobout import JobOut, JobOutTypedDict, Status
+from .jobout import Integrations, IntegrationsTypedDict, JobOut, JobOutTypedDict, Object, Repositories, RepositoriesTypedDict, Status
 from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict
 from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import JobsAPIRoutesFineTuningCancelFineTuningJobRequest, JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict
 from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict
@@ -48,8 +48,8 @@ from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import JobsAPIRoutesFin
 from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict
 from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict
 from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict
-from .jobsout import JobsOut, JobsOutTypedDict
-from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict
+from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict
+from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutObject, LegacyJobMetadataOutTypedDict
 from .listfilesout import ListFilesOut, ListFilesOutTypedDict
 from .metricout import MetricOut, MetricOutTypedDict
 from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
@@ -57,26 +57,26 @@ from .modelcard import ModelCard, ModelCardTypedDict
 from .modellist import ModelList, ModelListTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats
 from .retrieve_model_v1_models_model_id_getop import RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict
-from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict
+from .retrievefileout import RetrieveFileOut, RetrieveFileOutPurpose, RetrieveFileOutTypedDict
 from .sampletype import SampleType
 from .sdkerror import SDKError
 from .security import Security, SecurityTypedDict
 from .source import Source
 from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
-from .tool import Tool, ToolTypedDict
-from .toolcall import ToolCall, ToolCallTypedDict
+from .tool import Tool, ToolToolTypes, ToolTypedDict
+from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes
 from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict
 from .trainingfile import TrainingFile, TrainingFileTypedDict
 from .trainingparameters import TrainingParameters, TrainingParametersTypedDict
 from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict
-from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict
+from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutObject, UnarchiveFTModelOutTypedDict
 from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict
-from .uploadfileout import UploadFileOut, UploadFileOutTypedDict
+from .uploadfileout import Purpose, UploadFileOut, UploadFileOutTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict
 from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict
-from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict
-from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict
+from .wandbintegration import WandbIntegration, WandbIntegrationType, WandbIntegrationTypedDict
+from .wandbintegrationout import Type, WandbIntegrationOut, WandbIntegrationOutTypedDict
 
-__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"]
+__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestMessages", "AgentsCompletionStreamRequestMessagesTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutObject", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutIntegrations", "DetailedJobOutIntegrationsTypedDict", "DetailedJobOutObject", "DetailedJobOutRepositories", "DetailedJobOutRepositoriesTypedDict", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutObject", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaPurpose", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FilesAPIRoutesUploadFilePurpose", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInType", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutType", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "Integrations", "IntegrationsTypedDict", "JobIn", "JobInIntegrations", "JobInIntegrationsTypedDict", "JobInRepositories", "JobInRepositoriesTypedDict", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutObject", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "Object", "Purpose", "QueryParamStatus", "Repositories", "RepositoriesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutPurpose", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict"]
mistralai/models/agentscompletionstreamrequest.py CHANGED
@@ -1,10 +1,16 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .responseformat import ResponseFormat, ResponseFormatTypedDict
+from .tool import Tool, ToolTypedDict
+from .toolmessage import ToolMessage, ToolMessageTypedDict
+from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import List, Optional, TypedDict, Union
-from typing_extensions import NotRequired
+from mistralai.utils import get_discriminator
+from pydantic import Discriminator, Tag, model_serializer
+from typing import List, Literal, Optional, TypedDict, Union
+from typing_extensions import Annotated, NotRequired
 
 
 AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]]
@@ -15,18 +21,19 @@ AgentsCompletionStreamRequestStop = Union[str, List[str]]
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
 
 
+AgentsCompletionStreamRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+AgentsCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
+
+AgentsCompletionStreamRequestToolChoice = Literal["auto", "none", "any"]
+
 class AgentsCompletionStreamRequestTypedDict(TypedDict):
-    model: Nullable[str]
-    r"""ID of the model to use. Only compatible for now with:
-    - `codestral-2405`
-    - `codestral-latest`
-    """
-    prompt: str
-    r"""The text/code to complete."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
-    top_p: NotRequired[float]
-    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+    messages: List[AgentsCompletionStreamRequestMessagesTypedDict]
+    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+    agent_id: str
+    r"""The ID of the agent to use for this completion."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
     min_tokens: NotRequired[Nullable[int]]
@@ -36,22 +43,16 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
-    suffix: NotRequired[Nullable[str]]
-    r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+    response_format: NotRequired[ResponseFormatTypedDict]
+    tools: NotRequired[Nullable[List[ToolTypedDict]]]
+    tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoice]
 
 
 class AgentsCompletionStreamRequest(BaseModel):
-    model: Nullable[str]
-    r"""ID of the model to use. Only compatible for now with:
-    - `codestral-2405`
-    - `codestral-latest`
-    """
-    prompt: str
-    r"""The text/code to complete."""
-    temperature: Optional[float] = 0.7
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
-    top_p: Optional[float] = 1
-    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+    messages: List[AgentsCompletionStreamRequestMessages]
+    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+    agent_id: str
+    r"""The ID of the agent to use for this completion."""
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
     min_tokens: OptionalNullable[int] = UNSET
@@ -61,13 +62,14 @@ class AgentsCompletionStreamRequest(BaseModel):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
-    suffix: OptionalNullable[str] = UNSET
-    r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+    response_format: Optional[ResponseFormat] = None
+    tools: OptionalNullable[List[Tool]] = UNSET
+    tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = "auto"
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
+        optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+        nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"]
         null_default_fields = []
 
         serialized = handler(self)
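
Note: the new AgentsCompletionStreamRequestMessages union dispatches on each message's "role" key via get_discriminator, so plain dicts validate into the matching message model. A sketch of direct model construction (the agent id is a placeholder):

from mistralai.models import AgentsCompletionStreamRequest

req = AgentsCompletionStreamRequest(
    agent_id="ag-your-agent-id",  # placeholder
    messages=[
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi, how can I help?"},
        {"role": "user", "content": "Write a haiku."},
    ],
)
# Each dict was validated against the model tagged with its role.
print([type(m).__name__ for m in req.messages])
# -> ['UserMessage', 'AssistantMessage', 'UserMessage']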
mistralai/models/archiveftmodelout.py CHANGED
@@ -3,10 +3,12 @@
 from __future__ import annotations
 from mistralai.types import BaseModel
 import pydantic
-from typing import Final, Optional, TypedDict
+from typing import Final, Literal, Optional, TypedDict
 from typing_extensions import Annotated, NotRequired
 
 
+ArchiveFTModelOutObject = Literal["model"]
+
 class ArchiveFTModelOutTypedDict(TypedDict):
     id: str
     archived: NotRequired[bool]
@@ -14,6 +16,6 @@ class ArchiveFTModelOutTypedDict(TypedDict):
 
 class ArchiveFTModelOut(BaseModel):
     id: str
-    OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore
+    OBJECT: Annotated[Final[Optional[ArchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore
     archived: Optional[bool] = True
 
mistralai/models/chatcompletionchoice.py CHANGED
@@ -3,20 +3,19 @@
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from mistralai.types import BaseModel
-from typing import Literal, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import Literal, TypedDict
 
 
 FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]
 
 class ChatCompletionChoiceTypedDict(TypedDict):
     index: int
+    message: AssistantMessageTypedDict
     finish_reason: FinishReason
-    message: NotRequired[AssistantMessageTypedDict]
 
 
 class ChatCompletionChoice(BaseModel):
     index: int
+    message: AssistantMessage
     finish_reason: FinishReason
-    message: Optional[AssistantMessage] = None
 
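Note: with `message` promoted from NotRequired/Optional to required, 1.0.2 type checks no longer force a None guard on each choice. A sketch (the model name is just an example, the key a placeholder):

from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

resp = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Ping"}],
)
if resp and resp.choices:
    # choice.message is guaranteed present in 1.0.2; in 1.0.1 it was Optional.
    print(resp.choices[0].message.content)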
mistralai/models/chatcompletionrequest.py CHANGED
@@ -32,7 +32,7 @@ ToolChoice = Literal["auto", "none", "any"]
 
 class ChatCompletionRequestTypedDict(TypedDict):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: NotRequired[float]
@@ -58,7 +58,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
 
 class ChatCompletionRequest(BaseModel):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: Optional[float] = 0.7
mistralai/models/chatcompletionstreamrequest.py CHANGED
@@ -32,7 +32,7 @@ ChatCompletionStreamRequestToolChoice = Literal["auto", "none", "any"]
 
 class ChatCompletionStreamRequestTypedDict(TypedDict):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionStreamRequestMessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: NotRequired[float]
@@ -57,7 +57,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
 
 class ChatCompletionStreamRequest(BaseModel):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionStreamRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: Optional[float] = 0.7
mistralai/models/deltamessage.py CHANGED
@@ -10,19 +10,19 @@ from typing_extensions import NotRequired
 
 class DeltaMessageTypedDict(TypedDict):
     role: NotRequired[str]
-    content: NotRequired[str]
+    content: NotRequired[Nullable[str]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
 
 
 class DeltaMessage(BaseModel):
     role: Optional[str] = None
-    content: Optional[str] = None
+    content: OptionalNullable[str] = UNSET
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["role", "content", "tool_calls"]
-        nullable_fields = ["tool_calls"]
+        nullable_fields = ["content", "tool_calls"]
         null_default_fields = []
 
         serialized = handler(self)
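
Note: DeltaMessage.content becoming OptionalNullable matters for streaming consumers, since a chunk's delta can now carry an explicit null content (for example when it only carries tool_calls). Concatenation loops should guard; a sketch (model name is an example, key a placeholder):

from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

parts = []
for event in client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Count to three."}],
):
    delta = event.data.choices[0].delta
    if delta.content:  # skip None/UNSET deltas (e.g. tool-call-only chunks)
        parts.append(delta.content)
print("".join(parts))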
mistralai/models/detailedjobout.py CHANGED
@@ -17,6 +17,20 @@ from typing_extensions import Annotated, NotRequired
 
 DetailedJobOutStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"]
 
+DetailedJobOutObject = Literal["job"]
+
+DetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict
+
+
+DetailedJobOutIntegrations = WandbIntegrationOut
+
+
+DetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict
+
+
+DetailedJobOutRepositories = GithubRepositoryOut
+
+
 class DetailedJobOutTypedDict(TypedDict):
     id: str
     auto_start: bool
@@ -31,9 +45,9 @@ class DetailedJobOutTypedDict(TypedDict):
     validation_files: NotRequired[Nullable[List[str]]]
     fine_tuned_model: NotRequired[Nullable[str]]
     suffix: NotRequired[Nullable[str]]
-    integrations: NotRequired[Nullable[List[WandbIntegrationOutTypedDict]]]
+    integrations: NotRequired[Nullable[List[DetailedJobOutIntegrationsTypedDict]]]
     trained_tokens: NotRequired[Nullable[int]]
-    repositories: NotRequired[List[GithubRepositoryOutTypedDict]]
+    repositories: NotRequired[List[DetailedJobOutRepositoriesTypedDict]]
     metadata: NotRequired[Nullable[JobMetadataOutTypedDict]]
     events: NotRequired[List[EventOutTypedDict]]
     r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here."""
@@ -52,12 +66,12 @@ class DetailedJobOut(BaseModel):
     modified_at: int
     training_files: List[str]
     validation_files: OptionalNullable[List[str]] = UNSET
-    OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job" # type: ignore
+    OBJECT: Annotated[Final[Optional[DetailedJobOutObject]], pydantic.Field(alias="object")] = "job" # type: ignore
     fine_tuned_model: OptionalNullable[str] = UNSET
     suffix: OptionalNullable[str] = UNSET
-    integrations: OptionalNullable[List[WandbIntegrationOut]] = UNSET
+    integrations: OptionalNullable[List[DetailedJobOutIntegrations]] = UNSET
     trained_tokens: OptionalNullable[int] = UNSET
-    repositories: Optional[List[GithubRepositoryOut]] = None
+    repositories: Optional[List[DetailedJobOutRepositories]] = None
     metadata: OptionalNullable[JobMetadataOut] = UNSET
     events: Optional[List[EventOut]] = None
     r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here."""