mistralai 0.5.5a50__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. mistralai/agents.py +434 -0
  2. mistralai/basesdk.py +43 -6
  3. mistralai/chat.py +29 -34
  4. mistralai/client.py +1 -1
  5. mistralai/embeddings.py +4 -4
  6. mistralai/files.py +10 -10
  7. mistralai/fim.py +17 -18
  8. mistralai/fine_tuning.py +10 -849
  9. mistralai/jobs.py +844 -0
  10. mistralai/models/__init__.py +6 -4
  11. mistralai/models/agentscompletionrequest.py +96 -0
  12. mistralai/models/agentscompletionstreamrequest.py +92 -0
  13. mistralai/models/assistantmessage.py +4 -9
  14. mistralai/models/chatcompletionchoice.py +4 -15
  15. mistralai/models/chatcompletionrequest.py +11 -16
  16. mistralai/models/chatcompletionstreamrequest.py +11 -16
  17. mistralai/models/completionresponsestreamchoice.py +4 -9
  18. mistralai/models/delete_model_v1_models_model_id_deleteop.py +2 -0
  19. mistralai/models/deltamessage.py +4 -9
  20. mistralai/models/detailedjobout.py +4 -9
  21. mistralai/models/embeddingrequest.py +4 -9
  22. mistralai/models/eventout.py +4 -9
  23. mistralai/models/fileschema.py +4 -9
  24. mistralai/models/fimcompletionrequest.py +11 -16
  25. mistralai/models/fimcompletionstreamrequest.py +11 -16
  26. mistralai/models/ftmodelout.py +4 -9
  27. mistralai/models/functioncall.py +9 -3
  28. mistralai/models/githubrepositoryin.py +4 -9
  29. mistralai/models/githubrepositoryout.py +4 -9
  30. mistralai/models/httpvalidationerror.py +1 -1
  31. mistralai/models/jobin.py +4 -9
  32. mistralai/models/jobmetadataout.py +4 -9
  33. mistralai/models/jobout.py +4 -9
  34. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +2 -0
  35. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +1 -59
  36. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +4 -9
  37. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +2 -0
  38. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +2 -0
  39. mistralai/models/legacyjobmetadataout.py +4 -9
  40. mistralai/models/metricout.py +4 -9
  41. mistralai/models/modelcard.py +4 -9
  42. mistralai/models/retrieve_model_v1_models_model_id_getop.py +2 -0
  43. mistralai/models/retrievefileout.py +4 -9
  44. mistralai/models/security.py +4 -4
  45. mistralai/models/toolmessage.py +4 -9
  46. mistralai/models/trainingparameters.py +4 -9
  47. mistralai/models/trainingparametersin.py +4 -9
  48. mistralai/models/updateftmodelin.py +4 -9
  49. mistralai/models/uploadfileout.py +4 -9
  50. mistralai/models/wandbintegration.py +4 -9
  51. mistralai/models/wandbintegrationout.py +4 -9
  52. mistralai/models_.py +24 -24
  53. mistralai/sdk.py +14 -6
  54. mistralai/sdkconfiguration.py +5 -4
  55. mistralai/types/basemodel.py +10 -6
  56. mistralai/utils/__init__.py +4 -0
  57. mistralai/utils/eventstreaming.py +8 -9
  58. mistralai/utils/logger.py +16 -0
  59. mistralai/utils/retries.py +2 -2
  60. mistralai/utils/security.py +5 -2
  61. {mistralai-0.5.5a50.dist-info → mistralai-1.0.0.dist-info}/METADATA +136 -67
  62. {mistralai-0.5.5a50.dist-info → mistralai-1.0.0.dist-info}/RECORD +105 -98
  63. mistralai_azure/basesdk.py +42 -4
  64. mistralai_azure/chat.py +15 -20
  65. mistralai_azure/models/__init__.py +3 -3
  66. mistralai_azure/models/assistantmessage.py +4 -9
  67. mistralai_azure/models/chatcompletionchoice.py +4 -15
  68. mistralai_azure/models/chatcompletionrequest.py +7 -12
  69. mistralai_azure/models/chatcompletionstreamrequest.py +7 -12
  70. mistralai_azure/models/completionresponsestreamchoice.py +4 -9
  71. mistralai_azure/models/deltamessage.py +4 -9
  72. mistralai_azure/models/functioncall.py +9 -3
  73. mistralai_azure/models/httpvalidationerror.py +1 -1
  74. mistralai_azure/models/toolmessage.py +4 -9
  75. mistralai_azure/sdk.py +7 -2
  76. mistralai_azure/sdkconfiguration.py +5 -4
  77. mistralai_azure/types/basemodel.py +10 -6
  78. mistralai_azure/utils/__init__.py +4 -0
  79. mistralai_azure/utils/eventstreaming.py +8 -9
  80. mistralai_azure/utils/logger.py +16 -0
  81. mistralai_azure/utils/retries.py +2 -2
  82. mistralai_gcp/basesdk.py +42 -4
  83. mistralai_gcp/chat.py +12 -17
  84. mistralai_gcp/fim.py +12 -13
  85. mistralai_gcp/models/__init__.py +3 -3
  86. mistralai_gcp/models/assistantmessage.py +4 -9
  87. mistralai_gcp/models/chatcompletionchoice.py +4 -15
  88. mistralai_gcp/models/chatcompletionrequest.py +9 -14
  89. mistralai_gcp/models/chatcompletionstreamrequest.py +9 -14
  90. mistralai_gcp/models/completionresponsestreamchoice.py +4 -9
  91. mistralai_gcp/models/deltamessage.py +4 -9
  92. mistralai_gcp/models/fimcompletionrequest.py +11 -16
  93. mistralai_gcp/models/fimcompletionstreamrequest.py +11 -16
  94. mistralai_gcp/models/functioncall.py +9 -3
  95. mistralai_gcp/models/httpvalidationerror.py +1 -1
  96. mistralai_gcp/models/toolmessage.py +4 -9
  97. mistralai_gcp/sdk.py +9 -0
  98. mistralai_gcp/sdkconfiguration.py +5 -4
  99. mistralai_gcp/types/basemodel.py +10 -6
  100. mistralai_gcp/utils/__init__.py +4 -0
  101. mistralai_gcp/utils/eventstreaming.py +8 -9
  102. mistralai_gcp/utils/logger.py +16 -0
  103. mistralai_gcp/utils/retries.py +2 -2
  104. {mistralai-0.5.5a50.dist-info → mistralai-1.0.0.dist-info}/LICENSE +0 -0
  105. {mistralai-0.5.5a50.dist-info → mistralai-1.0.0.dist-info}/WHEEL +0 -0
mistralai_gcp/chat.py CHANGED
@@ -1,10 +1,9 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from .basesdk import BaseSDK
- from mistralai_gcp import models
+ from mistralai_gcp import models, utils
  from mistralai_gcp._hooks import HookContext
- from mistralai_gcp.types import OptionalNullable, UNSET
- import mistralai_gcp.utils as utils
+ from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
  from mistralai_gcp.utils import eventstreaming
  from typing import Any, AsyncGenerator, Generator, List, Optional, Union

@@ -14,8 +13,8 @@ class Chat(BaseSDK):

      def stream(
          self, *,
+         model: Nullable[str],
          messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -34,8 +33,8 @@ class Chat(BaseSDK):

          Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -127,8 +126,8 @@ class Chat(BaseSDK):

      async def stream_async(
          self, *,
+         model: Nullable[str],
          messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -147,8 +146,8 @@ class Chat(BaseSDK):

          Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -238,10 +237,10 @@ class Chat(BaseSDK):



-     def create(
+     def complete(
          self, *,
+         model: Nullable[str],
          messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]],
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -258,10 +257,8 @@ class Chat(BaseSDK):
      ) -> Optional[models.ChatCompletionResponse]:
          r"""Chat Completion

-         Chat Completion
-
-         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -350,10 +347,10 @@ class Chat(BaseSDK):



-     async def create_async(
+     async def complete_async(
          self, *,
+         model: Nullable[str],
          messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]],
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -370,10 +367,8 @@ class Chat(BaseSDK):
      ) -> Optional[models.ChatCompletionResponse]:
          r"""Chat Completion

-         Chat Completion
-
-         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
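
For callers, the net effect of this chat.py diff is that `create`/`create_async` are renamed to `complete`/`complete_async` and `model` moves from an optional keyword to a required one. A minimal sketch of the new call shape, assuming the client entry point exported by mistralai_gcp; the client construction arguments and the model ID below are illustrative and are not shown in this diff:

    from mistralai_gcp import MistralGoogleCloud  # assumed export; see mistralai_gcp/sdk.py

    client = MistralGoogleCloud()  # project/region/credential arguments omitted; not part of this diff

    # 0.5.x style (removed):
    #   client.chat.create(messages=[...], model="...")

    # 1.0.0 style: method is `complete`, and `model` is a required keyword.
    res = client.chat.complete(
        model="mistral-large-2407",  # illustrative model ID
        messages=[{"role": "user", "content": "Summarize this diff in one sentence."}],
    )
    if res is not None and res.choices:
        print(res.choices[0].message)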
mistralai_gcp/fim.py CHANGED
@@ -1,10 +1,9 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from .basesdk import BaseSDK
- from mistralai_gcp import models
+ from mistralai_gcp import models, utils
  from mistralai_gcp._hooks import HookContext
- from mistralai_gcp.types import OptionalNullable, UNSET
- import mistralai_gcp.utils as utils
+ from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
  from mistralai_gcp.utils import eventstreaming
  from typing import Any, AsyncGenerator, Generator, Optional, Union

@@ -14,8 +13,8 @@ class Fim(BaseSDK):

      def stream(
          self, *,
+         model: Nullable[str],
          prompt: str,
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -32,8 +31,8 @@

          Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-         :param prompt: The text/code to complete.
          :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+         :param prompt: The text/code to complete.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -121,8 +120,8 @@

      async def stream_async(
          self, *,
+         model: Nullable[str],
          prompt: str,
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -139,8 +138,8 @@ class Fim(BaseSDK):

          Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-         :param prompt: The text/code to complete.
          :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+         :param prompt: The text/code to complete.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -226,10 +225,10 @@



-     def create(
+     def complete(
          self, *,
+         model: Nullable[str],
          prompt: str,
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -246,8 +245,8 @@

          FIM completion.

-         :param prompt: The text/code to complete.
          :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+         :param prompt: The text/code to complete.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -332,10 +331,10 @@



-     async def create_async(
+     async def complete_async(
          self, *,
+         model: Nullable[str],
          prompt: str,
-         model: OptionalNullable[str] = UNSET,
          temperature: Optional[float] = 0.7,
          top_p: Optional[float] = 1,
          max_tokens: OptionalNullable[int] = UNSET,
@@ -352,8 +351,8 @@

          FIM completion.

-         :param prompt: The text/code to complete.
          :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+         :param prompt: The text/code to complete.
          :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
          :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
          :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
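
The fim.py changes mirror chat.py: `create`/`create_async` become `complete`/`complete_async`, and `model` is now a required keyword. A hedged sketch under the same client assumption as above; the `suffix` field name is taken from the FIMCompletionRequest field list further down, not from the signature lines visible here:

    res = client.fim.complete(
        model="codestral-latest",    # per the docstring, only codestral models are accepted
        prompt="def fibonacci(n):",
        suffix="    return result",  # optional field listed on FIMCompletionRequest
    )
    print(res)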
mistralai_gcp/models/__init__.py CHANGED
@@ -1,7 +1,7 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict
- from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict, Message, MessageTypedDict
+ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict
  from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestTypedDict
  from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict
  from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice
@@ -14,7 +14,7 @@ from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop
  from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
  from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict
  from .function import Function, FunctionTypedDict
- from .functioncall import FunctionCall, FunctionCallTypedDict
+ from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict
  from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
  from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats
  from .sdkerror import SDKError
@@ -28,4 +28,4 @@ from .usageinfo import UsageInfo, UsageInfoTypedDict
  from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict
  from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict

- __all__ = ["AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Message", "MessageTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
+ __all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
mistralai_gcp/models/assistantmessage.py CHANGED
@@ -39,18 +39,13 @@ class AssistantMessage(BaseModel):
              k = f.alias or n
              val = serialized.get(k)

+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
              if val is not None and val != UNSET_SENTINEL:
                  m[k] = val
              elif val != UNSET_SENTINEL and (
-                 not k in optional_fields
-                 or (
-                     k in optional_fields
-                     and k in nullable_fields
-                     and (
-                         self.__pydantic_fields_set__.intersection({n})
-                         or k in null_default_fields
-                     ) # pylint: disable=no-member
-                 )
+                 not k in optional_fields or (optional_nullable and is_set)
              ):
                  m[k] = val

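
This serializer rewrite, repeated across the model files below, only extracts the nested membership test into two named booleans (`optional_nullable` and `is_set`); the emission rule itself is unchanged. A quick standalone check (not SDK code) that the flattened condition agrees with the old nested one for every combination of the flags involved:

    from itertools import product

    for in_optional, in_nullable, set_or_default in product((True, False), repeat=3):
        old = (not in_optional) or (in_optional and in_nullable and set_or_default)
        optional_nullable = in_optional and in_nullable
        new = (not in_optional) or (optional_nullable and set_or_default)
        assert old == new  # identical truth table, so serialization output is unchanged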
mistralai_gcp/models/chatcompletionchoice.py CHANGED
@@ -2,14 +2,9 @@

  from __future__ import annotations
  from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
- from .systemmessage import SystemMessage, SystemMessageTypedDict
- from .toolmessage import ToolMessage, ToolMessageTypedDict
- from .usermessage import UserMessage, UserMessageTypedDict
  from mistralai_gcp.types import BaseModel
- from mistralai_gcp.utils import get_discriminator
- from pydantic import Discriminator, Tag
- from typing import Literal, Optional, TypedDict, Union
- from typing_extensions import Annotated, NotRequired
+ from typing import Literal, Optional, TypedDict
+ from typing_extensions import NotRequired


  ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]
@@ -17,17 +12,11 @@ ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "er
  class ChatCompletionChoiceTypedDict(TypedDict):
      index: int
      finish_reason: ChatCompletionChoiceFinishReason
-     message: NotRequired[MessageTypedDict]
+     message: NotRequired[AssistantMessageTypedDict]


  class ChatCompletionChoice(BaseModel):
      index: int
      finish_reason: ChatCompletionChoiceFinishReason
-     message: Optional[Message] = None
+     message: Optional[AssistantMessage] = None

-
- MessageTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
-
-
- Message = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
-
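
In 1.0.0 a chat choice's `message` is typed as a plain `AssistantMessage` rather than a role-discriminated `Message` union, so downstream code no longer needs to narrow the union. An illustrative construction (field values are made up):

    from mistralai_gcp.models import AssistantMessage, ChatCompletionChoice

    choice = ChatCompletionChoice(
        index=0,
        finish_reason="stop",
        message=AssistantMessage(content="Hello!"),
    )
    assert isinstance(choice.message, AssistantMessage)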
mistralai_gcp/models/chatcompletionrequest.py CHANGED
@@ -17,10 +17,10 @@ from typing_extensions import Annotated, NotRequired
  ChatCompletionRequestToolChoice = Literal["auto", "none", "any"]

  class ChatCompletionRequestTypedDict(TypedDict):
+     model: Nullable[str]
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      messages: List[ChatCompletionRequestMessagesTypedDict]
      r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-     model: NotRequired[Nullable[str]]
-     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      temperature: NotRequired[float]
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: NotRequired[float]
@@ -41,10 +41,10 @@ class ChatCompletionRequestTypedDict(TypedDict):


  class ChatCompletionRequest(BaseModel):
+     model: Nullable[str]
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      messages: List[ChatCompletionRequestMessages]
      r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-     model: OptionalNullable[str] = UNSET
-     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      temperature: Optional[float] = 0.7
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: Optional[float] = 1
@@ -65,7 +65,7 @@ class ChatCompletionRequest(BaseModel):

      @model_serializer(mode="wrap")
      def serialize_model(self, handler):
-         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+         optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
          nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
          null_default_fields = []

@@ -77,18 +77,13 @@ class ChatCompletionRequest(BaseModel):
              k = f.alias or n
              val = serialized.get(k)

+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
              if val is not None and val != UNSET_SENTINEL:
                  m[k] = val
              elif val != UNSET_SENTINEL and (
-                 not k in optional_fields
-                 or (
-                     k in optional_fields
-                     and k in nullable_fields
-                     and (
-                         self.__pydantic_fields_set__.intersection({n})
-                         or k in null_default_fields
-                     ) # pylint: disable=no-member
-                 )
+                 not k in optional_fields or (optional_nullable and is_set)
              ):
                  m[k] = val

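
Because `model` moves from `OptionalNullable[str] = UNSET` to a required `Nullable[str]` field, constructing the request model without it now fails validation. A hedged sketch; the model ID and prompt are illustrative:

    import pydantic
    from mistralai_gcp.models import ChatCompletionRequest

    ok = ChatCompletionRequest(
        model="mistral-large-2407",  # illustrative model ID
        messages=[{"role": "user", "content": "hi"}],
    )

    try:
        ChatCompletionRequest(messages=[{"role": "user", "content": "hi"}])
    except pydantic.ValidationError as err:
        print("1.0.0 requires `model`:", err.error_count(), "validation error(s)")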
mistralai_gcp/models/chatcompletionstreamrequest.py CHANGED
@@ -17,10 +17,10 @@ from typing_extensions import Annotated, NotRequired
  ToolChoice = Literal["auto", "none", "any"]

  class ChatCompletionStreamRequestTypedDict(TypedDict):
+     model: Nullable[str]
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      messages: List[MessagesTypedDict]
      r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-     model: NotRequired[Nullable[str]]
-     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      temperature: NotRequired[float]
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: NotRequired[float]
@@ -40,10 +40,10 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):


  class ChatCompletionStreamRequest(BaseModel):
+     model: Nullable[str]
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      messages: List[Messages]
      r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-     model: OptionalNullable[str] = UNSET
-     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
      temperature: Optional[float] = 0.7
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: Optional[float] = 1
@@ -63,7 +63,7 @@ class ChatCompletionStreamRequest(BaseModel):

      @model_serializer(mode="wrap")
      def serialize_model(self, handler):
-         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+         optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
          nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
          null_default_fields = []

@@ -75,18 +75,13 @@ class ChatCompletionStreamRequest(BaseModel):
              k = f.alias or n
              val = serialized.get(k)

+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
              if val is not None and val != UNSET_SENTINEL:
                  m[k] = val
              elif val != UNSET_SENTINEL and (
-                 not k in optional_fields
-                 or (
-                     k in optional_fields
-                     and k in nullable_fields
-                     and (
-                         self.__pydantic_fields_set__.intersection({n})
-                         or k in null_default_fields
-                     ) # pylint: disable=no-member
-                 )
+                 not k in optional_fields or (optional_nullable and is_set)
              ):
                  m[k] = val

mistralai_gcp/models/completionresponsestreamchoice.py CHANGED
@@ -34,18 +34,13 @@ class CompletionResponseStreamChoice(BaseModel):
              k = f.alias or n
              val = serialized.get(k)

+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
              if val is not None and val != UNSET_SENTINEL:
                  m[k] = val
              elif val != UNSET_SENTINEL and (
-                 not k in optional_fields
-                 or (
-                     k in optional_fields
-                     and k in nullable_fields
-                     and (
-                         self.__pydantic_fields_set__.intersection({n})
-                         or k in null_default_fields
-                     ) # pylint: disable=no-member
-                 )
+                 not k in optional_fields or (optional_nullable and is_set)
              ):
                  m[k] = val

mistralai_gcp/models/deltamessage.py CHANGED
@@ -33,18 +33,13 @@ class DeltaMessage(BaseModel):
              k = f.alias or n
              val = serialized.get(k)

+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
              if val is not None and val != UNSET_SENTINEL:
                  m[k] = val
              elif val != UNSET_SENTINEL and (
-                 not k in optional_fields
-                 or (
-                     k in optional_fields
-                     and k in nullable_fields
-                     and (
-                         self.__pydantic_fields_set__.intersection({n})
-                         or k in null_default_fields
-                     ) # pylint: disable=no-member
-                 )
+                 not k in optional_fields or (optional_nullable and is_set)
              ):
                  m[k] = val

mistralai_gcp/models/fimcompletionrequest.py CHANGED
@@ -8,13 +8,13 @@ from typing_extensions import NotRequired


  class FIMCompletionRequestTypedDict(TypedDict):
-     prompt: str
-     r"""The text/code to complete."""
-     model: NotRequired[Nullable[str]]
+     model: Nullable[str]
      r"""ID of the model to use. Only compatible for now with:
      - `codestral-2405`
      - `codestral-latest`
      """
+     prompt: str
+     r"""The text/code to complete."""
      temperature: NotRequired[float]
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: NotRequired[float]
@@ -34,13 +34,13 @@ class FIMCompletionRequestTypedDict(TypedDict):


  class FIMCompletionRequest(BaseModel):
-     prompt: str
-     r"""The text/code to complete."""
-     model: OptionalNullable[str] = UNSET
+     model: Nullable[str]
      r"""ID of the model to use. Only compatible for now with:
      - `codestral-2405`
      - `codestral-latest`
      """
+     prompt: str
+     r"""The text/code to complete."""
      temperature: Optional[float] = 0.7
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: Optional[float] = 1
@@ -60,7 +60,7 @@ class FIMCompletionRequest(BaseModel):

      @model_serializer(mode="wrap")
      def serialize_model(self, handler):
-         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
+         optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
          nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
          null_default_fields = []

@@ -72,18 +72,13 @@ class FIMCompletionRequest(BaseModel):
              k = f.alias or n
              val = serialized.get(k)

+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
              if val is not None and val != UNSET_SENTINEL:
                  m[k] = val
              elif val != UNSET_SENTINEL and (
-                 not k in optional_fields
-                 or (
-                     k in optional_fields
-                     and k in nullable_fields
-                     and (
-                         self.__pydantic_fields_set__.intersection({n})
-                         or k in null_default_fields
-                     ) # pylint: disable=no-member
-                 )
+                 not k in optional_fields or (optional_nullable and is_set)
              ):
                  m[k] = val

mistralai_gcp/models/fimcompletionstreamrequest.py CHANGED
@@ -8,13 +8,13 @@ from typing_extensions import NotRequired


  class FIMCompletionStreamRequestTypedDict(TypedDict):
-     prompt: str
-     r"""The text/code to complete."""
-     model: NotRequired[Nullable[str]]
+     model: Nullable[str]
      r"""ID of the model to use. Only compatible for now with:
      - `codestral-2405`
      - `codestral-latest`
      """
+     prompt: str
+     r"""The text/code to complete."""
      temperature: NotRequired[float]
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: NotRequired[float]
@@ -33,13 +33,13 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):


  class FIMCompletionStreamRequest(BaseModel):
-     prompt: str
-     r"""The text/code to complete."""
-     model: OptionalNullable[str] = UNSET
+     model: Nullable[str]
      r"""ID of the model to use. Only compatible for now with:
      - `codestral-2405`
      - `codestral-latest`
      """
+     prompt: str
+     r"""The text/code to complete."""
      temperature: Optional[float] = 0.7
      r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
      top_p: Optional[float] = 1
@@ -58,7 +58,7 @@ class FIMCompletionStreamRequest(BaseModel):

      @model_serializer(mode="wrap")
      def serialize_model(self, handler):
-         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
+         optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
          nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
          null_default_fields = []

@@ -70,18 +70,13 @@ class FIMCompletionStreamRequest(BaseModel):
              k = f.alias or n
              val = serialized.get(k)

+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
              if val is not None and val != UNSET_SENTINEL:
                  m[k] = val
              elif val != UNSET_SENTINEL and (
-                 not k in optional_fields
-                 or (
-                     k in optional_fields
-                     and k in nullable_fields
-                     and (
-                         self.__pydantic_fields_set__.intersection({n})
-                         or k in null_default_fields
-                     ) # pylint: disable=no-member
-                 )
+                 not k in optional_fields or (optional_nullable and is_set)
              ):
                  m[k] = val

mistralai_gcp/models/functioncall.py CHANGED
@@ -2,15 +2,21 @@

  from __future__ import annotations
  from mistralai_gcp.types import BaseModel
- from typing import TypedDict
+ from typing import Any, Dict, TypedDict, Union


  class FunctionCallTypedDict(TypedDict):
      name: str
-     arguments: str
+     arguments: ArgumentsTypedDict


  class FunctionCall(BaseModel):
      name: str
-     arguments: str
+     arguments: Arguments

+
+ ArgumentsTypedDict = Union[Dict[str, Any], str]
+
+
+ Arguments = Union[Dict[str, Any], str]
+
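
`FunctionCall.arguments` widens from `str` to `Union[Dict[str, Any], str]`, so tool-call arguments may arrive either as a JSON string or as an already-parsed dict. Illustrative use of the new union (the values are made up):

    from mistralai_gcp.models import FunctionCall

    as_string = FunctionCall(name="get_weather", arguments='{"city": "Paris"}')
    as_dict = FunctionCall(name="get_weather", arguments={"city": "Paris"})

    for call in (as_string, as_dict):
        print(type(call.arguments).__name__)  # str, then dict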
mistralai_gcp/models/httpvalidationerror.py CHANGED
@@ -2,8 +2,8 @@

  from __future__ import annotations
  from .validationerror import ValidationError
+ from mistralai_gcp import utils
  from mistralai_gcp.types import BaseModel
- import mistralai_gcp.utils as utils
  from typing import List, Optional

  class HTTPValidationErrorData(BaseModel):