mistralai 1.0.3__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (230)
  1. mistralai/__init__.py +4 -0
  2. mistralai/_hooks/sdkhooks.py +23 -4
  3. mistralai/_hooks/types.py +27 -9
  4. mistralai/_version.py +12 -0
  5. mistralai/agents.py +334 -164
  6. mistralai/basesdk.py +90 -5
  7. mistralai/batch.py +17 -0
  8. mistralai/chat.py +316 -166
  9. mistralai/classifiers.py +396 -0
  10. mistralai/embeddings.py +79 -55
  11. mistralai/files.py +487 -194
  12. mistralai/fim.py +206 -132
  13. mistralai/fine_tuning.py +3 -2
  14. mistralai/jobs.py +392 -263
  15. mistralai/mistral_jobs.py +733 -0
  16. mistralai/models/__init__.py +593 -50
  17. mistralai/models/agentscompletionrequest.py +70 -17
  18. mistralai/models/agentscompletionstreamrequest.py +72 -17
  19. mistralai/models/apiendpoint.py +9 -0
  20. mistralai/models/archiveftmodelout.py +15 -5
  21. mistralai/models/assistantmessage.py +22 -10
  22. mistralai/models/{modelcard.py → basemodelcard.py} +53 -14
  23. mistralai/models/batcherror.py +17 -0
  24. mistralai/models/batchjobin.py +58 -0
  25. mistralai/models/batchjobout.py +117 -0
  26. mistralai/models/batchjobsout.py +30 -0
  27. mistralai/models/batchjobstatus.py +15 -0
  28. mistralai/models/chatclassificationrequest.py +104 -0
  29. mistralai/models/chatcompletionchoice.py +13 -6
  30. mistralai/models/chatcompletionrequest.py +86 -21
  31. mistralai/models/chatcompletionresponse.py +8 -4
  32. mistralai/models/chatcompletionstreamrequest.py +88 -21
  33. mistralai/models/checkpointout.py +4 -3
  34. mistralai/models/classificationobject.py +21 -0
  35. mistralai/models/classificationrequest.py +59 -0
  36. mistralai/models/classificationresponse.py +21 -0
  37. mistralai/models/completionchunk.py +12 -5
  38. mistralai/models/completionevent.py +2 -3
  39. mistralai/models/completionresponsestreamchoice.py +22 -8
  40. mistralai/models/contentchunk.py +13 -10
  41. mistralai/models/delete_model_v1_models_model_id_deleteop.py +5 -5
  42. mistralai/models/deletefileout.py +4 -3
  43. mistralai/models/deletemodelout.py +5 -4
  44. mistralai/models/deltamessage.py +23 -11
  45. mistralai/models/detailedjobout.py +70 -12
  46. mistralai/models/embeddingrequest.py +14 -9
  47. mistralai/models/embeddingresponse.py +7 -3
  48. mistralai/models/embeddingresponsedata.py +5 -4
  49. mistralai/models/eventout.py +11 -6
  50. mistralai/models/filepurpose.py +8 -0
  51. mistralai/models/files_api_routes_delete_fileop.py +5 -5
  52. mistralai/models/files_api_routes_download_fileop.py +16 -0
  53. mistralai/models/files_api_routes_list_filesop.py +96 -0
  54. mistralai/models/files_api_routes_retrieve_fileop.py +5 -5
  55. mistralai/models/files_api_routes_upload_fileop.py +33 -14
  56. mistralai/models/fileschema.py +22 -15
  57. mistralai/models/fimcompletionrequest.py +44 -16
  58. mistralai/models/fimcompletionresponse.py +8 -4
  59. mistralai/models/fimcompletionstreamrequest.py +44 -16
  60. mistralai/models/finetuneablemodel.py +7 -1
  61. mistralai/models/ftmodelcapabilitiesout.py +6 -4
  62. mistralai/models/ftmodelcard.py +121 -0
  63. mistralai/models/ftmodelout.py +39 -9
  64. mistralai/models/function.py +5 -4
  65. mistralai/models/functioncall.py +4 -3
  66. mistralai/models/functionname.py +17 -0
  67. mistralai/models/githubrepositoryin.py +24 -7
  68. mistralai/models/githubrepositoryout.py +24 -7
  69. mistralai/models/httpvalidationerror.py +1 -3
  70. mistralai/models/imageurl.py +47 -0
  71. mistralai/models/imageurlchunk.py +38 -0
  72. mistralai/models/jobin.py +24 -7
  73. mistralai/models/jobmetadataout.py +32 -8
  74. mistralai/models/jobout.py +65 -12
  75. mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +16 -0
  76. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +16 -0
  77. mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +95 -0
  78. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +5 -5
  79. mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +5 -5
  80. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +3 -2
  81. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +5 -5
  82. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +85 -18
  83. mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +5 -5
  84. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +5 -5
  85. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +10 -6
  86. mistralai/models/jobsout.py +13 -5
  87. mistralai/models/legacyjobmetadataout.py +55 -9
  88. mistralai/models/listfilesout.py +7 -3
  89. mistralai/models/metricout.py +12 -8
  90. mistralai/models/modelcapabilities.py +9 -4
  91. mistralai/models/modellist.py +21 -7
  92. mistralai/models/responseformat.py +7 -8
  93. mistralai/models/responseformats.py +8 -0
  94. mistralai/models/retrieve_model_v1_models_model_id_getop.py +25 -6
  95. mistralai/models/retrievefileout.py +25 -15
  96. mistralai/models/sampletype.py +6 -2
  97. mistralai/models/security.py +14 -5
  98. mistralai/models/source.py +3 -2
  99. mistralai/models/systemmessage.py +10 -9
  100. mistralai/models/textchunk.py +14 -5
  101. mistralai/models/tool.py +10 -9
  102. mistralai/models/toolcall.py +10 -8
  103. mistralai/models/toolchoice.py +29 -0
  104. mistralai/models/toolchoiceenum.py +7 -0
  105. mistralai/models/toolmessage.py +13 -6
  106. mistralai/models/tooltypes.py +8 -0
  107. mistralai/models/trainingfile.py +4 -4
  108. mistralai/models/trainingparameters.py +34 -8
  109. mistralai/models/trainingparametersin.py +36 -10
  110. mistralai/models/unarchiveftmodelout.py +15 -5
  111. mistralai/models/updateftmodelin.py +9 -6
  112. mistralai/models/uploadfileout.py +22 -15
  113. mistralai/models/usageinfo.py +4 -3
  114. mistralai/models/usermessage.py +42 -10
  115. mistralai/models/validationerror.py +5 -3
  116. mistralai/models/wandbintegration.py +23 -7
  117. mistralai/models/wandbintegrationout.py +23 -8
  118. mistralai/models_.py +416 -294
  119. mistralai/sdk.py +31 -19
  120. mistralai/sdkconfiguration.py +9 -11
  121. mistralai/utils/__init__.py +14 -1
  122. mistralai/utils/annotations.py +13 -2
  123. mistralai/utils/logger.py +4 -1
  124. mistralai/utils/retries.py +2 -1
  125. mistralai/utils/security.py +13 -6
  126. mistralai/utils/serializers.py +25 -0
  127. {mistralai-1.0.3.dist-info → mistralai-1.2.0.dist-info}/METADATA +171 -66
  128. mistralai-1.2.0.dist-info/RECORD +276 -0
  129. {mistralai-1.0.3.dist-info → mistralai-1.2.0.dist-info}/WHEEL +1 -1
  130. mistralai_azure/__init__.py +4 -0
  131. mistralai_azure/_hooks/sdkhooks.py +23 -4
  132. mistralai_azure/_hooks/types.py +27 -9
  133. mistralai_azure/_version.py +12 -0
  134. mistralai_azure/basesdk.py +91 -6
  135. mistralai_azure/chat.py +308 -166
  136. mistralai_azure/models/__init__.py +164 -16
  137. mistralai_azure/models/assistantmessage.py +29 -11
  138. mistralai_azure/models/chatcompletionchoice.py +15 -6
  139. mistralai_azure/models/chatcompletionrequest.py +94 -22
  140. mistralai_azure/models/chatcompletionresponse.py +8 -4
  141. mistralai_azure/models/chatcompletionstreamrequest.py +96 -22
  142. mistralai_azure/models/completionchunk.py +12 -5
  143. mistralai_azure/models/completionevent.py +2 -3
  144. mistralai_azure/models/completionresponsestreamchoice.py +19 -8
  145. mistralai_azure/models/contentchunk.py +4 -11
  146. mistralai_azure/models/deltamessage.py +30 -12
  147. mistralai_azure/models/function.py +5 -4
  148. mistralai_azure/models/functioncall.py +4 -3
  149. mistralai_azure/models/functionname.py +17 -0
  150. mistralai_azure/models/httpvalidationerror.py +1 -3
  151. mistralai_azure/models/responseformat.py +7 -8
  152. mistralai_azure/models/responseformats.py +8 -0
  153. mistralai_azure/models/security.py +13 -5
  154. mistralai_azure/models/systemmessage.py +10 -9
  155. mistralai_azure/models/textchunk.py +14 -5
  156. mistralai_azure/models/tool.py +10 -9
  157. mistralai_azure/models/toolcall.py +10 -8
  158. mistralai_azure/models/toolchoice.py +29 -0
  159. mistralai_azure/models/toolchoiceenum.py +7 -0
  160. mistralai_azure/models/toolmessage.py +20 -7
  161. mistralai_azure/models/tooltypes.py +8 -0
  162. mistralai_azure/models/usageinfo.py +4 -3
  163. mistralai_azure/models/usermessage.py +42 -10
  164. mistralai_azure/models/validationerror.py +5 -3
  165. mistralai_azure/sdkconfiguration.py +9 -11
  166. mistralai_azure/utils/__init__.py +16 -3
  167. mistralai_azure/utils/annotations.py +13 -2
  168. mistralai_azure/utils/forms.py +10 -9
  169. mistralai_azure/utils/headers.py +8 -8
  170. mistralai_azure/utils/logger.py +6 -0
  171. mistralai_azure/utils/queryparams.py +16 -14
  172. mistralai_azure/utils/retries.py +2 -1
  173. mistralai_azure/utils/security.py +12 -6
  174. mistralai_azure/utils/serializers.py +42 -8
  175. mistralai_azure/utils/url.py +13 -8
  176. mistralai_azure/utils/values.py +6 -0
  177. mistralai_gcp/__init__.py +4 -0
  178. mistralai_gcp/_hooks/sdkhooks.py +23 -4
  179. mistralai_gcp/_hooks/types.py +27 -9
  180. mistralai_gcp/_version.py +12 -0
  181. mistralai_gcp/basesdk.py +91 -6
  182. mistralai_gcp/chat.py +308 -166
  183. mistralai_gcp/fim.py +198 -132
  184. mistralai_gcp/models/__init__.py +186 -18
  185. mistralai_gcp/models/assistantmessage.py +29 -11
  186. mistralai_gcp/models/chatcompletionchoice.py +15 -6
  187. mistralai_gcp/models/chatcompletionrequest.py +91 -22
  188. mistralai_gcp/models/chatcompletionresponse.py +8 -4
  189. mistralai_gcp/models/chatcompletionstreamrequest.py +93 -22
  190. mistralai_gcp/models/completionchunk.py +12 -5
  191. mistralai_gcp/models/completionevent.py +2 -3
  192. mistralai_gcp/models/completionresponsestreamchoice.py +19 -8
  193. mistralai_gcp/models/contentchunk.py +4 -11
  194. mistralai_gcp/models/deltamessage.py +30 -12
  195. mistralai_gcp/models/fimcompletionrequest.py +51 -17
  196. mistralai_gcp/models/fimcompletionresponse.py +8 -4
  197. mistralai_gcp/models/fimcompletionstreamrequest.py +51 -17
  198. mistralai_gcp/models/function.py +5 -4
  199. mistralai_gcp/models/functioncall.py +4 -3
  200. mistralai_gcp/models/functionname.py +17 -0
  201. mistralai_gcp/models/httpvalidationerror.py +1 -3
  202. mistralai_gcp/models/responseformat.py +7 -8
  203. mistralai_gcp/models/responseformats.py +8 -0
  204. mistralai_gcp/models/security.py +13 -5
  205. mistralai_gcp/models/systemmessage.py +10 -9
  206. mistralai_gcp/models/textchunk.py +14 -5
  207. mistralai_gcp/models/tool.py +10 -9
  208. mistralai_gcp/models/toolcall.py +10 -8
  209. mistralai_gcp/models/toolchoice.py +29 -0
  210. mistralai_gcp/models/toolchoiceenum.py +7 -0
  211. mistralai_gcp/models/toolmessage.py +20 -7
  212. mistralai_gcp/models/tooltypes.py +8 -0
  213. mistralai_gcp/models/usageinfo.py +4 -3
  214. mistralai_gcp/models/usermessage.py +42 -10
  215. mistralai_gcp/models/validationerror.py +5 -3
  216. mistralai_gcp/sdk.py +6 -7
  217. mistralai_gcp/sdkconfiguration.py +9 -11
  218. mistralai_gcp/utils/__init__.py +16 -3
  219. mistralai_gcp/utils/annotations.py +13 -2
  220. mistralai_gcp/utils/forms.py +10 -9
  221. mistralai_gcp/utils/headers.py +8 -8
  222. mistralai_gcp/utils/logger.py +6 -0
  223. mistralai_gcp/utils/queryparams.py +16 -14
  224. mistralai_gcp/utils/retries.py +2 -1
  225. mistralai_gcp/utils/security.py +12 -6
  226. mistralai_gcp/utils/serializers.py +42 -8
  227. mistralai_gcp/utils/url.py +13 -8
  228. mistralai_gcp/utils/values.py +6 -0
  229. mistralai-1.0.3.dist-info/RECORD +0 -236
  230. {mistralai-1.0.3.dist-info → mistralai-1.2.0.dist-info}/LICENSE +0 -0
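
The hunks reproduced below are from the mistralai_gcp model files; per the per-file stats above, the same changes appear to repeat across the mistralai and mistralai_azure packages. Before working through them, a quick standard-library check that the upgrade actually landed (a minimal sketch; nothing here is specific to this SDK):

    import importlib.metadata

    # Query the installed wheel's metadata; expect "1.2.0" after upgrading.
    print(importlib.metadata.version("mistralai"))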

mistralai_gcp/models/chatcompletionstreamrequest.py

@@ -5,13 +5,21 @@ from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
 from .tool import Tool, ToolTypedDict
+from .toolchoice import ToolChoice, ToolChoiceTypedDict
+from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
-from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List, Literal, Optional, TypedDict, Union
-from typing_extensions import Annotated, NotRequired
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypedDict


 StopTypedDict = Union[str, List[str]]
@@ -22,27 +30,44 @@ Stop = Union[str, List[str]]
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


-MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+MessagesTypedDict = Union[
+    SystemMessageTypedDict,
+    UserMessageTypedDict,
+    AssistantMessageTypedDict,
+    ToolMessageTypedDict,
+]


-Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+Messages = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]


-ToolChoice = Literal["auto", "none", "any"]
+ChatCompletionStreamRequestToolChoiceTypedDict = Union[
+    ToolChoiceTypedDict, ToolChoiceEnum
+]
+
+
+ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]
+

 class ChatCompletionStreamRequestTypedDict(TypedDict):
     model: Nullable[str]
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     stop: NotRequired[StopTypedDict]
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
@@ -50,35 +75,78 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
-    tool_choice: NotRequired[ToolChoice]
-
+    tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
+    presence_penalty: NotRequired[float]
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    frequency_penalty: NotRequired[float]
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    n: NotRequired[Nullable[int]]
+    r"""Number of completions to return for each request, input tokens are only billed once."""
+

 class ChatCompletionStreamRequest(BaseModel):
     model: Nullable[str]
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    temperature: Optional[float] = 0.7
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = True
+
     stop: Optional[Stop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     response_format: Optional[ResponseFormat] = None
+
     tools: OptionalNullable[List[Tool]] = UNSET
-    tool_choice: Optional[ToolChoice] = "auto"
-
+
+    tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
+
+    presence_penalty: Optional[float] = 0
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+
+    frequency_penalty: Optional[float] = 0
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""Number of completions to return for each request, input tokens are only billed once."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
+        optional_fields = [
+            "temperature",
+            "top_p",
+            "max_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "response_format",
+            "tools",
+            "tool_choice",
+            "presence_penalty",
+            "frequency_penalty",
+            "n",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+        ]
         null_default_fields = []

         serialized = handler(self)
@@ -88,9 +156,13 @@ class ChatCompletionStreamRequest(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            ) # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -100,4 +172,3 @@ class ChatCompletionStreamRequest(BaseModel):
                 m[k] = val

         return m
-
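
The `tool_choice` rework above is the breaking change to watch: the old `Literal["auto", "none", "any"]` alias is replaced by a union of the new `ToolChoice` model and `ToolChoiceEnum`, and the default moves from `"auto"` to `None`. A rough sketch of a request under the new definitions (the model ID and function name are placeholders, and the object form assumes `ToolChoice` wraps a `FunctionName`, as the new toolchoice.py and functionname.py modules suggest):

    from mistralai_gcp.models import ChatCompletionStreamRequest

    req = ChatCompletionStreamRequest(
        model="mistral-small-latest",  # placeholder model ID
        messages=[{"role": "user", "content": "What is the weather in Paris?"}],
        tool_choice="any",  # plain ToolChoiceEnum strings still validate
        presence_penalty=0.2,  # new in this release
        n=1,  # new: completions per request; input tokens billed once
    )
    # Object form, naming a specific function (hypothetical helper names):
    # req.tool_choice = ToolChoice(function=FunctionName(name="get_weather"))

Note also that `temperature` now defaults to `UNSET` rather than `0.7`, so omitting it defers to the server-side, model-specific default.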

mistralai_gcp/models/completionchunk.py

@@ -1,11 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict
+from .completionresponsestreamchoice import (
+    CompletionResponseStreamChoice,
+    CompletionResponseStreamChoiceTypedDict,
+)
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict


 class CompletionChunkTypedDict(TypedDict):
@@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict):
     object: NotRequired[str]
     created: NotRequired[int]
     usage: NotRequired[UsageInfoTypedDict]
-
+

 class CompletionChunk(BaseModel):
     id: str
+
     model: str
+
     choices: List[CompletionResponseStreamChoice]
+
     object: Optional[str] = None
+
     created: Optional[int] = None
+
     usage: Optional[UsageInfo] = None
-

mistralai_gcp/models/completionevent.py

@@ -3,13 +3,12 @@
 from __future__ import annotations
 from .completionchunk import CompletionChunk, CompletionChunkTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import TypedDict
+from typing_extensions import TypedDict


 class CompletionEventTypedDict(TypedDict):
     data: CompletionChunkTypedDict
-
+

 class CompletionEvent(BaseModel):
     data: CompletionChunk
-

mistralai_gcp/models/completionresponsestreamchoice.py

@@ -2,24 +2,32 @@

 from __future__ import annotations
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
-from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr
+from mistralai_gcp.utils import validate_open_enum
 from pydantic import model_serializer
-from typing import Literal, TypedDict
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Union
+from typing_extensions import Annotated, TypedDict


-FinishReason = Literal["stop", "length", "error", "tool_calls"]
+FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr]
+

 class CompletionResponseStreamChoiceTypedDict(TypedDict):
     index: int
     delta: DeltaMessageTypedDict
     finish_reason: Nullable[FinishReason]
-
+

 class CompletionResponseStreamChoice(BaseModel):
     index: int
+
     delta: DeltaMessage
-    finish_reason: Nullable[FinishReason]
-
+
+    finish_reason: Annotated[
+        Nullable[FinishReason], PlainValidator(validate_open_enum(False))
+    ]
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = []
@@ -33,9 +41,13 @@ class CompletionResponseStreamChoice(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            ) # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -45,4 +57,3 @@ class CompletionResponseStreamChoice(BaseModel):
                 m[k] = val

         return m
-
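
`FinishReason` becomes an open enum here: a value outside the four documented literals now validates as a plain string (`UnrecognizedStr`) instead of failing, so newly introduced server-side finish reasons no longer break streaming deserialization. A minimal sketch of that behavior (the `"content_filter"` value is illustrative, not taken from this diff):

    from mistralai_gcp.models import CompletionResponseStreamChoice

    choice = CompletionResponseStreamChoice.model_validate({
        "index": 0,
        "delta": {"role": "assistant", "content": "Hello"},
        "finish_reason": "content_filter",  # not in the Literal list
    })
    # Kept as the raw string rather than rejected at validation time.
    print(choice.finish_reason)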

mistralai_gcp/models/contentchunk.py

@@ -1,17 +1,10 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from mistralai_gcp.types import BaseModel
-import pydantic
-from typing import Final, Optional, TypedDict
-from typing_extensions import Annotated
+from .textchunk import TextChunk, TextChunkTypedDict


-class ContentChunkTypedDict(TypedDict):
-    text: str
-
+ContentChunkTypedDict = TextChunkTypedDict

-class ContentChunk(BaseModel):
-    text: str
-    TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore
-
+
+ContentChunk = TextChunk
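
Since `ContentChunk` is now a straight alias of `TextChunk` rather than a separate model, existing imports keep working and both names validate the same payload. A quick sketch (assuming both names are re-exported from the models package, as before):

    from mistralai_gcp.models import ContentChunk, TextChunk

    chunk = ContentChunk(text="hello")
    assert isinstance(chunk, TextChunk)  # same class object, not a copy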

mistralai_gcp/models/deltamessage.py

@@ -1,28 +1,43 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .contentchunk import ContentChunk, ContentChunkTypedDict
 from .toolcall import ToolCall, ToolCallTypedDict
-from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from pydantic import model_serializer
-from typing import List, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import List, Union
+from typing_extensions import NotRequired, TypedDict
+
+
+ContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+
+
+Content = Union[str, List[ContentChunk]]


 class DeltaMessageTypedDict(TypedDict):
-    role: NotRequired[str]
-    content: NotRequired[Nullable[str]]
+    role: NotRequired[Nullable[str]]
+    content: NotRequired[Nullable[ContentTypedDict]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
-
+

 class DeltaMessage(BaseModel):
-    role: Optional[str] = None
-    content: OptionalNullable[str] = UNSET
+    role: OptionalNullable[str] = UNSET
+
+    content: OptionalNullable[Content] = UNSET
+
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
-
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["role", "content", "tool_calls"]
-        nullable_fields = ["content", "tool_calls"]
+        nullable_fields = ["role", "content", "tool_calls"]
         null_default_fields = []

         serialized = handler(self)
@@ -32,9 +47,13 @@ class DeltaMessage(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            ) # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -44,4 +63,3 @@ class DeltaMessage(BaseModel):
                 m[k] = val

         return m
-
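
Because `delta.content` widens from a plain string to `Union[str, List[ContentChunk]]` and `role` becomes nullable, streaming consumers should stop assuming `content` is a `str`. A defensive sketch for extracting text from a delta under the new model:

    from mistralai_gcp.models import DeltaMessage

    def delta_text(delta: DeltaMessage) -> str:
        content = delta.content
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            # A list of content chunks; each TextChunk carries a .text payload.
            return "".join(chunk.text for chunk in content)
        return ""  # None or unset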

mistralai_gcp/models/fimcompletionrequest.py

@@ -1,10 +1,16 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from pydantic import model_serializer
-from typing import List, Optional, TypedDict, Union
-from typing_extensions import NotRequired
+from typing import List, Optional, Union
+from typing_extensions import NotRequired, TypedDict


 FIMCompletionRequestStopTypedDict = Union[str, List[str]]
@@ -23,14 +29,12 @@ class FIMCompletionRequestTypedDict(TypedDict):
     """
     prompt: str
     r"""The text/code to complete."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
     stop: NotRequired[FIMCompletionRequestStopTypedDict]
@@ -39,7 +43,9 @@ class FIMCompletionRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-
+    min_tokens: NotRequired[Nullable[int]]
+    r"""The minimum number of tokens to generate in the completion."""
+

 class FIMCompletionRequest(BaseModel):
     model: Nullable[str]
@@ -47,29 +53,54 @@ class FIMCompletionRequest(BaseModel):
     - `codestral-2405`
     - `codestral-latest`
     """
+
     prompt: str
     r"""The text/code to complete."""
-    temperature: Optional[float] = 0.7
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+
     stop: Optional[FIMCompletionRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-
+
+    min_tokens: OptionalNullable[int] = UNSET
+    r"""The minimum number of tokens to generate in the completion."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
+        optional_fields = [
+            "temperature",
+            "top_p",
+            "max_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "suffix",
+            "min_tokens",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "suffix",
+            "min_tokens",
+        ]
         null_default_fields = []

         serialized = handler(self)
@@ -79,9 +110,13 @@ class FIMCompletionRequest(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            ) # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -91,4 +126,3 @@ class FIMCompletionRequest(BaseModel):
                 m[k] = val

         return m
-
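
The FIM request mirrors the chat change: `temperature` becomes an unset-by-default nullable, while `min_tokens` remains supported but moves to the end of the field list (in the chat models above it was removed outright). A short sketch of a request under the new layout; the prompt and suffix strings are placeholders:

    from mistralai_gcp.models import FIMCompletionRequest

    req = FIMCompletionRequest(
        model="codestral-latest",
        prompt="def fibonacci(n: int) -> int:\n",
        suffix="\nprint(fibonacci(10))",
        max_tokens=128,
        min_tokens=1,  # still available for FIM, unlike chat
        # temperature omitted: defers to the model-specific server default
    )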

mistralai_gcp/models/fimcompletionresponse.py

@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict


 class FIMCompletionResponseTypedDict(TypedDict):
@@ -15,13 +15,17 @@ class FIMCompletionResponseTypedDict(TypedDict):
     usage: UsageInfoTypedDict
     created: NotRequired[int]
     choices: NotRequired[List[ChatCompletionChoiceTypedDict]]
-
+

 class FIMCompletionResponse(BaseModel):
     id: str
+
     object: str
+
     model: str
+
     usage: UsageInfo
+
     created: Optional[int] = None
+
     choices: Optional[List[ChatCompletionChoice]] = None
-
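
`choices` on `FIMCompletionResponse` stays optional through this release, so callers should guard before indexing. A small helper sketch (it assumes `message.content` follows the assistant-message shape used elsewhere in these models):

    from typing import Optional
    from mistralai_gcp.models import FIMCompletionResponse

    def first_completion_text(resp: FIMCompletionResponse) -> Optional[str]:
        if not resp.choices:  # Optional[List[ChatCompletionChoice]]
            return None
        content = resp.choices[0].message.content
        return content if isinstance(content, str) else None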