mistralai-0.4.2-py3-none-any.whl → mistralai-0.5.5a50-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (240)
  1. mistralai/__init__.py +5 -0
  2. mistralai/_hooks/__init__.py +5 -0
  3. mistralai/_hooks/custom_user_agent.py +16 -0
  4. mistralai/_hooks/deprecation_warning.py +26 -0
  5. mistralai/_hooks/registration.py +17 -0
  6. mistralai/_hooks/sdkhooks.py +57 -0
  7. mistralai/_hooks/types.py +76 -0
  8. mistralai/async_client.py +5 -413
  9. mistralai/basesdk.py +216 -0
  10. mistralai/chat.py +475 -0
  11. mistralai/client.py +5 -414
  12. mistralai/embeddings.py +182 -0
  13. mistralai/files.py +600 -84
  14. mistralai/fim.py +439 -0
  15. mistralai/fine_tuning.py +855 -0
  16. mistralai/httpclient.py +78 -0
  17. mistralai/models/__init__.py +80 -0
  18. mistralai/models/archiveftmodelout.py +19 -0
  19. mistralai/models/assistantmessage.py +58 -0
  20. mistralai/models/chatcompletionchoice.py +33 -0
  21. mistralai/models/chatcompletionrequest.py +114 -0
  22. mistralai/models/chatcompletionresponse.py +27 -0
  23. mistralai/models/chatcompletionstreamrequest.py +112 -0
  24. mistralai/models/checkpointout.py +25 -0
  25. mistralai/models/completionchunk.py +27 -0
  26. mistralai/models/completionevent.py +15 -0
  27. mistralai/models/completionresponsestreamchoice.py +53 -0
  28. mistralai/models/contentchunk.py +17 -0
  29. mistralai/models/delete_model_v1_models_model_id_deleteop.py +16 -0
  30. mistralai/models/deletefileout.py +24 -0
  31. mistralai/models/deletemodelout.py +25 -0
  32. mistralai/models/deltamessage.py +52 -0
  33. mistralai/models/detailedjobout.py +96 -0
  34. mistralai/models/embeddingrequest.py +66 -0
  35. mistralai/models/embeddingresponse.py +24 -0
  36. mistralai/models/embeddingresponsedata.py +19 -0
  37. mistralai/models/eventout.py +55 -0
  38. mistralai/models/files_api_routes_delete_fileop.py +16 -0
  39. mistralai/models/files_api_routes_retrieve_fileop.py +16 -0
  40. mistralai/models/files_api_routes_upload_fileop.py +51 -0
  41. mistralai/models/fileschema.py +76 -0
  42. mistralai/models/fimcompletionrequest.py +99 -0
  43. mistralai/models/fimcompletionresponse.py +27 -0
  44. mistralai/models/fimcompletionstreamrequest.py +97 -0
  45. mistralai/models/finetuneablemodel.py +8 -0
  46. mistralai/models/ftmodelcapabilitiesout.py +21 -0
  47. mistralai/models/ftmodelout.py +70 -0
  48. mistralai/models/function.py +19 -0
  49. mistralai/models/functioncall.py +16 -0
  50. mistralai/models/githubrepositoryin.py +57 -0
  51. mistralai/models/githubrepositoryout.py +57 -0
  52. mistralai/models/httpvalidationerror.py +23 -0
  53. mistralai/models/jobin.py +78 -0
  54. mistralai/models/jobmetadataout.py +59 -0
  55. mistralai/models/jobout.py +112 -0
  56. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +16 -0
  57. mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +18 -0
  58. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +73 -0
  59. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +18 -0
  60. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +86 -0
  61. mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +16 -0
  62. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +16 -0
  63. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +19 -0
  64. mistralai/models/jobsout.py +20 -0
  65. mistralai/models/legacyjobmetadataout.py +85 -0
  66. mistralai/models/listfilesout.py +17 -0
  67. mistralai/models/metricout.py +55 -0
  68. mistralai/models/modelcapabilities.py +21 -0
  69. mistralai/models/modelcard.py +71 -0
  70. mistralai/models/modellist.py +18 -0
  71. mistralai/models/responseformat.py +18 -0
  72. mistralai/models/retrieve_model_v1_models_model_id_getop.py +16 -0
  73. mistralai/models/retrievefileout.py +76 -0
  74. mistralai/models/sampletype.py +7 -0
  75. mistralai/models/sdkerror.py +22 -0
  76. mistralai/models/security.py +16 -0
  77. mistralai/models/source.py +7 -0
  78. mistralai/models/systemmessage.py +26 -0
  79. mistralai/models/textchunk.py +17 -0
  80. mistralai/models/tool.py +18 -0
  81. mistralai/models/toolcall.py +20 -0
  82. mistralai/models/toolmessage.py +55 -0
  83. mistralai/models/trainingfile.py +17 -0
  84. mistralai/models/trainingparameters.py +53 -0
  85. mistralai/models/trainingparametersin.py +61 -0
  86. mistralai/models/unarchiveftmodelout.py +19 -0
  87. mistralai/models/updateftmodelin.py +49 -0
  88. mistralai/models/uploadfileout.py +76 -0
  89. mistralai/models/usageinfo.py +18 -0
  90. mistralai/models/usermessage.py +26 -0
  91. mistralai/models/validationerror.py +24 -0
  92. mistralai/models/wandbintegration.py +61 -0
  93. mistralai/models/wandbintegrationout.py +57 -0
  94. mistralai/models_.py +928 -0
  95. mistralai/py.typed +1 -0
  96. mistralai/sdk.py +111 -0
  97. mistralai/sdkconfiguration.py +53 -0
  98. mistralai/types/__init__.py +21 -0
  99. mistralai/types/basemodel.py +35 -0
  100. mistralai/utils/__init__.py +82 -0
  101. mistralai/utils/annotations.py +19 -0
  102. mistralai/utils/enums.py +34 -0
  103. mistralai/utils/eventstreaming.py +179 -0
  104. mistralai/utils/forms.py +207 -0
  105. mistralai/utils/headers.py +136 -0
  106. mistralai/utils/metadata.py +118 -0
  107. mistralai/utils/queryparams.py +203 -0
  108. mistralai/utils/requestbodies.py +66 -0
  109. mistralai/utils/retries.py +216 -0
  110. mistralai/utils/security.py +182 -0
  111. mistralai/utils/serializers.py +181 -0
  112. mistralai/utils/url.py +150 -0
  113. mistralai/utils/values.py +128 -0
  114. {mistralai-0.4.2.dist-info → mistralai-0.5.5a50.dist-info}/LICENSE +1 -1
  115. mistralai-0.5.5a50.dist-info/METADATA +626 -0
  116. mistralai-0.5.5a50.dist-info/RECORD +228 -0
  117. mistralai_azure/__init__.py +5 -0
  118. mistralai_azure/_hooks/__init__.py +5 -0
  119. mistralai_azure/_hooks/custom_user_agent.py +16 -0
  120. mistralai_azure/_hooks/registration.py +15 -0
  121. mistralai_azure/_hooks/sdkhooks.py +57 -0
  122. mistralai_azure/_hooks/types.py +76 -0
  123. mistralai_azure/basesdk.py +215 -0
  124. mistralai_azure/chat.py +475 -0
  125. mistralai_azure/httpclient.py +78 -0
  126. mistralai_azure/models/__init__.py +28 -0
  127. mistralai_azure/models/assistantmessage.py +58 -0
  128. mistralai_azure/models/chatcompletionchoice.py +33 -0
  129. mistralai_azure/models/chatcompletionrequest.py +114 -0
  130. mistralai_azure/models/chatcompletionresponse.py +27 -0
  131. mistralai_azure/models/chatcompletionstreamrequest.py +112 -0
  132. mistralai_azure/models/completionchunk.py +27 -0
  133. mistralai_azure/models/completionevent.py +15 -0
  134. mistralai_azure/models/completionresponsestreamchoice.py +53 -0
  135. mistralai_azure/models/contentchunk.py +17 -0
  136. mistralai_azure/models/deltamessage.py +52 -0
  137. mistralai_azure/models/function.py +19 -0
  138. mistralai_azure/models/functioncall.py +16 -0
  139. mistralai_azure/models/httpvalidationerror.py +23 -0
  140. mistralai_azure/models/responseformat.py +18 -0
  141. mistralai_azure/models/sdkerror.py +22 -0
  142. mistralai_azure/models/security.py +16 -0
  143. mistralai_azure/models/systemmessage.py +26 -0
  144. mistralai_azure/models/textchunk.py +17 -0
  145. mistralai_azure/models/tool.py +18 -0
  146. mistralai_azure/models/toolcall.py +20 -0
  147. mistralai_azure/models/toolmessage.py +55 -0
  148. mistralai_azure/models/usageinfo.py +18 -0
  149. mistralai_azure/models/usermessage.py +26 -0
  150. mistralai_azure/models/validationerror.py +24 -0
  151. mistralai_azure/py.typed +1 -0
  152. mistralai_azure/sdk.py +102 -0
  153. mistralai_azure/sdkconfiguration.py +53 -0
  154. mistralai_azure/types/__init__.py +21 -0
  155. mistralai_azure/types/basemodel.py +35 -0
  156. mistralai_azure/utils/__init__.py +80 -0
  157. mistralai_azure/utils/annotations.py +19 -0
  158. mistralai_azure/utils/enums.py +34 -0
  159. mistralai_azure/utils/eventstreaming.py +179 -0
  160. mistralai_azure/utils/forms.py +207 -0
  161. mistralai_azure/utils/headers.py +136 -0
  162. mistralai_azure/utils/metadata.py +118 -0
  163. mistralai_azure/utils/queryparams.py +203 -0
  164. mistralai_azure/utils/requestbodies.py +66 -0
  165. mistralai_azure/utils/retries.py +216 -0
  166. mistralai_azure/utils/security.py +168 -0
  167. mistralai_azure/utils/serializers.py +181 -0
  168. mistralai_azure/utils/url.py +150 -0
  169. mistralai_azure/utils/values.py +128 -0
  170. mistralai_gcp/__init__.py +5 -0
  171. mistralai_gcp/_hooks/__init__.py +5 -0
  172. mistralai_gcp/_hooks/custom_user_agent.py +16 -0
  173. mistralai_gcp/_hooks/registration.py +15 -0
  174. mistralai_gcp/_hooks/sdkhooks.py +57 -0
  175. mistralai_gcp/_hooks/types.py +76 -0
  176. mistralai_gcp/basesdk.py +215 -0
  177. mistralai_gcp/chat.py +463 -0
  178. mistralai_gcp/fim.py +439 -0
  179. mistralai_gcp/httpclient.py +78 -0
  180. mistralai_gcp/models/__init__.py +31 -0
  181. mistralai_gcp/models/assistantmessage.py +58 -0
  182. mistralai_gcp/models/chatcompletionchoice.py +33 -0
  183. mistralai_gcp/models/chatcompletionrequest.py +110 -0
  184. mistralai_gcp/models/chatcompletionresponse.py +27 -0
  185. mistralai_gcp/models/chatcompletionstreamrequest.py +108 -0
  186. mistralai_gcp/models/completionchunk.py +27 -0
  187. mistralai_gcp/models/completionevent.py +15 -0
  188. mistralai_gcp/models/completionresponsestreamchoice.py +53 -0
  189. mistralai_gcp/models/contentchunk.py +17 -0
  190. mistralai_gcp/models/deltamessage.py +52 -0
  191. mistralai_gcp/models/fimcompletionrequest.py +99 -0
  192. mistralai_gcp/models/fimcompletionresponse.py +27 -0
  193. mistralai_gcp/models/fimcompletionstreamrequest.py +97 -0
  194. mistralai_gcp/models/function.py +19 -0
  195. mistralai_gcp/models/functioncall.py +16 -0
  196. mistralai_gcp/models/httpvalidationerror.py +23 -0
  197. mistralai_gcp/models/responseformat.py +18 -0
  198. mistralai_gcp/models/sdkerror.py +22 -0
  199. mistralai_gcp/models/security.py +16 -0
  200. mistralai_gcp/models/systemmessage.py +26 -0
  201. mistralai_gcp/models/textchunk.py +17 -0
  202. mistralai_gcp/models/tool.py +18 -0
  203. mistralai_gcp/models/toolcall.py +20 -0
  204. mistralai_gcp/models/toolmessage.py +55 -0
  205. mistralai_gcp/models/usageinfo.py +18 -0
  206. mistralai_gcp/models/usermessage.py +26 -0
  207. mistralai_gcp/models/validationerror.py +24 -0
  208. mistralai_gcp/py.typed +1 -0
  209. mistralai_gcp/sdk.py +165 -0
  210. mistralai_gcp/sdkconfiguration.py +53 -0
  211. mistralai_gcp/types/__init__.py +21 -0
  212. mistralai_gcp/types/basemodel.py +35 -0
  213. mistralai_gcp/utils/__init__.py +80 -0
  214. mistralai_gcp/utils/annotations.py +19 -0
  215. mistralai_gcp/utils/enums.py +34 -0
  216. mistralai_gcp/utils/eventstreaming.py +179 -0
  217. mistralai_gcp/utils/forms.py +207 -0
  218. mistralai_gcp/utils/headers.py +136 -0
  219. mistralai_gcp/utils/metadata.py +118 -0
  220. mistralai_gcp/utils/queryparams.py +203 -0
  221. mistralai_gcp/utils/requestbodies.py +66 -0
  222. mistralai_gcp/utils/retries.py +216 -0
  223. mistralai_gcp/utils/security.py +168 -0
  224. mistralai_gcp/utils/serializers.py +181 -0
  225. mistralai_gcp/utils/url.py +150 -0
  226. mistralai_gcp/utils/values.py +128 -0
  227. py.typed +1 -0
  228. mistralai/client_base.py +0 -211
  229. mistralai/constants.py +0 -5
  230. mistralai/exceptions.py +0 -54
  231. mistralai/jobs.py +0 -172
  232. mistralai/models/chat_completion.py +0 -93
  233. mistralai/models/common.py +0 -9
  234. mistralai/models/embeddings.py +0 -19
  235. mistralai/models/files.py +0 -23
  236. mistralai/models/jobs.py +0 -100
  237. mistralai/models/models.py +0 -39
  238. mistralai-0.4.2.dist-info/METADATA +0 -82
  239. mistralai-0.4.2.dist-info/RECORD +0 -20
  240. {mistralai-0.4.2.dist-info → mistralai-0.5.5a50.dist-info}/WHEEL +0 -0
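Taken together, the file list shows the hand-written 0.4.x client being replaced by a Speakeasy-generated SDK: client.py and async_client.py shrink to thin stubs, the old client_base.py, constants.py, exceptions.py, and jobs.py modules are deleted, and three generated packages (mistralai, mistralai_azure, mistralai_gcp) arrive with their own _hooks, models, and utils layers. The hunks below reproduce the new mistralai_gcp/models files for chat, streaming, and fill-in-the-middle (FIM) completions.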
mistralai_gcp/models/chatcompletionrequest.py
@@ -0,0 +1,110 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+ from .responseformat import ResponseFormat, ResponseFormatTypedDict
+ from .systemmessage import SystemMessage, SystemMessageTypedDict
+ from .tool import Tool, ToolTypedDict
+ from .toolmessage import ToolMessage, ToolMessageTypedDict
+ from .usermessage import UserMessage, UserMessageTypedDict
+ from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+ from mistralai_gcp.utils import get_discriminator
+ from pydantic import Discriminator, Tag, model_serializer
+ from typing import List, Literal, Optional, TypedDict, Union
+ from typing_extensions import Annotated, NotRequired
+
+
+ ChatCompletionRequestToolChoice = Literal["auto", "none", "any"]
+
+ class ChatCompletionRequestTypedDict(TypedDict):
+     messages: List[ChatCompletionRequestMessagesTypedDict]
+     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+     model: NotRequired[Nullable[str]]
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+     temperature: NotRequired[float]
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: NotRequired[float]
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: NotRequired[Nullable[int]]
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: NotRequired[Nullable[int]]
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: NotRequired[bool]
+     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+     stop: NotRequired[ChatCompletionRequestStopTypedDict]
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: NotRequired[Nullable[int]]
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     response_format: NotRequired[ResponseFormatTypedDict]
+     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+     tool_choice: NotRequired[ChatCompletionRequestToolChoice]
+
+
+ class ChatCompletionRequest(BaseModel):
+     messages: List[ChatCompletionRequestMessages]
+     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+     model: OptionalNullable[str] = UNSET
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+     temperature: Optional[float] = 0.7
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: Optional[float] = 1
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: OptionalNullable[int] = UNSET
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: OptionalNullable[int] = UNSET
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: Optional[bool] = False
+     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+     stop: Optional[ChatCompletionRequestStop] = None
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: OptionalNullable[int] = UNSET
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     response_format: Optional[ResponseFormat] = None
+     tools: OptionalNullable[List[Tool]] = UNSET
+     tool_choice: Optional[ChatCompletionRequestToolChoice] = "auto"
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields
+                 or (
+                     k in optional_fields
+                     and k in nullable_fields
+                     and (
+                         self.__pydantic_fields_set__.intersection({n})
+                         or k in null_default_fields
+                     ) # pylint: disable=no-member
+                 )
+             ):
+                 m[k] = val
+
+         return m
+
+
+ ChatCompletionRequestStopTypedDict = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ ChatCompletionRequestStop = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+ ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
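As a quick orientation to the generated pattern above, here is a minimal sketch. It assumes the models are re-exported from mistralai_gcp.models (as the generated __init__.py suggests) and that pydantic v2 is installed; the model name is purely illustrative. Plain dicts validate against the role-discriminated ChatCompletionRequestMessages union, and the serialize_model hook drops UNSET fields while keeping concrete defaults:

from mistralai_gcp.models import ChatCompletionRequest

# The Discriminator reads the "role" key via get_discriminator, so a plain
# dict is routed to UserMessage without constructing it explicitly.
req = ChatCompletionRequest(
    model="mistral-large-latest",  # hypothetical model name for illustration
    messages=[{"role": "user", "content": "Hello!"}],
)

# Fields still at the UNSET sentinel (max_tokens, random_seed, tools, ...)
# are omitted from the dump; non-null defaults such as temperature=0.7,
# top_p=1, stream=False, and tool_choice="auto" are kept.
print(req.model_dump())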
mistralai_gcp/models/chatcompletionresponse.py
@@ -0,0 +1,27 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
+ from .usageinfo import UsageInfo, UsageInfoTypedDict
+ from mistralai_gcp.types import BaseModel
+ from typing import List, Optional, TypedDict
+ from typing_extensions import NotRequired
+
+
+ class ChatCompletionResponseTypedDict(TypedDict):
+     id: str
+     object: str
+     model: str
+     usage: UsageInfoTypedDict
+     created: NotRequired[int]
+     choices: NotRequired[List[ChatCompletionChoiceTypedDict]]
+
+
+ class ChatCompletionResponse(BaseModel):
+     id: str
+     object: str
+     model: str
+     usage: UsageInfo
+     created: Optional[int] = None
+     choices: Optional[List[ChatCompletionChoice]] = None
+
mistralai_gcp/models/chatcompletionstreamrequest.py
@@ -0,0 +1,108 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+ from .responseformat import ResponseFormat, ResponseFormatTypedDict
+ from .systemmessage import SystemMessage, SystemMessageTypedDict
+ from .tool import Tool, ToolTypedDict
+ from .toolmessage import ToolMessage, ToolMessageTypedDict
+ from .usermessage import UserMessage, UserMessageTypedDict
+ from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+ from mistralai_gcp.utils import get_discriminator
+ from pydantic import Discriminator, Tag, model_serializer
+ from typing import List, Literal, Optional, TypedDict, Union
+ from typing_extensions import Annotated, NotRequired
+
+
+ ToolChoice = Literal["auto", "none", "any"]
+
+ class ChatCompletionStreamRequestTypedDict(TypedDict):
+     messages: List[MessagesTypedDict]
+     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+     model: NotRequired[Nullable[str]]
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+     temperature: NotRequired[float]
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: NotRequired[float]
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: NotRequired[Nullable[int]]
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: NotRequired[Nullable[int]]
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: NotRequired[bool]
+     stop: NotRequired[StopTypedDict]
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: NotRequired[Nullable[int]]
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     response_format: NotRequired[ResponseFormatTypedDict]
+     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+     tool_choice: NotRequired[ToolChoice]
+
+
+ class ChatCompletionStreamRequest(BaseModel):
+     messages: List[Messages]
+     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+     model: OptionalNullable[str] = UNSET
+     r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+     temperature: Optional[float] = 0.7
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: Optional[float] = 1
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: OptionalNullable[int] = UNSET
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: OptionalNullable[int] = UNSET
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: Optional[bool] = True
+     stop: Optional[Stop] = None
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: OptionalNullable[int] = UNSET
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     response_format: Optional[ResponseFormat] = None
+     tools: OptionalNullable[List[Tool]] = UNSET
+     tool_choice: Optional[ToolChoice] = "auto"
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields
+                 or (
+                     k in optional_fields
+                     and k in nullable_fields
+                     and (
+                         self.__pydantic_fields_set__.intersection({n})
+                         or k in null_default_fields
+                     ) # pylint: disable=no-member
+                 )
+             ):
+                 m[k] = val
+
+         return m
+
+
+ StopTypedDict = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ Stop = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+ Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
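The stream variant above is nearly identical to ChatCompletionRequest; the notable behavioral difference is the stream default, which flips to True. A small sketch (same re-export assumption as before):

from mistralai_gcp.models import ChatCompletionRequest, ChatCompletionStreamRequest

msgs = [{"role": "user", "content": "Hi"}]

# The non-stream request defaults to a blocking call, the stream
# request to server-sent events.
assert ChatCompletionRequest(messages=msgs).stream is False
assert ChatCompletionStreamRequest(messages=msgs).stream is True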
mistralai_gcp/models/completionchunk.py
@@ -0,0 +1,27 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict
+ from .usageinfo import UsageInfo, UsageInfoTypedDict
+ from mistralai_gcp.types import BaseModel
+ from typing import List, Optional, TypedDict
+ from typing_extensions import NotRequired
+
+
+ class CompletionChunkTypedDict(TypedDict):
+     id: str
+     model: str
+     choices: List[CompletionResponseStreamChoiceTypedDict]
+     object: NotRequired[str]
+     created: NotRequired[int]
+     usage: NotRequired[UsageInfoTypedDict]
+
+
+ class CompletionChunk(BaseModel):
+     id: str
+     model: str
+     choices: List[CompletionResponseStreamChoice]
+     object: Optional[str] = None
+     created: Optional[int] = None
+     usage: Optional[UsageInfo] = None
+
mistralai_gcp/models/completionevent.py
@@ -0,0 +1,15 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .completionchunk import CompletionChunk, CompletionChunkTypedDict
+ from mistralai_gcp.types import BaseModel
+ from typing import TypedDict
+
+
+ class CompletionEventTypedDict(TypedDict):
+     data: CompletionChunkTypedDict
+
+
+ class CompletionEvent(BaseModel):
+     data: CompletionChunk
+
mistralai_gcp/models/completionresponsestreamchoice.py
@@ -0,0 +1,53 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .deltamessage import DeltaMessage, DeltaMessageTypedDict
+ from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL
+ from pydantic import model_serializer
+ from typing import Literal, TypedDict
+
+
+ FinishReason = Literal["stop", "length", "error", "tool_calls"]
+
+ class CompletionResponseStreamChoiceTypedDict(TypedDict):
+     index: int
+     delta: DeltaMessageTypedDict
+     finish_reason: Nullable[FinishReason]
+
+
+ class CompletionResponseStreamChoice(BaseModel):
+     index: int
+     delta: DeltaMessage
+     finish_reason: Nullable[FinishReason]
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = []
+         nullable_fields = ["finish_reason"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields
+                 or (
+                     k in optional_fields
+                     and k in nullable_fields
+                     and (
+                         self.__pydantic_fields_set__.intersection({n})
+                         or k in null_default_fields
+                     ) # pylint: disable=no-member
+                 )
+             ):
+                 m[k] = val
+
+         return m
+
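Here finish_reason is required but nullable, and it is absent from optional_fields, so the serializer keeps an explicit None in the output rather than dropping it. A sketch under the same mistralai_gcp.models re-export assumption:

from mistralai_gcp.models import CompletionResponseStreamChoice, DeltaMessage

choice = CompletionResponseStreamChoice(
    index=0,
    delta=DeltaMessage(content="partial text"),
    finish_reason=None,  # mid-stream chunks have no finish reason yet
)
# finish_reason survives as an explicit null because it is nullable but
# not optional: {'index': 0, 'delta': {'content': 'partial text'}, 'finish_reason': None}
print(choice.model_dump())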
mistralai_gcp/models/contentchunk.py
@@ -0,0 +1,17 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from mistralai_gcp.types import BaseModel
+ import pydantic
+ from typing import Final, Optional, TypedDict
+ from typing_extensions import Annotated
+
+
+ class ContentChunkTypedDict(TypedDict):
+     text: str
+
+
+ class ContentChunk(BaseModel):
+     text: str
+     TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore
+
mistralai_gcp/models/deltamessage.py
@@ -0,0 +1,52 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .toolcall import ToolCall, ToolCallTypedDict
+ from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+ from pydantic import model_serializer
+ from typing import Optional, TypedDict
+ from typing_extensions import NotRequired
+
+
+ class DeltaMessageTypedDict(TypedDict):
+     role: NotRequired[str]
+     content: NotRequired[str]
+     tool_calls: NotRequired[Nullable[ToolCallTypedDict]]
+
+
+ class DeltaMessage(BaseModel):
+     role: Optional[str] = None
+     content: Optional[str] = None
+     tool_calls: OptionalNullable[ToolCall] = UNSET
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = ["role", "content", "tool_calls"]
+         nullable_fields = ["tool_calls"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields
+                 or (
+                     k in optional_fields
+                     and k in nullable_fields
+                     and (
+                         self.__pydantic_fields_set__.intersection({n})
+                         or k in null_default_fields
+                     ) # pylint: disable=no-member
+                 )
+             ):
+                 m[k] = val
+
+         return m
+
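The tool_calls field shows the three-way distinction this serializer encodes: fields never assigned stay at the UNSET sentinel and disappear from the dump, while an explicitly assigned None is preserved as a null (the __pydantic_fields_set__ check). A sketch, again assuming the mistralai_gcp.models re-exports:

from mistralai_gcp.models import DeltaMessage

print(DeltaMessage(role="assistant").model_dump())
# -> {'role': 'assistant'}            tool_calls never set: omitted

print(DeltaMessage(role="assistant", tool_calls=None).model_dump())
# -> {'role': 'assistant', 'tool_calls': None}   explicit null: kept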
mistralai_gcp/models/fimcompletionrequest.py
@@ -0,0 +1,99 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+ from pydantic import model_serializer
+ from typing import List, Optional, TypedDict, Union
+ from typing_extensions import NotRequired
+
+
+ class FIMCompletionRequestTypedDict(TypedDict):
+     prompt: str
+     r"""The text/code to complete."""
+     model: NotRequired[Nullable[str]]
+     r"""ID of the model to use. Only compatible for now with:
+       - `codestral-2405`
+       - `codestral-latest`
+     """
+     temperature: NotRequired[float]
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: NotRequired[float]
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: NotRequired[Nullable[int]]
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: NotRequired[Nullable[int]]
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: NotRequired[bool]
+     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+     stop: NotRequired[FIMCompletionRequestStopTypedDict]
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: NotRequired[Nullable[int]]
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     suffix: NotRequired[Nullable[str]]
+     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+
+
+ class FIMCompletionRequest(BaseModel):
+     prompt: str
+     r"""The text/code to complete."""
+     model: OptionalNullable[str] = UNSET
+     r"""ID of the model to use. Only compatible for now with:
+       - `codestral-2405`
+       - `codestral-latest`
+     """
+     temperature: Optional[float] = 0.7
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: Optional[float] = 1
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: OptionalNullable[int] = UNSET
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: OptionalNullable[int] = UNSET
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: Optional[bool] = False
+     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+     stop: Optional[FIMCompletionRequestStop] = None
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: OptionalNullable[int] = UNSET
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     suffix: OptionalNullable[str] = UNSET
+     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
+         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields
+                 or (
+                     k in optional_fields
+                     and k in nullable_fields
+                     and (
+                         self.__pydantic_fields_set__.intersection({n})
+                         or k in null_default_fields
+                     ) # pylint: disable=no-member
+                 )
+             ):
+                 m[k] = val
+
+         return m
+
+
+ FIMCompletionRequestStopTypedDict = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ FIMCompletionRequestStop = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
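To make the prompt/suffix mechanics concrete, a hypothetical fill-in-the-middle request (the model names come from the docstring above; the function body between prompt and suffix is what the model would be asked to produce):

from mistralai_gcp.models import FIMCompletionRequest

req = FIMCompletionRequest(
    model="codestral-latest",
    prompt="def add(a: int, b: int) -> int:\n",
    suffix="\n\nprint(add(2, 3))",
)
# The model fills the gap between prompt and suffix; with no suffix it
# would simply continue generating from the end of the prompt.
print(req.model_dump())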
mistralai_gcp/models/fimcompletionresponse.py
@@ -0,0 +1,27 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
+ from .usageinfo import UsageInfo, UsageInfoTypedDict
+ from mistralai_gcp.types import BaseModel
+ from typing import List, Optional, TypedDict
+ from typing_extensions import NotRequired
+
+
+ class FIMCompletionResponseTypedDict(TypedDict):
+     id: str
+     object: str
+     model: str
+     usage: UsageInfoTypedDict
+     created: NotRequired[int]
+     choices: NotRequired[List[ChatCompletionChoiceTypedDict]]
+
+
+ class FIMCompletionResponse(BaseModel):
+     id: str
+     object: str
+     model: str
+     usage: UsageInfo
+     created: Optional[int] = None
+     choices: Optional[List[ChatCompletionChoice]] = None
+
mistralai_gcp/models/fimcompletionstreamrequest.py
@@ -0,0 +1,97 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+ from pydantic import model_serializer
+ from typing import List, Optional, TypedDict, Union
+ from typing_extensions import NotRequired
+
+
+ class FIMCompletionStreamRequestTypedDict(TypedDict):
+     prompt: str
+     r"""The text/code to complete."""
+     model: NotRequired[Nullable[str]]
+     r"""ID of the model to use. Only compatible for now with:
+       - `codestral-2405`
+       - `codestral-latest`
+     """
+     temperature: NotRequired[float]
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: NotRequired[float]
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: NotRequired[Nullable[int]]
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: NotRequired[Nullable[int]]
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: NotRequired[bool]
+     stop: NotRequired[FIMCompletionStreamRequestStopTypedDict]
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: NotRequired[Nullable[int]]
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     suffix: NotRequired[Nullable[str]]
+     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+
+
+ class FIMCompletionStreamRequest(BaseModel):
+     prompt: str
+     r"""The text/code to complete."""
+     model: OptionalNullable[str] = UNSET
+     r"""ID of the model to use. Only compatible for now with:
+       - `codestral-2405`
+       - `codestral-latest`
+     """
+     temperature: Optional[float] = 0.7
+     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+     top_p: Optional[float] = 1
+     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+     max_tokens: OptionalNullable[int] = UNSET
+     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+     min_tokens: OptionalNullable[int] = UNSET
+     r"""The minimum number of tokens to generate in the completion."""
+     stream: Optional[bool] = True
+     stop: Optional[FIMCompletionStreamRequestStop] = None
+     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+     random_seed: OptionalNullable[int] = UNSET
+     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+     suffix: OptionalNullable[str] = UNSET
+     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
+         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields
+                 or (
+                     k in optional_fields
+                     and k in nullable_fields
+                     and (
+                         self.__pydantic_fields_set__.intersection({n})
+                         or k in null_default_fields
+                     ) # pylint: disable=no-member
+                 )
+             ):
+                 m[k] = val
+
+         return m
+
+
+ FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ FIMCompletionStreamRequestStop = Union[str, List[str]]
+ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+