mistralai 0.4.2__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (246)
  1. mistralai/__init__.py +5 -0
  2. mistralai/_hooks/__init__.py +5 -0
  3. mistralai/_hooks/custom_user_agent.py +16 -0
  4. mistralai/_hooks/deprecation_warning.py +26 -0
  5. mistralai/_hooks/registration.py +17 -0
  6. mistralai/_hooks/sdkhooks.py +57 -0
  7. mistralai/_hooks/types.py +76 -0
  8. mistralai/agents.py +434 -0
  9. mistralai/async_client.py +5 -413
  10. mistralai/basesdk.py +253 -0
  11. mistralai/chat.py +470 -0
  12. mistralai/client.py +5 -414
  13. mistralai/embeddings.py +182 -0
  14. mistralai/files.py +600 -84
  15. mistralai/fim.py +438 -0
  16. mistralai/fine_tuning.py +16 -0
  17. mistralai/httpclient.py +78 -0
  18. mistralai/jobs.py +822 -150
  19. mistralai/models/__init__.py +82 -0
  20. mistralai/models/agentscompletionrequest.py +96 -0
  21. mistralai/models/agentscompletionstreamrequest.py +92 -0
  22. mistralai/models/archiveftmodelout.py +19 -0
  23. mistralai/models/assistantmessage.py +53 -0
  24. mistralai/models/chatcompletionchoice.py +22 -0
  25. mistralai/models/chatcompletionrequest.py +109 -0
  26. mistralai/models/chatcompletionresponse.py +27 -0
  27. mistralai/models/chatcompletionstreamrequest.py +107 -0
  28. mistralai/models/checkpointout.py +25 -0
  29. mistralai/models/completionchunk.py +27 -0
  30. mistralai/models/completionevent.py +15 -0
  31. mistralai/models/completionresponsestreamchoice.py +48 -0
  32. mistralai/models/contentchunk.py +17 -0
  33. mistralai/models/delete_model_v1_models_model_id_deleteop.py +18 -0
  34. mistralai/models/deletefileout.py +24 -0
  35. mistralai/models/deletemodelout.py +25 -0
  36. mistralai/models/deltamessage.py +47 -0
  37. mistralai/models/detailedjobout.py +91 -0
  38. mistralai/models/embeddingrequest.py +61 -0
  39. mistralai/models/embeddingresponse.py +24 -0
  40. mistralai/models/embeddingresponsedata.py +19 -0
  41. mistralai/models/eventout.py +50 -0
  42. mistralai/models/files_api_routes_delete_fileop.py +16 -0
  43. mistralai/models/files_api_routes_retrieve_fileop.py +16 -0
  44. mistralai/models/files_api_routes_upload_fileop.py +51 -0
  45. mistralai/models/fileschema.py +71 -0
  46. mistralai/models/fimcompletionrequest.py +94 -0
  47. mistralai/models/fimcompletionresponse.py +27 -0
  48. mistralai/models/fimcompletionstreamrequest.py +92 -0
  49. mistralai/models/finetuneablemodel.py +8 -0
  50. mistralai/models/ftmodelcapabilitiesout.py +21 -0
  51. mistralai/models/ftmodelout.py +65 -0
  52. mistralai/models/function.py +19 -0
  53. mistralai/models/functioncall.py +22 -0
  54. mistralai/models/githubrepositoryin.py +52 -0
  55. mistralai/models/githubrepositoryout.py +52 -0
  56. mistralai/models/httpvalidationerror.py +23 -0
  57. mistralai/models/jobin.py +73 -0
  58. mistralai/models/jobmetadataout.py +54 -0
  59. mistralai/models/jobout.py +107 -0
  60. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +18 -0
  61. mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +18 -0
  62. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +15 -0
  63. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +18 -0
  64. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +81 -0
  65. mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +16 -0
  66. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +18 -0
  67. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +21 -0
  68. mistralai/models/jobsout.py +20 -0
  69. mistralai/models/legacyjobmetadataout.py +80 -0
  70. mistralai/models/listfilesout.py +17 -0
  71. mistralai/models/metricout.py +50 -0
  72. mistralai/models/modelcapabilities.py +21 -0
  73. mistralai/models/modelcard.py +66 -0
  74. mistralai/models/modellist.py +18 -0
  75. mistralai/models/responseformat.py +18 -0
  76. mistralai/models/retrieve_model_v1_models_model_id_getop.py +18 -0
  77. mistralai/models/retrievefileout.py +71 -0
  78. mistralai/models/sampletype.py +7 -0
  79. mistralai/models/sdkerror.py +22 -0
  80. mistralai/models/security.py +16 -0
  81. mistralai/models/source.py +7 -0
  82. mistralai/models/systemmessage.py +26 -0
  83. mistralai/models/textchunk.py +17 -0
  84. mistralai/models/tool.py +18 -0
  85. mistralai/models/toolcall.py +20 -0
  86. mistralai/models/toolmessage.py +50 -0
  87. mistralai/models/trainingfile.py +17 -0
  88. mistralai/models/trainingparameters.py +48 -0
  89. mistralai/models/trainingparametersin.py +56 -0
  90. mistralai/models/unarchiveftmodelout.py +19 -0
  91. mistralai/models/updateftmodelin.py +44 -0
  92. mistralai/models/uploadfileout.py +71 -0
  93. mistralai/models/usageinfo.py +18 -0
  94. mistralai/models/usermessage.py +26 -0
  95. mistralai/models/validationerror.py +24 -0
  96. mistralai/models/wandbintegration.py +56 -0
  97. mistralai/models/wandbintegrationout.py +52 -0
  98. mistralai/models_.py +928 -0
  99. mistralai/py.typed +1 -0
  100. mistralai/sdk.py +119 -0
  101. mistralai/sdkconfiguration.py +54 -0
  102. mistralai/types/__init__.py +21 -0
  103. mistralai/types/basemodel.py +39 -0
  104. mistralai/utils/__init__.py +86 -0
  105. mistralai/utils/annotations.py +19 -0
  106. mistralai/utils/enums.py +34 -0
  107. mistralai/utils/eventstreaming.py +178 -0
  108. mistralai/utils/forms.py +207 -0
  109. mistralai/utils/headers.py +136 -0
  110. mistralai/utils/logger.py +16 -0
  111. mistralai/utils/metadata.py +118 -0
  112. mistralai/utils/queryparams.py +203 -0
  113. mistralai/utils/requestbodies.py +66 -0
  114. mistralai/utils/retries.py +216 -0
  115. mistralai/utils/security.py +185 -0
  116. mistralai/utils/serializers.py +181 -0
  117. mistralai/utils/url.py +150 -0
  118. mistralai/utils/values.py +128 -0
  119. {mistralai-0.4.2.dist-info → mistralai-1.0.0.dist-info}/LICENSE +1 -1
  120. mistralai-1.0.0.dist-info/METADATA +695 -0
  121. mistralai-1.0.0.dist-info/RECORD +235 -0
  122. mistralai_azure/__init__.py +5 -0
  123. mistralai_azure/_hooks/__init__.py +5 -0
  124. mistralai_azure/_hooks/custom_user_agent.py +16 -0
  125. mistralai_azure/_hooks/registration.py +15 -0
  126. mistralai_azure/_hooks/sdkhooks.py +57 -0
  127. mistralai_azure/_hooks/types.py +76 -0
  128. mistralai_azure/basesdk.py +253 -0
  129. mistralai_azure/chat.py +470 -0
  130. mistralai_azure/httpclient.py +78 -0
  131. mistralai_azure/models/__init__.py +28 -0
  132. mistralai_azure/models/assistantmessage.py +53 -0
  133. mistralai_azure/models/chatcompletionchoice.py +22 -0
  134. mistralai_azure/models/chatcompletionrequest.py +109 -0
  135. mistralai_azure/models/chatcompletionresponse.py +27 -0
  136. mistralai_azure/models/chatcompletionstreamrequest.py +107 -0
  137. mistralai_azure/models/completionchunk.py +27 -0
  138. mistralai_azure/models/completionevent.py +15 -0
  139. mistralai_azure/models/completionresponsestreamchoice.py +48 -0
  140. mistralai_azure/models/contentchunk.py +17 -0
  141. mistralai_azure/models/deltamessage.py +47 -0
  142. mistralai_azure/models/function.py +19 -0
  143. mistralai_azure/models/functioncall.py +22 -0
  144. mistralai_azure/models/httpvalidationerror.py +23 -0
  145. mistralai_azure/models/responseformat.py +18 -0
  146. mistralai_azure/models/sdkerror.py +22 -0
  147. mistralai_azure/models/security.py +16 -0
  148. mistralai_azure/models/systemmessage.py +26 -0
  149. mistralai_azure/models/textchunk.py +17 -0
  150. mistralai_azure/models/tool.py +18 -0
  151. mistralai_azure/models/toolcall.py +20 -0
  152. mistralai_azure/models/toolmessage.py +50 -0
  153. mistralai_azure/models/usageinfo.py +18 -0
  154. mistralai_azure/models/usermessage.py +26 -0
  155. mistralai_azure/models/validationerror.py +24 -0
  156. mistralai_azure/py.typed +1 -0
  157. mistralai_azure/sdk.py +107 -0
  158. mistralai_azure/sdkconfiguration.py +54 -0
  159. mistralai_azure/types/__init__.py +21 -0
  160. mistralai_azure/types/basemodel.py +39 -0
  161. mistralai_azure/utils/__init__.py +84 -0
  162. mistralai_azure/utils/annotations.py +19 -0
  163. mistralai_azure/utils/enums.py +34 -0
  164. mistralai_azure/utils/eventstreaming.py +178 -0
  165. mistralai_azure/utils/forms.py +207 -0
  166. mistralai_azure/utils/headers.py +136 -0
  167. mistralai_azure/utils/logger.py +16 -0
  168. mistralai_azure/utils/metadata.py +118 -0
  169. mistralai_azure/utils/queryparams.py +203 -0
  170. mistralai_azure/utils/requestbodies.py +66 -0
  171. mistralai_azure/utils/retries.py +216 -0
  172. mistralai_azure/utils/security.py +168 -0
  173. mistralai_azure/utils/serializers.py +181 -0
  174. mistralai_azure/utils/url.py +150 -0
  175. mistralai_azure/utils/values.py +128 -0
  176. mistralai_gcp/__init__.py +5 -0
  177. mistralai_gcp/_hooks/__init__.py +5 -0
  178. mistralai_gcp/_hooks/custom_user_agent.py +16 -0
  179. mistralai_gcp/_hooks/registration.py +15 -0
  180. mistralai_gcp/_hooks/sdkhooks.py +57 -0
  181. mistralai_gcp/_hooks/types.py +76 -0
  182. mistralai_gcp/basesdk.py +253 -0
  183. mistralai_gcp/chat.py +458 -0
  184. mistralai_gcp/fim.py +438 -0
  185. mistralai_gcp/httpclient.py +78 -0
  186. mistralai_gcp/models/__init__.py +31 -0
  187. mistralai_gcp/models/assistantmessage.py +53 -0
  188. mistralai_gcp/models/chatcompletionchoice.py +22 -0
  189. mistralai_gcp/models/chatcompletionrequest.py +105 -0
  190. mistralai_gcp/models/chatcompletionresponse.py +27 -0
  191. mistralai_gcp/models/chatcompletionstreamrequest.py +103 -0
  192. mistralai_gcp/models/completionchunk.py +27 -0
  193. mistralai_gcp/models/completionevent.py +15 -0
  194. mistralai_gcp/models/completionresponsestreamchoice.py +48 -0
  195. mistralai_gcp/models/contentchunk.py +17 -0
  196. mistralai_gcp/models/deltamessage.py +47 -0
  197. mistralai_gcp/models/fimcompletionrequest.py +94 -0
  198. mistralai_gcp/models/fimcompletionresponse.py +27 -0
  199. mistralai_gcp/models/fimcompletionstreamrequest.py +92 -0
  200. mistralai_gcp/models/function.py +19 -0
  201. mistralai_gcp/models/functioncall.py +22 -0
  202. mistralai_gcp/models/httpvalidationerror.py +23 -0
  203. mistralai_gcp/models/responseformat.py +18 -0
  204. mistralai_gcp/models/sdkerror.py +22 -0
  205. mistralai_gcp/models/security.py +16 -0
  206. mistralai_gcp/models/systemmessage.py +26 -0
  207. mistralai_gcp/models/textchunk.py +17 -0
  208. mistralai_gcp/models/tool.py +18 -0
  209. mistralai_gcp/models/toolcall.py +20 -0
  210. mistralai_gcp/models/toolmessage.py +50 -0
  211. mistralai_gcp/models/usageinfo.py +18 -0
  212. mistralai_gcp/models/usermessage.py +26 -0
  213. mistralai_gcp/models/validationerror.py +24 -0
  214. mistralai_gcp/py.typed +1 -0
  215. mistralai_gcp/sdk.py +174 -0
  216. mistralai_gcp/sdkconfiguration.py +54 -0
  217. mistralai_gcp/types/__init__.py +21 -0
  218. mistralai_gcp/types/basemodel.py +39 -0
  219. mistralai_gcp/utils/__init__.py +84 -0
  220. mistralai_gcp/utils/annotations.py +19 -0
  221. mistralai_gcp/utils/enums.py +34 -0
  222. mistralai_gcp/utils/eventstreaming.py +178 -0
  223. mistralai_gcp/utils/forms.py +207 -0
  224. mistralai_gcp/utils/headers.py +136 -0
  225. mistralai_gcp/utils/logger.py +16 -0
  226. mistralai_gcp/utils/metadata.py +118 -0
  227. mistralai_gcp/utils/queryparams.py +203 -0
  228. mistralai_gcp/utils/requestbodies.py +66 -0
  229. mistralai_gcp/utils/retries.py +216 -0
  230. mistralai_gcp/utils/security.py +168 -0
  231. mistralai_gcp/utils/serializers.py +181 -0
  232. mistralai_gcp/utils/url.py +150 -0
  233. mistralai_gcp/utils/values.py +128 -0
  234. py.typed +1 -0
  235. mistralai/client_base.py +0 -211
  236. mistralai/constants.py +0 -5
  237. mistralai/exceptions.py +0 -54
  238. mistralai/models/chat_completion.py +0 -93
  239. mistralai/models/common.py +0 -9
  240. mistralai/models/embeddings.py +0 -19
  241. mistralai/models/files.py +0 -23
  242. mistralai/models/jobs.py +0 -100
  243. mistralai/models/models.py +0 -39
  244. mistralai-0.4.2.dist-info/METADATA +0 -82
  245. mistralai-0.4.2.dist-info/RECORD +0 -20
  246. {mistralai-0.4.2.dist-info → mistralai-1.0.0.dist-info}/WHEEL +0 -0
mistralai/client.py CHANGED
@@ -1,423 +1,14 @@
1
- import posixpath
2
- import time
3
- from json import JSONDecodeError
4
- from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
1
+ from typing import Optional
5
2
 
6
- from httpx import Client, ConnectError, HTTPTransport, RequestError, Response
3
+ MIGRATION_MESSAGE = "This client is deprecated. To migrate to the new client, please refer to this guide: https://github.com/mistralai/client-python/blob/main/MIGRATION.md. If you need to use this client anyway, pin your version to 0.4.2."
7
4
 
8
- from mistralai.client_base import ClientBase
9
- from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES
10
- from mistralai.exceptions import (
11
- MistralAPIException,
12
- MistralAPIStatusException,
13
- MistralConnectionException,
14
- MistralException,
15
- )
16
- from mistralai.files import FilesClient
17
- from mistralai.jobs import JobsClient
18
- from mistralai.models.chat_completion import (
19
- ChatCompletionResponse,
20
- ChatCompletionStreamResponse,
21
- ResponseFormat,
22
- ToolChoice,
23
- )
24
- from mistralai.models.embeddings import EmbeddingResponse
25
- from mistralai.models.models import ModelDeleted, ModelList
26
-
27
-
28
- class MistralClient(ClientBase):
29
- """
30
- Synchronous wrapper around the async client
31
- """
32
5
 
6
+ class MistralClient:
33
7
  def __init__(
34
8
  self,
35
9
  api_key: Optional[str] = None,
36
- endpoint: str = ENDPOINT,
10
+ endpoint: str = "",
37
11
  max_retries: int = 5,
38
12
  timeout: int = 120,
39
13
  ):
40
- super().__init__(endpoint, api_key, max_retries, timeout)
41
-
42
- self._client = Client(
43
- follow_redirects=True,
44
- timeout=self._timeout,
45
- transport=HTTPTransport(retries=self._max_retries),
46
- )
47
- self.files = FilesClient(self)
48
- self.jobs = JobsClient(self)
49
-
50
- def __del__(self) -> None:
51
- self._client.close()
52
-
53
- def _check_response_status_codes(self, response: Response) -> None:
54
- if response.status_code in RETRY_STATUS_CODES:
55
- raise MistralAPIStatusException.from_response(
56
- response,
57
- message=f"Status: {response.status_code}. Message: {response.text}",
58
- )
59
- elif 400 <= response.status_code < 500:
60
- if response.stream:
61
- response.read()
62
- raise MistralAPIException.from_response(
63
- response,
64
- message=f"Status: {response.status_code}. Message: {response.text}",
65
- )
66
- elif response.status_code >= 500:
67
- if response.stream:
68
- response.read()
69
- raise MistralException(
70
- message=f"Status: {response.status_code}. Message: {response.text}",
71
- )
72
-
73
- def _check_streaming_response(self, response: Response) -> None:
74
- self._check_response_status_codes(response)
75
-
76
- def _check_response(self, response: Response) -> Dict[str, Any]:
77
- self._check_response_status_codes(response)
78
-
79
- json_response: Dict[str, Any] = response.json()
80
-
81
- if "object" not in json_response:
82
- raise MistralException(message=f"Unexpected response: {json_response}")
83
- if "error" == json_response["object"]: # has errors
84
- raise MistralAPIException.from_response(
85
- response,
86
- message=json_response["message"],
87
- )
88
-
89
- return json_response
90
-
91
- def _request(
92
- self,
93
- method: str,
94
- json: Optional[Dict[str, Any]],
95
- path: str,
96
- stream: bool = False,
97
- attempt: int = 1,
98
- data: Optional[Dict[str, Any]] = None,
99
- check_model_deprecation_headers_callback: Optional[Callable] = None,
100
- **kwargs: Any,
101
- ) -> Iterator[Dict[str, Any]]:
102
- accept_header = "text/event-stream" if stream else "application/json"
103
- headers = {
104
- "Accept": accept_header,
105
- "User-Agent": f"mistral-client-python/{self._version}",
106
- "Authorization": f"Bearer {self._api_key}",
107
- }
108
-
109
- if json is not None:
110
- headers["Content-Type"] = "application/json"
111
-
112
- url = posixpath.join(self._endpoint, path)
113
-
114
- self._logger.debug(f"Sending request: {method} {url} {json}")
115
-
116
- response: Response
117
-
118
- try:
119
- if stream:
120
- with self._client.stream(
121
- method,
122
- url,
123
- headers=headers,
124
- json=json,
125
- data=data,
126
- **kwargs,
127
- ) as response:
128
- if check_model_deprecation_headers_callback:
129
- check_model_deprecation_headers_callback(response.headers)
130
- self._check_streaming_response(response)
131
-
132
- for line in response.iter_lines():
133
- json_streamed_response = self._process_line(line)
134
- if json_streamed_response:
135
- yield json_streamed_response
136
-
137
- else:
138
- response = self._client.request(
139
- method,
140
- url,
141
- headers=headers,
142
- json=json,
143
- data=data,
144
- **kwargs,
145
- )
146
- if check_model_deprecation_headers_callback:
147
- check_model_deprecation_headers_callback(response.headers)
148
- yield self._check_response(response)
149
-
150
- except ConnectError as e:
151
- raise MistralConnectionException(str(e)) from e
152
- except RequestError as e:
153
- raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from e
154
- except JSONDecodeError as e:
155
- raise MistralAPIException.from_response(
156
- response,
157
- message=f"Failed to decode json body: {response.text}",
158
- ) from e
159
- except MistralAPIStatusException as e:
160
- attempt += 1
161
- if attempt > self._max_retries:
162
- raise MistralAPIStatusException.from_response(response, message=str(e)) from e
163
- backoff = 2.0**attempt # exponential backoff
164
- time.sleep(backoff)
165
-
166
- # Retry as a generator
167
- for r in self._request(method, json, path, stream=stream, attempt=attempt):
168
- yield r
169
-
170
- def chat(
171
- self,
172
- messages: List[Any],
173
- model: Optional[str] = None,
174
- tools: Optional[List[Dict[str, Any]]] = None,
175
- temperature: Optional[float] = None,
176
- max_tokens: Optional[int] = None,
177
- top_p: Optional[float] = None,
178
- random_seed: Optional[int] = None,
179
- safe_mode: bool = False,
180
- safe_prompt: bool = False,
181
- tool_choice: Optional[Union[str, ToolChoice]] = None,
182
- response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None,
183
- ) -> ChatCompletionResponse:
184
- """A chat endpoint that returns a single response.
185
-
186
- Args:
187
- model (str): model the name of the model to chat with, e.g. mistral-tiny
188
- messages (List[Any]): messages an array of messages to chat with, e.g.
189
- [{role: 'user', content: 'What is the best French cheese?'}]
190
- tools (Optional[List[Function]], optional): a list of tools to use.
191
- temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
192
- max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
193
- top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
194
- Defaults to None.
195
- random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
196
- safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
197
- safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.
198
-
199
- Returns:
200
- ChatCompletionResponse: a response object containing the generated text.
201
- """
202
- request = self._make_chat_request(
203
- messages,
204
- model,
205
- tools=tools,
206
- temperature=temperature,
207
- max_tokens=max_tokens,
208
- top_p=top_p,
209
- random_seed=random_seed,
210
- stream=False,
211
- safe_prompt=safe_mode or safe_prompt,
212
- tool_choice=tool_choice,
213
- response_format=response_format,
214
- )
215
-
216
- single_response = self._request(
217
- "post",
218
- request,
219
- "v1/chat/completions",
220
- check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
221
- )
222
-
223
- for response in single_response:
224
- return ChatCompletionResponse(**response)
225
-
226
- raise MistralException("No response received")
227
-
228
- def chat_stream(
229
- self,
230
- messages: List[Any],
231
- model: Optional[str] = None,
232
- tools: Optional[List[Dict[str, Any]]] = None,
233
- temperature: Optional[float] = None,
234
- max_tokens: Optional[int] = None,
235
- top_p: Optional[float] = None,
236
- random_seed: Optional[int] = None,
237
- safe_mode: bool = False,
238
- safe_prompt: bool = False,
239
- tool_choice: Optional[Union[str, ToolChoice]] = None,
240
- response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None,
241
- ) -> Iterable[ChatCompletionStreamResponse]:
242
- """A chat endpoint that streams responses.
243
-
244
- Args:
245
- model (str): model the name of the model to chat with, e.g. mistral-tiny
246
- messages (List[Any]): messages an array of messages to chat with, e.g.
247
- [{role: 'user', content: 'What is the best French cheese?'}]
248
- tools (Optional[List[Function]], optional): a list of tools to use.
249
- temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
250
- max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
251
- top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
252
- Defaults to None.
253
- random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
254
- safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
255
- safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.
256
-
257
- Returns:
258
- Iterable[ChatCompletionStreamResponse]:
259
- A generator that yields ChatCompletionStreamResponse objects.
260
- """
261
- request = self._make_chat_request(
262
- messages,
263
- model,
264
- tools=tools,
265
- temperature=temperature,
266
- max_tokens=max_tokens,
267
- top_p=top_p,
268
- random_seed=random_seed,
269
- stream=True,
270
- safe_prompt=safe_mode or safe_prompt,
271
- tool_choice=tool_choice,
272
- response_format=response_format,
273
- )
274
-
275
- response = self._request(
276
- "post",
277
- request,
278
- "v1/chat/completions",
279
- stream=True,
280
- check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
281
- )
282
-
283
- for json_streamed_response in response:
284
- yield ChatCompletionStreamResponse(**json_streamed_response)
285
-
286
- def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse:
287
- """An embeddings endpoint that returns embeddings for a single, or batch of inputs
288
-
289
- Args:
290
- model (str): The embedding model to use, e.g. mistral-embed
291
- input (Union[str, List[str]]): The input to embed,
292
- e.g. ['What is the best French cheese?']
293
-
294
- Returns:
295
- EmbeddingResponse: A response object containing the embeddings.
296
- """
297
- request = {"model": model, "input": input}
298
- singleton_response = self._request(
299
- "post",
300
- request,
301
- "v1/embeddings",
302
- check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
303
- )
304
-
305
- for response in singleton_response:
306
- return EmbeddingResponse(**response)
307
-
308
- raise MistralException("No response received")
309
-
310
- def list_models(self) -> ModelList:
311
- """Returns a list of the available models
312
-
313
- Returns:
314
- ModelList: A response object containing the list of models.
315
- """
316
- singleton_response = self._request("get", {}, "v1/models")
317
-
318
- for response in singleton_response:
319
- return ModelList(**response)
320
-
321
- raise MistralException("No response received")
322
-
323
- def delete_model(self, model_id: str) -> ModelDeleted:
324
- single_response = self._request("delete", {}, f"v1/models/{model_id}")
325
-
326
- for response in single_response:
327
- return ModelDeleted(**response)
328
-
329
- raise MistralException("No response received")
330
-
331
- def completion(
332
- self,
333
- model: str,
334
- prompt: str,
335
- suffix: Optional[str] = None,
336
- temperature: Optional[float] = None,
337
- max_tokens: Optional[int] = None,
338
- top_p: Optional[float] = None,
339
- random_seed: Optional[int] = None,
340
- stop: Optional[List[str]] = None,
341
- ) -> ChatCompletionResponse:
342
- """A completion endpoint that returns a single response.
343
-
344
- Args:
345
- model (str): model the name of the model to get completion with, e.g. codestral-latest
346
- prompt (str): the prompt to complete
347
- suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
348
- temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
349
- max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
350
- top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
351
- Defaults to None.
352
- random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
353
- stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
354
-
355
- Returns:
356
- Dict[str, Any]: a response object containing the generated text.
357
- """
358
- request = self._make_completion_request(
359
- prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop
360
- )
361
-
362
- single_response = self._request(
363
- "post",
364
- request,
365
- "v1/fim/completions",
366
- stream=False,
367
- check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
368
- )
369
-
370
- for response in single_response:
371
- return ChatCompletionResponse(**response)
372
-
373
- raise MistralException("No response received")
374
-
375
- def completion_stream(
376
- self,
377
- model: str,
378
- prompt: str,
379
- suffix: Optional[str] = None,
380
- temperature: Optional[float] = None,
381
- max_tokens: Optional[int] = None,
382
- top_p: Optional[float] = None,
383
- random_seed: Optional[int] = None,
384
- stop: Optional[List[str]] = None,
385
- ) -> Iterable[ChatCompletionStreamResponse]:
386
- """An asynchronous completion endpoint that streams responses.
387
-
388
- Args:
389
- model (str): model the name of the model to get completions with, e.g. codestral-latest
390
- prompt (str): the prompt to complete
391
- suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
392
- temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
393
- max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
394
- top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
395
- Defaults to None.
396
- random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
397
- stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
398
-
399
- Returns:
400
- Iterable[Dict[str, Any]]: a generator that yields response objects containing the generated text.
401
- """
402
- request = self._make_completion_request(
403
- prompt,
404
- model,
405
- suffix,
406
- temperature,
407
- max_tokens,
408
- top_p,
409
- random_seed,
410
- stop,
411
- stream=True,
412
- )
413
-
414
- response = self._request(
415
- "post",
416
- request,
417
- "v1/fim/completions",
418
- stream=True,
419
- check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
420
- )
421
-
422
- for json_streamed_response in response:
423
- yield ChatCompletionStreamResponse(**json_streamed_response)
14
+ raise NotImplementedError(MIGRATION_MESSAGE)
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from mistralai import models, utils
from mistralai._hooks import HookContext
from mistralai.types import OptionalNullable, UNSET
from mistralai.utils import get_security_from_env
from typing import Any, Optional, Union


class Embeddings(BaseSDK):
    r"""Embeddings API."""

    # Status codes retried when a retry configuration is active.
    _RETRY_STATUS_CODES = ["429", "500", "502", "503", "504"]

    def _build_create_call(
        self,
        inputs: Union[models.Inputs, models.InputsTypedDict],
        model: str,
        encoding_format: OptionalNullable[str],
        retries: OptionalNullable[utils.RetryConfig],
        server_url: Optional[str],
        timeout_ms: Optional[int],
    ):
        """Build the HTTP request and retry config shared by create/create_async."""
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        base_url = server_url if server_url is not None else None

        request = models.EmbeddingRequest(
            inputs=inputs,
            model=model,
            encoding_format=encoding_format,
        )

        req = self.build_request(
            method="POST",
            path="/v1/embeddings",
            base_url=base_url,
            url_variables=None,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.EmbeddingRequest
            ),
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide default when provided.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Fresh list so downstream mutation cannot affect other calls.
            retry_config = (retries, list(self._RETRY_STATUS_CODES))

        return req, retry_config

    def _handle_create_response(self, http_res) -> Optional[models.EmbeddingResponse]:
        """Map the HTTP response to a model, raising typed errors on failure.

        :raises models.HTTPValidationError: on a 422 JSON validation response.
        :raises models.SDKError: on any other 4XX/5XX or unexpected response.
        """
        data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse])
        if utils.match_response(http_res, "422", "application/json"):
            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
            raise models.HTTPValidationError(data=data)
        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res.text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res.text,
            http_res,
        )

    def _hook_context(self) -> HookContext:
        """HookContext shared by the sync and async create operations."""
        return HookContext(
            operation_id="embeddings_v1_embeddings_post",
            oauth2_scopes=[],
            security_source=get_security_from_env(
                self.sdk_configuration.security, models.Security
            ),
        )

    def create(
        self,
        *,
        inputs: Union[models.Inputs, models.InputsTypedDict],
        model: str,
        encoding_format: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
    ) -> Optional[models.EmbeddingResponse]:
        r"""Embeddings

        Embeddings

        :param inputs: Text to embed.
        :param model: ID of the model to use.
        :param encoding_format: The format to return the embeddings in.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        """
        req, retry_config = self._build_create_call(
            inputs, model, encoding_format, retries, server_url, timeout_ms
        )

        http_res = self.do_request(
            hook_ctx=self._hook_context(),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        return self._handle_create_response(http_res)

    async def create_async(
        self,
        *,
        inputs: Union[models.Inputs, models.InputsTypedDict],
        model: str,
        encoding_format: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
    ) -> Optional[models.EmbeddingResponse]:
        r"""Embeddings

        Embeddings

        :param inputs: Text to embed.
        :param model: ID of the model to use.
        :param encoding_format: The format to return the embeddings in.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        """
        req, retry_config = self._build_create_call(
            inputs, model, encoding_format, retries, server_url, timeout_ms
        )

        http_res = await self.do_request_async(
            hook_ctx=self._hook_context(),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        return self._handle_create_response(http_res)