mistralai 0.4.2__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/__init__.py +5 -0
- mistralai/_hooks/__init__.py +5 -0
- mistralai/_hooks/custom_user_agent.py +16 -0
- mistralai/_hooks/deprecation_warning.py +26 -0
- mistralai/_hooks/registration.py +17 -0
- mistralai/_hooks/sdkhooks.py +57 -0
- mistralai/_hooks/types.py +76 -0
- mistralai/agents.py +434 -0
- mistralai/async_client.py +5 -413
- mistralai/basesdk.py +253 -0
- mistralai/chat.py +470 -0
- mistralai/client.py +5 -414
- mistralai/embeddings.py +182 -0
- mistralai/files.py +600 -84
- mistralai/fim.py +438 -0
- mistralai/fine_tuning.py +16 -0
- mistralai/httpclient.py +78 -0
- mistralai/jobs.py +822 -150
- mistralai/models/__init__.py +82 -0
- mistralai/models/agentscompletionrequest.py +96 -0
- mistralai/models/agentscompletionstreamrequest.py +92 -0
- mistralai/models/archiveftmodelout.py +19 -0
- mistralai/models/assistantmessage.py +53 -0
- mistralai/models/chatcompletionchoice.py +22 -0
- mistralai/models/chatcompletionrequest.py +109 -0
- mistralai/models/chatcompletionresponse.py +27 -0
- mistralai/models/chatcompletionstreamrequest.py +107 -0
- mistralai/models/checkpointout.py +25 -0
- mistralai/models/completionchunk.py +27 -0
- mistralai/models/completionevent.py +15 -0
- mistralai/models/completionresponsestreamchoice.py +48 -0
- mistralai/models/contentchunk.py +17 -0
- mistralai/models/delete_model_v1_models_model_id_deleteop.py +18 -0
- mistralai/models/deletefileout.py +24 -0
- mistralai/models/deletemodelout.py +25 -0
- mistralai/models/deltamessage.py +47 -0
- mistralai/models/detailedjobout.py +91 -0
- mistralai/models/embeddingrequest.py +61 -0
- mistralai/models/embeddingresponse.py +24 -0
- mistralai/models/embeddingresponsedata.py +19 -0
- mistralai/models/eventout.py +50 -0
- mistralai/models/files_api_routes_delete_fileop.py +16 -0
- mistralai/models/files_api_routes_retrieve_fileop.py +16 -0
- mistralai/models/files_api_routes_upload_fileop.py +51 -0
- mistralai/models/fileschema.py +71 -0
- mistralai/models/fimcompletionrequest.py +94 -0
- mistralai/models/fimcompletionresponse.py +27 -0
- mistralai/models/fimcompletionstreamrequest.py +92 -0
- mistralai/models/finetuneablemodel.py +8 -0
- mistralai/models/ftmodelcapabilitiesout.py +21 -0
- mistralai/models/ftmodelout.py +65 -0
- mistralai/models/function.py +19 -0
- mistralai/models/functioncall.py +22 -0
- mistralai/models/githubrepositoryin.py +52 -0
- mistralai/models/githubrepositoryout.py +52 -0
- mistralai/models/httpvalidationerror.py +23 -0
- mistralai/models/jobin.py +73 -0
- mistralai/models/jobmetadataout.py +54 -0
- mistralai/models/jobout.py +107 -0
- mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +18 -0
- mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +18 -0
- mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +15 -0
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +18 -0
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +81 -0
- mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +16 -0
- mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +18 -0
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +21 -0
- mistralai/models/jobsout.py +20 -0
- mistralai/models/legacyjobmetadataout.py +80 -0
- mistralai/models/listfilesout.py +17 -0
- mistralai/models/metricout.py +50 -0
- mistralai/models/modelcapabilities.py +21 -0
- mistralai/models/modelcard.py +66 -0
- mistralai/models/modellist.py +18 -0
- mistralai/models/responseformat.py +18 -0
- mistralai/models/retrieve_model_v1_models_model_id_getop.py +18 -0
- mistralai/models/retrievefileout.py +71 -0
- mistralai/models/sampletype.py +7 -0
- mistralai/models/sdkerror.py +22 -0
- mistralai/models/security.py +16 -0
- mistralai/models/source.py +7 -0
- mistralai/models/systemmessage.py +26 -0
- mistralai/models/textchunk.py +17 -0
- mistralai/models/tool.py +18 -0
- mistralai/models/toolcall.py +20 -0
- mistralai/models/toolmessage.py +50 -0
- mistralai/models/trainingfile.py +17 -0
- mistralai/models/trainingparameters.py +48 -0
- mistralai/models/trainingparametersin.py +56 -0
- mistralai/models/unarchiveftmodelout.py +19 -0
- mistralai/models/updateftmodelin.py +44 -0
- mistralai/models/uploadfileout.py +71 -0
- mistralai/models/usageinfo.py +18 -0
- mistralai/models/usermessage.py +26 -0
- mistralai/models/validationerror.py +24 -0
- mistralai/models/wandbintegration.py +56 -0
- mistralai/models/wandbintegrationout.py +52 -0
- mistralai/models_.py +928 -0
- mistralai/py.typed +1 -0
- mistralai/sdk.py +119 -0
- mistralai/sdkconfiguration.py +54 -0
- mistralai/types/__init__.py +21 -0
- mistralai/types/basemodel.py +39 -0
- mistralai/utils/__init__.py +86 -0
- mistralai/utils/annotations.py +19 -0
- mistralai/utils/enums.py +34 -0
- mistralai/utils/eventstreaming.py +178 -0
- mistralai/utils/forms.py +207 -0
- mistralai/utils/headers.py +136 -0
- mistralai/utils/logger.py +16 -0
- mistralai/utils/metadata.py +118 -0
- mistralai/utils/queryparams.py +203 -0
- mistralai/utils/requestbodies.py +66 -0
- mistralai/utils/retries.py +216 -0
- mistralai/utils/security.py +185 -0
- mistralai/utils/serializers.py +181 -0
- mistralai/utils/url.py +150 -0
- mistralai/utils/values.py +128 -0
- {mistralai-0.4.2.dist-info → mistralai-1.0.0.dist-info}/LICENSE +1 -1
- mistralai-1.0.0.dist-info/METADATA +695 -0
- mistralai-1.0.0.dist-info/RECORD +235 -0
- mistralai_azure/__init__.py +5 -0
- mistralai_azure/_hooks/__init__.py +5 -0
- mistralai_azure/_hooks/custom_user_agent.py +16 -0
- mistralai_azure/_hooks/registration.py +15 -0
- mistralai_azure/_hooks/sdkhooks.py +57 -0
- mistralai_azure/_hooks/types.py +76 -0
- mistralai_azure/basesdk.py +253 -0
- mistralai_azure/chat.py +470 -0
- mistralai_azure/httpclient.py +78 -0
- mistralai_azure/models/__init__.py +28 -0
- mistralai_azure/models/assistantmessage.py +53 -0
- mistralai_azure/models/chatcompletionchoice.py +22 -0
- mistralai_azure/models/chatcompletionrequest.py +109 -0
- mistralai_azure/models/chatcompletionresponse.py +27 -0
- mistralai_azure/models/chatcompletionstreamrequest.py +107 -0
- mistralai_azure/models/completionchunk.py +27 -0
- mistralai_azure/models/completionevent.py +15 -0
- mistralai_azure/models/completionresponsestreamchoice.py +48 -0
- mistralai_azure/models/contentchunk.py +17 -0
- mistralai_azure/models/deltamessage.py +47 -0
- mistralai_azure/models/function.py +19 -0
- mistralai_azure/models/functioncall.py +22 -0
- mistralai_azure/models/httpvalidationerror.py +23 -0
- mistralai_azure/models/responseformat.py +18 -0
- mistralai_azure/models/sdkerror.py +22 -0
- mistralai_azure/models/security.py +16 -0
- mistralai_azure/models/systemmessage.py +26 -0
- mistralai_azure/models/textchunk.py +17 -0
- mistralai_azure/models/tool.py +18 -0
- mistralai_azure/models/toolcall.py +20 -0
- mistralai_azure/models/toolmessage.py +50 -0
- mistralai_azure/models/usageinfo.py +18 -0
- mistralai_azure/models/usermessage.py +26 -0
- mistralai_azure/models/validationerror.py +24 -0
- mistralai_azure/py.typed +1 -0
- mistralai_azure/sdk.py +107 -0
- mistralai_azure/sdkconfiguration.py +54 -0
- mistralai_azure/types/__init__.py +21 -0
- mistralai_azure/types/basemodel.py +39 -0
- mistralai_azure/utils/__init__.py +84 -0
- mistralai_azure/utils/annotations.py +19 -0
- mistralai_azure/utils/enums.py +34 -0
- mistralai_azure/utils/eventstreaming.py +178 -0
- mistralai_azure/utils/forms.py +207 -0
- mistralai_azure/utils/headers.py +136 -0
- mistralai_azure/utils/logger.py +16 -0
- mistralai_azure/utils/metadata.py +118 -0
- mistralai_azure/utils/queryparams.py +203 -0
- mistralai_azure/utils/requestbodies.py +66 -0
- mistralai_azure/utils/retries.py +216 -0
- mistralai_azure/utils/security.py +168 -0
- mistralai_azure/utils/serializers.py +181 -0
- mistralai_azure/utils/url.py +150 -0
- mistralai_azure/utils/values.py +128 -0
- mistralai_gcp/__init__.py +5 -0
- mistralai_gcp/_hooks/__init__.py +5 -0
- mistralai_gcp/_hooks/custom_user_agent.py +16 -0
- mistralai_gcp/_hooks/registration.py +15 -0
- mistralai_gcp/_hooks/sdkhooks.py +57 -0
- mistralai_gcp/_hooks/types.py +76 -0
- mistralai_gcp/basesdk.py +253 -0
- mistralai_gcp/chat.py +458 -0
- mistralai_gcp/fim.py +438 -0
- mistralai_gcp/httpclient.py +78 -0
- mistralai_gcp/models/__init__.py +31 -0
- mistralai_gcp/models/assistantmessage.py +53 -0
- mistralai_gcp/models/chatcompletionchoice.py +22 -0
- mistralai_gcp/models/chatcompletionrequest.py +105 -0
- mistralai_gcp/models/chatcompletionresponse.py +27 -0
- mistralai_gcp/models/chatcompletionstreamrequest.py +103 -0
- mistralai_gcp/models/completionchunk.py +27 -0
- mistralai_gcp/models/completionevent.py +15 -0
- mistralai_gcp/models/completionresponsestreamchoice.py +48 -0
- mistralai_gcp/models/contentchunk.py +17 -0
- mistralai_gcp/models/deltamessage.py +47 -0
- mistralai_gcp/models/fimcompletionrequest.py +94 -0
- mistralai_gcp/models/fimcompletionresponse.py +27 -0
- mistralai_gcp/models/fimcompletionstreamrequest.py +92 -0
- mistralai_gcp/models/function.py +19 -0
- mistralai_gcp/models/functioncall.py +22 -0
- mistralai_gcp/models/httpvalidationerror.py +23 -0
- mistralai_gcp/models/responseformat.py +18 -0
- mistralai_gcp/models/sdkerror.py +22 -0
- mistralai_gcp/models/security.py +16 -0
- mistralai_gcp/models/systemmessage.py +26 -0
- mistralai_gcp/models/textchunk.py +17 -0
- mistralai_gcp/models/tool.py +18 -0
- mistralai_gcp/models/toolcall.py +20 -0
- mistralai_gcp/models/toolmessage.py +50 -0
- mistralai_gcp/models/usageinfo.py +18 -0
- mistralai_gcp/models/usermessage.py +26 -0
- mistralai_gcp/models/validationerror.py +24 -0
- mistralai_gcp/py.typed +1 -0
- mistralai_gcp/sdk.py +174 -0
- mistralai_gcp/sdkconfiguration.py +54 -0
- mistralai_gcp/types/__init__.py +21 -0
- mistralai_gcp/types/basemodel.py +39 -0
- mistralai_gcp/utils/__init__.py +84 -0
- mistralai_gcp/utils/annotations.py +19 -0
- mistralai_gcp/utils/enums.py +34 -0
- mistralai_gcp/utils/eventstreaming.py +178 -0
- mistralai_gcp/utils/forms.py +207 -0
- mistralai_gcp/utils/headers.py +136 -0
- mistralai_gcp/utils/logger.py +16 -0
- mistralai_gcp/utils/metadata.py +118 -0
- mistralai_gcp/utils/queryparams.py +203 -0
- mistralai_gcp/utils/requestbodies.py +66 -0
- mistralai_gcp/utils/retries.py +216 -0
- mistralai_gcp/utils/security.py +168 -0
- mistralai_gcp/utils/serializers.py +181 -0
- mistralai_gcp/utils/url.py +150 -0
- mistralai_gcp/utils/values.py +128 -0
- py.typed +1 -0
- mistralai/client_base.py +0 -211
- mistralai/constants.py +0 -5
- mistralai/exceptions.py +0 -54
- mistralai/models/chat_completion.py +0 -93
- mistralai/models/common.py +0 -9
- mistralai/models/embeddings.py +0 -19
- mistralai/models/files.py +0 -23
- mistralai/models/jobs.py +0 -100
- mistralai/models/models.py +0 -39
- mistralai-0.4.2.dist-info/METADATA +0 -82
- mistralai-0.4.2.dist-info/RECORD +0 -20
- {mistralai-0.4.2.dist-info → mistralai-1.0.0.dist-info}/WHEEL +0 -0
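Taken together, the file list shows the hand-written 0.4.2 client (client.py, async_client.py, client_base.py, constants.py, exceptions.py, and the old models/*.py) being replaced by a Speakeasy-generated SDK: a shared basesdk.py, per-endpoint modules (agents.py, chat.py, embeddings.py, fim.py, files.py, jobs.py), generated models/ and utils/ trees, plus the new mistralai_azure and mistralai_gcp provider packages. As a rough before/after sketch of what the migration means for calling code, assuming the 1.0.0 entry point is the generated Mistral class wired up in mistralai/sdk.py and that chat exposes a complete() method (the 1.0.0 METADATA carries the authoritative examples):

# 0.4.2 (removed): hand-written client
# from mistralai.client import MistralClient
# client = MistralClient(api_key="your-api-key")
# response = client.chat(model="mistral-tiny", messages=[...])

# 1.0.0 (sketch): generated SDK
from mistralai import Mistral

sdk = Mistral(api_key="your-api-key")
response = sdk.chat.complete(
    model="mistral-small-latest",  # illustrative model name
    messages=[{"role": "user", "content": "What is the best French cheese?"}],
)
print(response.choices[0].message.content)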
mistralai/async_client.py
CHANGED
@@ -1,423 +1,15 @@
-import asyncio
-import posixpath
-from json import JSONDecodeError
-from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union
+from typing import Optional
 
-from httpx import (
-    AsyncClient,
-    AsyncHTTPTransport,
-    ConnectError,
-    Limits,
-    RequestError,
-    Response,
-)
+from .client import MIGRATION_MESSAGE
 
-from mistralai.client_base import ClientBase
-from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES
-from mistralai.exceptions import (
-    MistralAPIException,
-    MistralAPIStatusException,
-    MistralConnectionException,
-    MistralException,
-)
-from mistralai.files import FilesAsyncClient
-from mistralai.jobs import JobsAsyncClient
-from mistralai.models.chat_completion import (
-    ChatCompletionResponse,
-    ChatCompletionStreamResponse,
-    ResponseFormat,
-    ToolChoice,
-)
-from mistralai.models.embeddings import EmbeddingResponse
-from mistralai.models.models import ModelDeleted, ModelList
 
-
-class MistralAsyncClient(ClientBase):
+class MistralAsyncClient:
     def __init__(
         self,
         api_key: Optional[str] = None,
-        endpoint: str = ENDPOINT,
+        endpoint: str = "",
         max_retries: int = 5,
         timeout: int = 120,
         max_concurrent_requests: int = 64,
     ):
-        super().__init__(endpoint, api_key, max_retries, timeout)
-
-        self._client = AsyncClient(
-            follow_redirects=True,
-            timeout=timeout,
-            limits=Limits(max_connections=max_concurrent_requests),
-            transport=AsyncHTTPTransport(retries=max_retries),
-        )
-        self.files = FilesAsyncClient(self)
-        self.jobs = JobsAsyncClient(self)
-
-    async def close(self) -> None:
-        await self._client.aclose()
-
-    async def _check_response_status_codes(self, response: Response) -> None:
-        if response.status_code in RETRY_STATUS_CODES:
-            raise MistralAPIStatusException.from_response(
-                response,
-                message=f"Status: {response.status_code}. Message: {response.text}",
-            )
-        elif 400 <= response.status_code < 500:
-            if response.stream:
-                await response.aread()
-            raise MistralAPIException.from_response(
-                response,
-                message=f"Status: {response.status_code}. Message: {response.text}",
-            )
-        elif response.status_code >= 500:
-            if response.stream:
-                await response.aread()
-            raise MistralException(
-                message=f"Status: {response.status_code}. Message: {response.text}",
-            )
-
-    async def _check_streaming_response(self, response: Response) -> None:
-        await self._check_response_status_codes(response)
-
-    async def _check_response(self, response: Response) -> Dict[str, Any]:
-        await self._check_response_status_codes(response)
-
-        json_response: Dict[str, Any] = response.json()
-
-        if "object" not in json_response:
-            raise MistralException(message=f"Unexpected response: {json_response}")
-        if "error" == json_response["object"]:  # has errors
-            raise MistralAPIException.from_response(
-                response,
-                message=json_response["message"],
-            )
-
-        return json_response
-
-    async def _request(
-        self,
-        method: str,
-        json: Optional[Dict[str, Any]],
-        path: str,
-        stream: bool = False,
-        attempt: int = 1,
-        data: Optional[Dict[str, Any]] = None,
-        check_model_deprecation_headers_callback: Optional[Callable] = None,
-        **kwargs: Any,
-    ) -> AsyncGenerator[Dict[str, Any], None]:
-        accept_header = "text/event-stream" if stream else "application/json"
-        headers = {
-            "Accept": accept_header,
-            "User-Agent": f"mistral-client-python/{self._version}",
-            "Authorization": f"Bearer {self._api_key}",
-        }
-
-        if json is not None:
-            headers["Content-Type"] = "application/json"
-
-        url = posixpath.join(self._endpoint, path)
-
-        self._logger.debug(f"Sending request: {method} {url} {json}")
-
-        response: Response
-
-        try:
-            if stream:
-                async with self._client.stream(
-                    method,
-                    url,
-                    headers=headers,
-                    json=json,
-                    data=data,
-                    **kwargs,
-                ) as response:
-                    if check_model_deprecation_headers_callback:
-                        check_model_deprecation_headers_callback(response.headers)
-                    await self._check_streaming_response(response)
-
-                    async for line in response.aiter_lines():
-                        json_streamed_response = self._process_line(line)
-                        if json_streamed_response:
-                            yield json_streamed_response
-
-            else:
-                response = await self._client.request(
-                    method,
-                    url,
-                    headers=headers,
-                    json=json,
-                    data=data,
-                    **kwargs,
-                )
-                if check_model_deprecation_headers_callback:
-                    check_model_deprecation_headers_callback(response.headers)
-                yield await self._check_response(response)
-
-        except ConnectError as e:
-            raise MistralConnectionException(str(e)) from e
-        except RequestError as e:
-            raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from e
-        except JSONDecodeError as e:
-            raise MistralAPIException.from_response(
-                response,
-                message=f"Failed to decode json body: {response.text}",
-            ) from e
-        except MistralAPIStatusException as e:
-            attempt += 1
-            if attempt > self._max_retries:
-                raise MistralAPIStatusException.from_response(response, message=str(e)) from e
-            backoff = 2.0**attempt  # exponential backoff
-            await asyncio.sleep(backoff)
-
-            # Retry as a generator
-            async for r in self._request(method, json, path, stream=stream, attempt=attempt):
-                yield r
-
-    async def chat(
-        self,
-        messages: List[Any],
-        model: Optional[str] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        safe_mode: bool = False,
-        safe_prompt: bool = False,
-        tool_choice: Optional[Union[str, ToolChoice]] = None,
-        response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None,
-    ) -> ChatCompletionResponse:
-        """A asynchronous chat endpoint that returns a single response.
-
-        Args:
-            model (str): model the name of the model to chat with, e.g. mistral-tiny
-            messages (List[Any]): messages an array of messages to chat with, e.g.
-                [{role: 'user', content: 'What is the best French cheese?'}]
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
-            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.
-
-        Returns:
-            ChatCompletionResponse: a response object containing the generated text.
-        """
-        request = self._make_chat_request(
-            messages,
-            model,
-            tools=tools,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            top_p=top_p,
-            random_seed=random_seed,
-            stream=False,
-            safe_prompt=safe_mode or safe_prompt,
-            tool_choice=tool_choice,
-            response_format=response_format,
-        )
-
-        single_response = self._request(
-            "post",
-            request,
-            "v1/chat/completions",
-            check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
-        )
-
-        async for response in single_response:
-            return ChatCompletionResponse(**response)
-
-        raise MistralException("No response received")
-
-    async def chat_stream(
-        self,
-        messages: List[Any],
-        model: Optional[str] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        safe_mode: bool = False,
-        safe_prompt: bool = False,
-        tool_choice: Optional[Union[str, ToolChoice]] = None,
-        response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None,
-    ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
-        """An Asynchronous chat endpoint that streams responses.
-
-        Args:
-            model (str): model the name of the model to chat with, e.g. mistral-tiny
-            messages (List[Any]): messages an array of messages to chat with, e.g.
-                [{role: 'user', content: 'What is the best French cheese?'}]
-            tools (Optional[List[Function]], optional): a list of tools to use.
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
-            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.
-
-        Returns:
-            AsyncGenerator[ChatCompletionStreamResponse, None]:
-                An async generator that yields ChatCompletionStreamResponse objects.
-        """
-
-        request = self._make_chat_request(
-            messages,
-            model,
-            tools=tools,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            top_p=top_p,
-            random_seed=random_seed,
-            stream=True,
-            safe_prompt=safe_mode or safe_prompt,
-            tool_choice=tool_choice,
-            response_format=response_format,
-        )
-        async_response = self._request(
-            "post",
-            request,
-            "v1/chat/completions",
-            stream=True,
-            check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
-        )
-
-        async for json_response in async_response:
-            yield ChatCompletionStreamResponse(**json_response)
-
-    async def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse:
-        """An asynchronous embeddings endpoint that returns embeddings for a single, or batch of inputs
-
-        Args:
-            model (str): The embedding model to use, e.g. mistral-embed
-            input (Union[str, List[str]]): The input to embed,
-                e.g. ['What is the best French cheese?']
-
-        Returns:
-            EmbeddingResponse: A response object containing the embeddings.
-        """
-        request = {"model": model, "input": input}
-        single_response = self._request(
-            "post",
-            request,
-            "v1/embeddings",
-            check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
-        )
-
-        async for response in single_response:
-            return EmbeddingResponse(**response)
-
-        raise MistralException("No response received")
-
-    async def list_models(self) -> ModelList:
-        """Returns a list of the available models
-
-        Returns:
-            ModelList: A response object containing the list of models.
-        """
-        single_response = self._request("get", {}, "v1/models")
-
-        async for response in single_response:
-            return ModelList(**response)
-
-        raise MistralException("No response received")
-
-    async def delete_model(self, model_id: str) -> ModelDeleted:
-        single_response = self._request("delete", {}, f"v1/models/{model_id}")
-
-        async for response in single_response:
-            return ModelDeleted(**response)
-
-        raise MistralException("No response received")
-
-    async def completion(
-        self,
-        model: str,
-        prompt: str,
-        suffix: Optional[str] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        stop: Optional[List[str]] = None,
-    ) -> ChatCompletionResponse:
-        """An asynchronous completion endpoint that returns a single response.
-
-        Args:
-            model (str): model the name of the model to get completions with, e.g. codestral-latest
-            prompt (str): the prompt to complete
-            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
-        Returns:
-            Dict[str, Any]: a response object containing the generated text.
-        """
-        request = self._make_completion_request(
-            prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop
-        )
-        single_response = self._request(
-            "post",
-            request,
-            "v1/fim/completions",
-            check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
-        )
-
-        async for response in single_response:
-            return ChatCompletionResponse(**response)
-
-        raise MistralException("No response received")
-
-    async def completion_stream(
-        self,
-        model: str,
-        prompt: str,
-        suffix: Optional[str] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        stop: Optional[List[str]] = None,
-    ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
-        """An asynchronous completion endpoint that returns a streaming response.
-
-        Args:
-            model (str): model the name of the model to get completions with, e.g. codestral-latest
-            prompt (str): the prompt to complete
-            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
-
-        Returns:
-            Dict[str, Any]: a response object containing the generated text.
-        """
-        request = self._make_completion_request(
-            prompt,
-            model,
-            suffix,
-            temperature,
-            max_tokens,
-            top_p,
-            random_seed,
-            stop,
-            stream=True,
-        )
-        async_response = self._request(
-            "post",
-            request,
-            "v1/fim/completions",
-            stream=True,
-            check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model),
-        )
-
-        async for json_response in async_response:
-            yield ChatCompletionStreamResponse(**json_response)
+        raise NotImplementedError(MIGRATION_MESSAGE)
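The net effect: all 413 removed lines (the httpx transport setup, the status-code checks, the retrying _request generator, and the chat, chat_stream, embeddings, list_models, delete_model, completion, and completion_stream methods) are replaced by a constructor that raises immediately. A minimal sketch of what 0.4.2 call sites hit after upgrading; the exact wording of MIGRATION_MESSAGE lives in mistralai/client.py and is not shown in this diff:

from mistralai.async_client import MistralAsyncClient

try:
    client = MistralAsyncClient(api_key="your-api-key")  # __init__ is now a stub
except NotImplementedError as err:
    print(err)  # raised unconditionally; carries the migration notice from client.py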
mistralai/basesdk.py
ADDED
@@ -0,0 +1,253 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .sdkconfiguration import SDKConfiguration
+import httpx
+from mistralai import models, utils
+from mistralai._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext
+from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content
+from typing import Callable, List, Optional, Tuple
+
+class BaseSDK:
+    sdk_configuration: SDKConfiguration
+
+    def __init__(self, sdk_config: SDKConfiguration) -> None:
+        self.sdk_configuration = sdk_config
+
+    def get_url(self, base_url, url_variables):
+        sdk_url, sdk_variables = self.sdk_configuration.get_server_details()
+
+        if base_url is None:
+            base_url = sdk_url
+
+        if url_variables is None:
+            url_variables = sdk_variables
+
+        return utils.template_url(base_url, url_variables)
+
+    def build_request(
+        self,
+        method,
+        path,
+        base_url,
+        url_variables,
+        request,
+        request_body_required,
+        request_has_path_params,
+        request_has_query_params,
+        user_agent_header,
+        accept_header_value,
+        _globals=None,
+        security=None,
+        timeout_ms: Optional[int] = None,
+        get_serialized_body: Optional[
+            Callable[[], Optional[SerializedRequestBody]]
+        ] = None,
+        url_override: Optional[str] = None,
+    ) -> httpx.Request:
+        client = self.sdk_configuration.client
+
+        query_params = {}
+
+        url = url_override
+        if url is None:
+            url = utils.generate_url(
+                self.get_url(base_url, url_variables),
+                path,
+                request if request_has_path_params else None,
+                _globals if request_has_path_params else None,
+            )
+
+            query_params = utils.get_query_params(
+                request if request_has_query_params else None,
+                _globals if request_has_query_params else None,
+            )
+
+        headers = utils.get_headers(request, _globals)
+        headers["Accept"] = accept_header_value
+        headers[user_agent_header] = self.sdk_configuration.user_agent
+
+        if security is not None:
+            if callable(security):
+                security = security()
+        security = utils.get_security_from_env(security, models.Security)
+        if security is not None:
+            security_headers, security_query_params = utils.get_security(security)
+            headers = {**headers, **security_headers}
+            query_params = {**query_params, **security_query_params}
+
+        serialized_request_body = SerializedRequestBody("application/octet-stream")
+        if get_serialized_body is not None:
+            rb = get_serialized_body()
+            if request_body_required and rb is None:
+                raise ValueError("request body is required")
+
+            if rb is not None:
+                serialized_request_body = rb
+
+        if (
+            serialized_request_body.media_type is not None
+            and serialized_request_body.media_type
+            not in (
+                "multipart/form-data",
+                "multipart/mixed",
+            )
+        ):
+            headers["content-type"] = serialized_request_body.media_type
+
+        timeout = timeout_ms / 1000 if timeout_ms is not None else None
+
+        return client.build_request(
+            method,
+            url,
+            params=query_params,
+            content=serialized_request_body.content,
+            data=serialized_request_body.data,
+            files=serialized_request_body.files,
+            headers=headers,
+            timeout=timeout,
+        )
+
+    def do_request(
+        self,
+        hook_ctx,
+        request,
+        error_status_codes,
+        stream=False,
+        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
+    ) -> httpx.Response:
+        client = self.sdk_configuration.client
+        logger = self.sdk_configuration.debug_logger
+
+        def do():
+            http_res = None
+            try:
+                req = self.sdk_configuration.get_hooks().before_request(
+                    BeforeRequestContext(hook_ctx), request
+                )
+                logger.debug(
+                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                    req.method,
+                    req.url,
+                    req.headers,
+                    get_body_content(req)
+                )
+                http_res = client.send(req, stream=stream)
+            except Exception as e:
+                _, e = self.sdk_configuration.get_hooks().after_error(
+                    AfterErrorContext(hook_ctx), None, e
+                )
+                if e is not None:
+                    logger.debug("Request Exception", exc_info=True)
+                    raise e
+
+            if http_res is None:
+                logger.debug("Raising no response SDK error")
+                raise models.SDKError("No response received")
+
+            logger.debug(
+                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                http_res.status_code,
+                http_res.url,
+                http_res.headers,
+                "<streaming response>" if stream else http_res.text
+            )
+
+            if utils.match_status_codes(error_status_codes, http_res.status_code):
+                result, err = self.sdk_configuration.get_hooks().after_error(
+                    AfterErrorContext(hook_ctx), http_res, None
+                )
+                if err is not None:
+                    logger.debug("Request Exception", exc_info=True)
+                    raise err
+                if result is not None:
+                    http_res = result
+                else:
+                    logger.debug("Raising unexpected SDK error")
+                    raise models.SDKError("Unexpected error occurred")
+
+            return http_res
+
+        if retry_config is not None:
+            http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1]))
+        else:
+            http_res = do()
+
+        if not utils.match_status_codes(error_status_codes, http_res.status_code):
+            http_res = self.sdk_configuration.get_hooks().after_success(
+                AfterSuccessContext(hook_ctx), http_res
+            )
+
+        return http_res
+
+    async def do_request_async(
+        self,
+        hook_ctx,
+        request,
+        error_status_codes,
+        stream=False,
+        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
+    ) -> httpx.Response:
+        client = self.sdk_configuration.async_client
+        logger = self.sdk_configuration.debug_logger
+        async def do():
+            http_res = None
+            try:
+                req = self.sdk_configuration.get_hooks().before_request(
+                    BeforeRequestContext(hook_ctx), request
+                )
+                logger.debug(
+                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                    req.method,
+                    req.url,
+                    req.headers,
+                    get_body_content(req)
+                )
+                http_res = await client.send(req, stream=stream)
+            except Exception as e:
+                _, e = self.sdk_configuration.get_hooks().after_error(
+                    AfterErrorContext(hook_ctx), None, e
+                )
+                if e is not None:
+                    logger.debug("Request Exception", exc_info=True)
+                    raise e
+
+            if http_res is None:
+                logger.debug("Raising no response SDK error")
+                raise models.SDKError("No response received")
+
+            logger.debug(
+                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                http_res.status_code,
+                http_res.url,
+                http_res.headers,
+                "<streaming response>" if stream else http_res.text
+            )
+
+            if utils.match_status_codes(error_status_codes, http_res.status_code):
+                result, err = self.sdk_configuration.get_hooks().after_error(
+                    AfterErrorContext(hook_ctx), http_res, None
+                )
+                if err is not None:
+                    logger.debug("Request Exception", exc_info=True)
+                    raise err
+                if result is not None:
+                    http_res = result
+                else:
+                    logger.debug("Raising unexpected SDK error")
+                    raise models.SDKError("Unexpected error occurred")
+
+            return http_res
+
+        if retry_config is not None:
+            http_res = await utils.retry_async(
+                do, utils.Retries(retry_config[0], retry_config[1])
+            )
+        else:
+            http_res = await do()
+
+        if not utils.match_status_codes(error_status_codes, http_res.status_code):
+            http_res = self.sdk_configuration.get_hooks().after_success(
+                AfterSuccessContext(hook_ctx), http_res
+            )
+
+        return http_res
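basesdk.py is the spine of the generated SDK: build_request assembles an httpx.Request (server URL templating, path/query/security parameters, serialized body), while do_request and do_request_async wrap the actual send in a do() closure so the hook chain (before_request, after_error, after_success) and the optional retry policy compose cleanly: utils.retry simply re-invokes the closure, hooks included, on retryable failures. A self-contained sketch of that closure-plus-retry shape, using hypothetical stand-ins rather than the SDK's real hook and retry APIs:

import time
from typing import Callable, List

def before_request(req: str) -> str:
    # Stand-in for the before_request hook: may rewrite the outgoing request.
    return req.upper()

def retry(do: Callable[[], str], max_attempts: int = 3) -> str:
    # Stand-in for utils.retry: re-run the whole closure (hooks included)
    # with exponential backoff, scaled down here for the demo.
    for attempt in range(1, max_attempts + 1):
        try:
            return do()
        except ConnectionError:
            if attempt == max_attempts:
                raise
            time.sleep(0.01 * 2 ** attempt)
    raise AssertionError("unreachable")

def do_request(request: str, failures: List[None]) -> str:
    def do() -> str:
        req = before_request(request)
        if failures:  # simulate a transient network failure per queued entry
            failures.pop()
            raise ConnectionError(f"send failed: {req}")
        return f"200 OK for {req}"  # an after_success hook would post-process here
    return retry(do)

print(do_request("post /v1/chat/completions", failures=[None]))  # retries once, then succeeds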