mistralai 0.4.1__py3-none-any.whl → 0.5.5a50__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- mistralai/__init__.py +5 -0
- mistralai/_hooks/__init__.py +5 -0
- mistralai/_hooks/custom_user_agent.py +16 -0
- mistralai/_hooks/deprecation_warning.py +26 -0
- mistralai/_hooks/registration.py +17 -0
- mistralai/_hooks/sdkhooks.py +57 -0
- mistralai/_hooks/types.py +76 -0
- mistralai/async_client.py +5 -374
- mistralai/basesdk.py +216 -0
- mistralai/chat.py +475 -0
- mistralai/client.py +5 -372
- mistralai/embeddings.py +182 -0
- mistralai/files.py +600 -84
- mistralai/fim.py +439 -0
- mistralai/fine_tuning.py +855 -0
- mistralai/httpclient.py +78 -0
- mistralai/models/__init__.py +80 -0
- mistralai/models/archiveftmodelout.py +19 -0
- mistralai/models/assistantmessage.py +58 -0
- mistralai/models/chatcompletionchoice.py +33 -0
- mistralai/models/chatcompletionrequest.py +114 -0
- mistralai/models/chatcompletionresponse.py +27 -0
- mistralai/models/chatcompletionstreamrequest.py +112 -0
- mistralai/models/checkpointout.py +25 -0
- mistralai/models/completionchunk.py +27 -0
- mistralai/models/completionevent.py +15 -0
- mistralai/models/completionresponsestreamchoice.py +53 -0
- mistralai/models/contentchunk.py +17 -0
- mistralai/models/delete_model_v1_models_model_id_deleteop.py +16 -0
- mistralai/models/deletefileout.py +24 -0
- mistralai/models/deletemodelout.py +25 -0
- mistralai/models/deltamessage.py +52 -0
- mistralai/models/detailedjobout.py +96 -0
- mistralai/models/embeddingrequest.py +66 -0
- mistralai/models/embeddingresponse.py +24 -0
- mistralai/models/embeddingresponsedata.py +19 -0
- mistralai/models/eventout.py +55 -0
- mistralai/models/files_api_routes_delete_fileop.py +16 -0
- mistralai/models/files_api_routes_retrieve_fileop.py +16 -0
- mistralai/models/files_api_routes_upload_fileop.py +51 -0
- mistralai/models/fileschema.py +76 -0
- mistralai/models/fimcompletionrequest.py +99 -0
- mistralai/models/fimcompletionresponse.py +27 -0
- mistralai/models/fimcompletionstreamrequest.py +97 -0
- mistralai/models/finetuneablemodel.py +8 -0
- mistralai/models/ftmodelcapabilitiesout.py +21 -0
- mistralai/models/ftmodelout.py +70 -0
- mistralai/models/function.py +19 -0
- mistralai/models/functioncall.py +16 -0
- mistralai/models/githubrepositoryin.py +57 -0
- mistralai/models/githubrepositoryout.py +57 -0
- mistralai/models/httpvalidationerror.py +23 -0
- mistralai/models/jobin.py +78 -0
- mistralai/models/jobmetadataout.py +59 -0
- mistralai/models/jobout.py +112 -0
- mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +16 -0
- mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +18 -0
- mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +73 -0
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +18 -0
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +86 -0
- mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +16 -0
- mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +16 -0
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +19 -0
- mistralai/models/jobsout.py +20 -0
- mistralai/models/legacyjobmetadataout.py +85 -0
- mistralai/models/listfilesout.py +17 -0
- mistralai/models/metricout.py +55 -0
- mistralai/models/modelcapabilities.py +21 -0
- mistralai/models/modelcard.py +71 -0
- mistralai/models/modellist.py +18 -0
- mistralai/models/responseformat.py +18 -0
- mistralai/models/retrieve_model_v1_models_model_id_getop.py +16 -0
- mistralai/models/retrievefileout.py +76 -0
- mistralai/models/sampletype.py +7 -0
- mistralai/models/sdkerror.py +22 -0
- mistralai/models/security.py +16 -0
- mistralai/models/source.py +7 -0
- mistralai/models/systemmessage.py +26 -0
- mistralai/models/textchunk.py +17 -0
- mistralai/models/tool.py +18 -0
- mistralai/models/toolcall.py +20 -0
- mistralai/models/toolmessage.py +55 -0
- mistralai/models/trainingfile.py +17 -0
- mistralai/models/trainingparameters.py +53 -0
- mistralai/models/trainingparametersin.py +61 -0
- mistralai/models/unarchiveftmodelout.py +19 -0
- mistralai/models/updateftmodelin.py +49 -0
- mistralai/models/uploadfileout.py +76 -0
- mistralai/models/usageinfo.py +18 -0
- mistralai/models/usermessage.py +26 -0
- mistralai/models/validationerror.py +24 -0
- mistralai/models/wandbintegration.py +61 -0
- mistralai/models/wandbintegrationout.py +57 -0
- mistralai/models_.py +928 -0
- mistralai/py.typed +1 -0
- mistralai/sdk.py +111 -0
- mistralai/sdkconfiguration.py +53 -0
- mistralai/types/__init__.py +21 -0
- mistralai/types/basemodel.py +35 -0
- mistralai/utils/__init__.py +82 -0
- mistralai/utils/annotations.py +19 -0
- mistralai/utils/enums.py +34 -0
- mistralai/utils/eventstreaming.py +179 -0
- mistralai/utils/forms.py +207 -0
- mistralai/utils/headers.py +136 -0
- mistralai/utils/metadata.py +118 -0
- mistralai/utils/queryparams.py +203 -0
- mistralai/utils/requestbodies.py +66 -0
- mistralai/utils/retries.py +216 -0
- mistralai/utils/security.py +182 -0
- mistralai/utils/serializers.py +181 -0
- mistralai/utils/url.py +150 -0
- mistralai/utils/values.py +128 -0
- {mistralai-0.4.1.dist-info → mistralai-0.5.5a50.dist-info}/LICENSE +1 -1
- mistralai-0.5.5a50.dist-info/METADATA +626 -0
- mistralai-0.5.5a50.dist-info/RECORD +228 -0
- mistralai_azure/__init__.py +5 -0
- mistralai_azure/_hooks/__init__.py +5 -0
- mistralai_azure/_hooks/custom_user_agent.py +16 -0
- mistralai_azure/_hooks/registration.py +15 -0
- mistralai_azure/_hooks/sdkhooks.py +57 -0
- mistralai_azure/_hooks/types.py +76 -0
- mistralai_azure/basesdk.py +215 -0
- mistralai_azure/chat.py +475 -0
- mistralai_azure/httpclient.py +78 -0
- mistralai_azure/models/__init__.py +28 -0
- mistralai_azure/models/assistantmessage.py +58 -0
- mistralai_azure/models/chatcompletionchoice.py +33 -0
- mistralai_azure/models/chatcompletionrequest.py +114 -0
- mistralai_azure/models/chatcompletionresponse.py +27 -0
- mistralai_azure/models/chatcompletionstreamrequest.py +112 -0
- mistralai_azure/models/completionchunk.py +27 -0
- mistralai_azure/models/completionevent.py +15 -0
- mistralai_azure/models/completionresponsestreamchoice.py +53 -0
- mistralai_azure/models/contentchunk.py +17 -0
- mistralai_azure/models/deltamessage.py +52 -0
- mistralai_azure/models/function.py +19 -0
- mistralai_azure/models/functioncall.py +16 -0
- mistralai_azure/models/httpvalidationerror.py +23 -0
- mistralai_azure/models/responseformat.py +18 -0
- mistralai_azure/models/sdkerror.py +22 -0
- mistralai_azure/models/security.py +16 -0
- mistralai_azure/models/systemmessage.py +26 -0
- mistralai_azure/models/textchunk.py +17 -0
- mistralai_azure/models/tool.py +18 -0
- mistralai_azure/models/toolcall.py +20 -0
- mistralai_azure/models/toolmessage.py +55 -0
- mistralai_azure/models/usageinfo.py +18 -0
- mistralai_azure/models/usermessage.py +26 -0
- mistralai_azure/models/validationerror.py +24 -0
- mistralai_azure/py.typed +1 -0
- mistralai_azure/sdk.py +102 -0
- mistralai_azure/sdkconfiguration.py +53 -0
- mistralai_azure/types/__init__.py +21 -0
- mistralai_azure/types/basemodel.py +35 -0
- mistralai_azure/utils/__init__.py +80 -0
- mistralai_azure/utils/annotations.py +19 -0
- mistralai_azure/utils/enums.py +34 -0
- mistralai_azure/utils/eventstreaming.py +179 -0
- mistralai_azure/utils/forms.py +207 -0
- mistralai_azure/utils/headers.py +136 -0
- mistralai_azure/utils/metadata.py +118 -0
- mistralai_azure/utils/queryparams.py +203 -0
- mistralai_azure/utils/requestbodies.py +66 -0
- mistralai_azure/utils/retries.py +216 -0
- mistralai_azure/utils/security.py +168 -0
- mistralai_azure/utils/serializers.py +181 -0
- mistralai_azure/utils/url.py +150 -0
- mistralai_azure/utils/values.py +128 -0
- mistralai_gcp/__init__.py +5 -0
- mistralai_gcp/_hooks/__init__.py +5 -0
- mistralai_gcp/_hooks/custom_user_agent.py +16 -0
- mistralai_gcp/_hooks/registration.py +15 -0
- mistralai_gcp/_hooks/sdkhooks.py +57 -0
- mistralai_gcp/_hooks/types.py +76 -0
- mistralai_gcp/basesdk.py +215 -0
- mistralai_gcp/chat.py +463 -0
- mistralai_gcp/fim.py +439 -0
- mistralai_gcp/httpclient.py +78 -0
- mistralai_gcp/models/__init__.py +31 -0
- mistralai_gcp/models/assistantmessage.py +58 -0
- mistralai_gcp/models/chatcompletionchoice.py +33 -0
- mistralai_gcp/models/chatcompletionrequest.py +110 -0
- mistralai_gcp/models/chatcompletionresponse.py +27 -0
- mistralai_gcp/models/chatcompletionstreamrequest.py +108 -0
- mistralai_gcp/models/completionchunk.py +27 -0
- mistralai_gcp/models/completionevent.py +15 -0
- mistralai_gcp/models/completionresponsestreamchoice.py +53 -0
- mistralai_gcp/models/contentchunk.py +17 -0
- mistralai_gcp/models/deltamessage.py +52 -0
- mistralai_gcp/models/fimcompletionrequest.py +99 -0
- mistralai_gcp/models/fimcompletionresponse.py +27 -0
- mistralai_gcp/models/fimcompletionstreamrequest.py +97 -0
- mistralai_gcp/models/function.py +19 -0
- mistralai_gcp/models/functioncall.py +16 -0
- mistralai_gcp/models/httpvalidationerror.py +23 -0
- mistralai_gcp/models/responseformat.py +18 -0
- mistralai_gcp/models/sdkerror.py +22 -0
- mistralai_gcp/models/security.py +16 -0
- mistralai_gcp/models/systemmessage.py +26 -0
- mistralai_gcp/models/textchunk.py +17 -0
- mistralai_gcp/models/tool.py +18 -0
- mistralai_gcp/models/toolcall.py +20 -0
- mistralai_gcp/models/toolmessage.py +55 -0
- mistralai_gcp/models/usageinfo.py +18 -0
- mistralai_gcp/models/usermessage.py +26 -0
- mistralai_gcp/models/validationerror.py +24 -0
- mistralai_gcp/py.typed +1 -0
- mistralai_gcp/sdk.py +165 -0
- mistralai_gcp/sdkconfiguration.py +53 -0
- mistralai_gcp/types/__init__.py +21 -0
- mistralai_gcp/types/basemodel.py +35 -0
- mistralai_gcp/utils/__init__.py +80 -0
- mistralai_gcp/utils/annotations.py +19 -0
- mistralai_gcp/utils/enums.py +34 -0
- mistralai_gcp/utils/eventstreaming.py +179 -0
- mistralai_gcp/utils/forms.py +207 -0
- mistralai_gcp/utils/headers.py +136 -0
- mistralai_gcp/utils/metadata.py +118 -0
- mistralai_gcp/utils/queryparams.py +203 -0
- mistralai_gcp/utils/requestbodies.py +66 -0
- mistralai_gcp/utils/retries.py +216 -0
- mistralai_gcp/utils/security.py +168 -0
- mistralai_gcp/utils/serializers.py +181 -0
- mistralai_gcp/utils/url.py +150 -0
- mistralai_gcp/utils/values.py +128 -0
- py.typed +1 -0
- mistralai/client_base.py +0 -186
- mistralai/constants.py +0 -3
- mistralai/exceptions.py +0 -54
- mistralai/jobs.py +0 -172
- mistralai/models/chat_completion.py +0 -93
- mistralai/models/common.py +0 -9
- mistralai/models/embeddings.py +0 -19
- mistralai/models/files.py +0 -23
- mistralai/models/jobs.py +0 -98
- mistralai/models/models.py +0 -39
- mistralai-0.4.1.dist-info/METADATA +0 -80
- mistralai-0.4.1.dist-info/RECORD +0 -20
- {mistralai-0.4.1.dist-info → mistralai-0.5.5a50.dist-info}/WHEEL +0 -0
mistralai/_hooks/custom_user_agent.py
ADDED
@@ -0,0 +1,16 @@
+# MAKE SURE YOU UPDATE THE COPIES OF THIS FILES IN THE PROVIDERS'S PACKAGES WHEN YOU MAKE CHANGES HERE
+from typing import Union
+
+import httpx
+
+from .types import BeforeRequestContext, BeforeRequestHook
+
+
+class CustomUserAgentHook(BeforeRequestHook):
+    def before_request(
+        self, hook_ctx: BeforeRequestContext, request: httpx.Request
+    ) -> Union[httpx.Request, Exception]:
+        request.headers["user-agent"] = (
+            "mistral-client-python/" + request.headers["user-agent"].split(" ")[1]
+        )
+        return request
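The hook above rewrites the generated SDK's default user-agent to a `mistral-client-python/<version>` value by splitting on the first space. A minimal sketch of that rewrite in isolation; the incoming user-agent string and the operation id below are hypothetical, not necessarily what the SDK emits:

```python
import httpx

from mistralai._hooks.custom_user_agent import CustomUserAgentHook
from mistralai._hooks.types import BeforeRequestContext, HookContext

# Hypothetical generated default of the form "<sdk> <version> <extra...>".
req = httpx.Request(
    "POST",
    "https://api.mistral.ai/v1/chat/completions",
    headers={"user-agent": "speakeasy-sdk/python 0.5.5a50 2.300.0 0.0.2"},
)
ctx = BeforeRequestContext(HookContext("chat_completion", None, None))
out = CustomUserAgentHook().before_request(ctx, req)
assert not isinstance(out, Exception)
print(out.headers["user-agent"])  # -> "mistral-client-python/0.5.5a50"
```

Note the split-on-space convention assumes the generated default always carries the version as its second token.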
mistralai/_hooks/deprecation_warning.py
ADDED
@@ -0,0 +1,26 @@
+import logging
+from typing import Union
+
+import httpx
+
+from .types import AfterSuccessContext, AfterSuccessHook
+
+logger = logging.getLogger(__name__)
+
+HEADER_MODEL_DEPRECATION_TIMESTAMP = "x-model-deprecation-timestamp"
+
+
+class DeprecationWarningHook(AfterSuccessHook):
+
+    def after_success(
+        self, hook_ctx: AfterSuccessContext, response: httpx.Response
+    ) -> Union[httpx.Response, Exception]:
+        if HEADER_MODEL_DEPRECATION_TIMESTAMP in response.headers:
+            model = response.json()["model"]
+            # pylint: disable=logging-fstring-interpolation
+            logger.warning(
+                "WARNING: The model %s is deprecated and will be removed on %s. Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.",
+                model,
+                response.headers[HEADER_MODEL_DEPRECATION_TIMESTAMP],
+            )
+        return response
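The warning only fires when the response carries the `x-model-deprecation-timestamp` header; either way the hook passes the response through unchanged. A sketch against a synthetic `httpx.Response` (the header value, model name, and operation id are made up):

```python
import logging

import httpx

from mistralai._hooks.deprecation_warning import DeprecationWarningHook
from mistralai._hooks.types import AfterSuccessContext, HookContext

logging.basicConfig(level=logging.WARNING)

# Synthetic response: deprecation header plus a JSON body with a "model" field.
resp = httpx.Response(
    200,
    headers={"x-model-deprecation-timestamp": "2025-01-01T00:00:00Z"},
    json={"model": "mistral-tiny"},
)
ctx = AfterSuccessContext(HookContext("chat_completion", None, None))
out = DeprecationWarningHook().after_success(ctx, resp)  # emits the warning via logging
assert out is resp  # response is returned unchanged
```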
mistralai/_hooks/registration.py
ADDED
@@ -0,0 +1,17 @@
+from .custom_user_agent import CustomUserAgentHook
+from .deprecation_warning import DeprecationWarningHook
+from .types import Hooks
+
+# This file is only ever generated once on the first generation and then is free to be modified.
+# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them
+# in this file or in separate files in the hooks folder.
+
+
+def init_hooks(hooks: Hooks):
+    # pylint: disable=unused-argument
+    """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook
+    with an instance of a hook that implements that specific Hook interface
+    Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance
+    """
+    hooks.register_before_request_hook(CustomUserAgentHook())
+    hooks.register_after_success_hook(DeprecationWarningHook())
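Since registration.py is generated once and then left alone, it is the intended place to wire in additional hooks. A hypothetical extension (`RequestLoggerHook` is not part of the package) showing what a modified registration.py could look like:

```python
import logging
from typing import Union

import httpx

from .custom_user_agent import CustomUserAgentHook
from .deprecation_warning import DeprecationWarningHook
from .types import BeforeRequestContext, BeforeRequestHook, Hooks

logger = logging.getLogger(__name__)


class RequestLoggerHook(BeforeRequestHook):
    """Hypothetical user-defined hook that logs every outgoing request."""

    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> Union[httpx.Request, Exception]:
        logger.debug("%s %s (operation=%s)", request.method, request.url, hook_ctx.operation_id)
        return request


def init_hooks(hooks: Hooks):
    # Keep the default registrations, then add your own.
    hooks.register_before_request_hook(CustomUserAgentHook())
    hooks.register_after_success_hook(DeprecationWarningHook())
    hooks.register_before_request_hook(RequestLoggerHook())
```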
mistralai/_hooks/sdkhooks.py
ADDED
@@ -0,0 +1,57 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks
+from .registration import init_hooks
+from typing import List, Optional, Tuple
+from mistralai.httpclient import HttpClient
+
+class SDKHooks(Hooks):
+    def __init__(self) -> None:
+        self.sdk_init_hooks: List[SDKInitHook] = []
+        self.before_request_hooks: List[BeforeRequestHook] = []
+        self.after_success_hooks: List[AfterSuccessHook] = []
+        self.after_error_hooks: List[AfterErrorHook] = []
+        init_hooks(self)
+
+    def register_sdk_init_hook(self, hook: SDKInitHook) -> None:
+        self.sdk_init_hooks.append(hook)
+
+    def register_before_request_hook(self, hook: BeforeRequestHook) -> None:
+        self.before_request_hooks.append(hook)
+
+    def register_after_success_hook(self, hook: AfterSuccessHook) -> None:
+        self.after_success_hooks.append(hook)
+
+    def register_after_error_hook(self, hook: AfterErrorHook) -> None:
+        self.after_error_hooks.append(hook)
+
+    def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
+        for hook in self.sdk_init_hooks:
+            base_url, client = hook.sdk_init(base_url, client)
+        return base_url, client
+
+    def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request:
+        for hook in self.before_request_hooks:
+            out = hook.before_request(hook_ctx, request)
+            if isinstance(out, Exception):
+                raise out
+            request = out
+
+        return request
+
+    def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response:
+        for hook in self.after_success_hooks:
+            out = hook.after_success(hook_ctx, response)
+            if isinstance(out, Exception):
+                raise out
+            response = out
+        return response
+
+    def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]:
+        for hook in self.after_error_hooks:
+            result = hook.after_error(hook_ctx, response, error)
+            if isinstance(result, Exception):
+                raise result
+            response, error = result
+        return response, error
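Each pipeline runs its hooks in registration order, threading the request or response through; a hook can abort the call by returning (not raising) an Exception, which SDKHooks then raises. A sketch of that short-circuit behavior using a throwaway hook (the operation id and user-agent value are placeholders):

```python
import httpx

from mistralai._hooks.sdkhooks import SDKHooks
from mistralai._hooks.types import BeforeRequestContext, BeforeRequestHook, HookContext


class RejectingHook(BeforeRequestHook):
    """Illustrative hook: returning an Exception makes SDKHooks raise it."""

    def before_request(self, hook_ctx, request):
        return RuntimeError("blocked by policy")


hooks = SDKHooks()  # init_hooks() has already registered the two default hooks
hooks.register_before_request_hook(RejectingHook())

req = httpx.Request(
    "GET",
    "https://api.mistral.ai/v1/models",
    headers={"user-agent": "speakeasy-sdk/python 0.5.5a50"},  # so the user-agent hook can split it
)
try:
    hooks.before_request(BeforeRequestContext(HookContext("list_models", None, None)), req)
except RuntimeError as e:
    print(e)  # -> "blocked by policy"
```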
mistralai/_hooks/types.py
ADDED
@@ -0,0 +1,76 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+
+from abc import ABC, abstractmethod
+import httpx
+from mistralai.httpclient import HttpClient
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+
+class HookContext:
+    operation_id: str
+    oauth2_scopes: Optional[List[str]] = None
+    security_source: Optional[Union[Any, Callable[[], Any]]] = None
+
+    def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]):
+        self.operation_id = operation_id
+        self.oauth2_scopes = oauth2_scopes
+        self.security_source = security_source
+
+
+class BeforeRequestContext(HookContext):
+    def __init__(self, hook_ctx: HookContext):
+        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)
+
+
+class AfterSuccessContext(HookContext):
+    def __init__(self, hook_ctx: HookContext):
+        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)
+
+
+
+class AfterErrorContext(HookContext):
+    def __init__(self, hook_ctx: HookContext):
+        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)
+
+
+class SDKInitHook(ABC):
+    @abstractmethod
+    def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
+        pass
+
+
+class BeforeRequestHook(ABC):
+    @abstractmethod
+    def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]:
+        pass
+
+
+class AfterSuccessHook(ABC):
+    @abstractmethod
+    def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]:
+        pass
+
+
+class AfterErrorHook(ABC):
+    @abstractmethod
+    def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]:
+        pass
+
+
+class Hooks(ABC):
+    @abstractmethod
+    def register_sdk_init_hook(self, hook: SDKInitHook):
+        pass
+
+    @abstractmethod
+    def register_before_request_hook(self, hook: BeforeRequestHook):
+        pass
+
+    @abstractmethod
+    def register_after_success_hook(self, hook: AfterSuccessHook):
+        pass
+
+    @abstractmethod
+    def register_after_error_hook(self, hook: AfterErrorHook):
+        pass
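The ABCs above define the four extension points (sdk_init, before_request, after_success, after_error), and the three context classes simply re-wrap a HookContext, so every hook sees the operation id, OAuth2 scopes, and security source of the call it intercepts. One more illustrative implementation: an SDKInitHook that rewrites the base URL before any request is made (the proxy URL is made up):

```python
from typing import Tuple

from mistralai._hooks.types import SDKInitHook
from mistralai.httpclient import HttpClient


class ProxyBaseURLHook(SDKInitHook):
    """Hypothetical hook routing all SDK traffic through an internal proxy."""

    def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
        # Replace the default base URL; the underlying HTTP client is kept as-is.
        return "https://llm-proxy.internal.example.com", client
```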
mistralai/async_client.py
CHANGED
@@ -1,384 +1,15 @@
-import asyncio
-import posixpath
-from json import JSONDecodeError
-from typing import Any, AsyncGenerator, Dict, List, Optional, Union
+from typing import Optional
 
-from httpx import (
-    AsyncClient,
-    AsyncHTTPTransport,
-    ConnectError,
-    Limits,
-    RequestError,
-    Response,
-)
+from .client import MIGRATION_MESSAGE
 
-from mistralai.client_base import ClientBase
-from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES
-from mistralai.exceptions import (
-    MistralAPIException,
-    MistralAPIStatusException,
-    MistralConnectionException,
-    MistralException,
-)
-from mistralai.files import FilesAsyncClient
-from mistralai.jobs import JobsAsyncClient
-from mistralai.models.chat_completion import (
-    ChatCompletionResponse,
-    ChatCompletionStreamResponse,
-    ResponseFormat,
-    ToolChoice,
-)
-from mistralai.models.embeddings import EmbeddingResponse
-from mistralai.models.models import ModelDeleted, ModelList
 
-
-class MistralAsyncClient(ClientBase):
+class MistralAsyncClient:
     def __init__(
         self,
         api_key: Optional[str] = None,
-        endpoint: str = ENDPOINT,
+        endpoint: str = "",
         max_retries: int = 5,
         timeout: int = 120,
         max_concurrent_requests: int = 64,
     ):
-
-
-        self._client = AsyncClient(
-            follow_redirects=True,
-            timeout=timeout,
-            limits=Limits(max_connections=max_concurrent_requests),
-            transport=AsyncHTTPTransport(retries=max_retries),
-        )
-        self.files = FilesAsyncClient(self)
-        self.jobs = JobsAsyncClient(self)
-
-    async def close(self) -> None:
-        await self._client.aclose()
-
-    async def _check_response_status_codes(self, response: Response) -> None:
-        if response.status_code in RETRY_STATUS_CODES:
-            raise MistralAPIStatusException.from_response(
-                response,
-                message=f"Status: {response.status_code}. Message: {response.text}",
-            )
-        elif 400 <= response.status_code < 500:
-            if response.stream:
-                await response.aread()
-            raise MistralAPIException.from_response(
-                response,
-                message=f"Status: {response.status_code}. Message: {response.text}",
-            )
-        elif response.status_code >= 500:
-            if response.stream:
-                await response.aread()
-            raise MistralException(
-                message=f"Status: {response.status_code}. Message: {response.text}",
-            )
-
-    async def _check_streaming_response(self, response: Response) -> None:
-        await self._check_response_status_codes(response)
-
-    async def _check_response(self, response: Response) -> Dict[str, Any]:
-        await self._check_response_status_codes(response)
-
-        json_response: Dict[str, Any] = response.json()
-
-        if "object" not in json_response:
-            raise MistralException(message=f"Unexpected response: {json_response}")
-        if "error" == json_response["object"]:  # has errors
-            raise MistralAPIException.from_response(
-                response,
-                message=json_response["message"],
-            )
-
-        return json_response
-
-    async def _request(
-        self,
-        method: str,
-        json: Optional[Dict[str, Any]],
-        path: str,
-        stream: bool = False,
-        attempt: int = 1,
-        data: Optional[Dict[str, Any]] = None,
-        **kwargs: Any,
-    ) -> AsyncGenerator[Dict[str, Any], None]:
-        accept_header = "text/event-stream" if stream else "application/json"
-        headers = {
-            "Accept": accept_header,
-            "User-Agent": f"mistral-client-python/{self._version}",
-            "Authorization": f"Bearer {self._api_key}",
-        }
-
-        if json is not None:
-            headers["Content-Type"] = "application/json"
-
-        url = posixpath.join(self._endpoint, path)
-
-        self._logger.debug(f"Sending request: {method} {url} {json}")
-
-        response: Response
-
-        try:
-            if stream:
-                async with self._client.stream(
-                    method,
-                    url,
-                    headers=headers,
-                    json=json,
-                    data=data,
-                    **kwargs,
-                ) as response:
-                    await self._check_streaming_response(response)
-
-                    async for line in response.aiter_lines():
-                        json_streamed_response = self._process_line(line)
-                        if json_streamed_response:
-                            yield json_streamed_response
-
-            else:
-                response = await self._client.request(
-                    method,
-                    url,
-                    headers=headers,
-                    json=json,
-                    data=data,
-                    **kwargs,
-                )
-
-                yield await self._check_response(response)
-
-        except ConnectError as e:
-            raise MistralConnectionException(str(e)) from e
-        except RequestError as e:
-            raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from e
-        except JSONDecodeError as e:
-            raise MistralAPIException.from_response(
-                response,
-                message=f"Failed to decode json body: {response.text}",
-            ) from e
-        except MistralAPIStatusException as e:
-            attempt += 1
-            if attempt > self._max_retries:
-                raise MistralAPIStatusException.from_response(response, message=str(e)) from e
-            backoff = 2.0**attempt  # exponential backoff
-            await asyncio.sleep(backoff)
-
-            # Retry as a generator
-            async for r in self._request(method, json, path, stream=stream, attempt=attempt):
-                yield r
-
-    async def chat(
-        self,
-        messages: List[Any],
-        model: Optional[str] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        safe_mode: bool = False,
-        safe_prompt: bool = False,
-        tool_choice: Optional[Union[str, ToolChoice]] = None,
-        response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None,
-    ) -> ChatCompletionResponse:
-        """A asynchronous chat endpoint that returns a single response.
-
-        Args:
-            model (str): model the name of the model to chat with, e.g. mistral-tiny
-            messages (List[Any]): messages an array of messages to chat with, e.g.
-                [{role: 'user', content: 'What is the best French cheese?'}]
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
-            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.
-
-        Returns:
-            ChatCompletionResponse: a response object containing the generated text.
-        """
-        request = self._make_chat_request(
-            messages,
-            model,
-            tools=tools,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            top_p=top_p,
-            random_seed=random_seed,
-            stream=False,
-            safe_prompt=safe_mode or safe_prompt,
-            tool_choice=tool_choice,
-            response_format=response_format,
-        )
-
-        single_response = self._request("post", request, "v1/chat/completions")
-
-        async for response in single_response:
-            return ChatCompletionResponse(**response)
-
-        raise MistralException("No response received")
-
-    async def chat_stream(
-        self,
-        messages: List[Any],
-        model: Optional[str] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        safe_mode: bool = False,
-        safe_prompt: bool = False,
-        tool_choice: Optional[Union[str, ToolChoice]] = None,
-        response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None,
-    ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
-        """An Asynchronous chat endpoint that streams responses.
-
-        Args:
-            model (str): model the name of the model to chat with, e.g. mistral-tiny
-            messages (List[Any]): messages an array of messages to chat with, e.g.
-                [{role: 'user', content: 'What is the best French cheese?'}]
-            tools (Optional[List[Function]], optional): a list of tools to use.
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False.
-            safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False.
-
-        Returns:
-            AsyncGenerator[ChatCompletionStreamResponse, None]:
-                An async generator that yields ChatCompletionStreamResponse objects.
-        """
-
-        request = self._make_chat_request(
-            messages,
-            model,
-            tools=tools,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            top_p=top_p,
-            random_seed=random_seed,
-            stream=True,
-            safe_prompt=safe_mode or safe_prompt,
-            tool_choice=tool_choice,
-            response_format=response_format,
-        )
-        async_response = self._request("post", request, "v1/chat/completions", stream=True)
-
-        async for json_response in async_response:
-            yield ChatCompletionStreamResponse(**json_response)
-
-    async def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse:
-        """An asynchronous embeddings endpoint that returns embeddings for a single, or batch of inputs
-
-        Args:
-            model (str): The embedding model to use, e.g. mistral-embed
-            input (Union[str, List[str]]): The input to embed,
-                e.g. ['What is the best French cheese?']
-
-        Returns:
-            EmbeddingResponse: A response object containing the embeddings.
-        """
-        request = {"model": model, "input": input}
-        single_response = self._request("post", request, "v1/embeddings")
-
-        async for response in single_response:
-            return EmbeddingResponse(**response)
-
-        raise MistralException("No response received")
-
-    async def list_models(self) -> ModelList:
-        """Returns a list of the available models
-
-        Returns:
-            ModelList: A response object containing the list of models.
-        """
-        single_response = self._request("get", {}, "v1/models")
-
-        async for response in single_response:
-            return ModelList(**response)
-
-        raise MistralException("No response received")
-
-    async def delete_model(self, model_id: str) -> ModelDeleted:
-        single_response = self._request("delete", {}, f"v1/models/{model_id}")
-
-        async for response in single_response:
-            return ModelDeleted(**response)
-
-        raise MistralException("No response received")
-
-    async def completion(
-        self,
-        model: str,
-        prompt: str,
-        suffix: Optional[str] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        stop: Optional[List[str]] = None,
-    ) -> ChatCompletionResponse:
-        """An asynchronous completion endpoint that returns a single response.
-
-        Args:
-            model (str): model the name of the model to get completions with, e.g. codestral-latest
-            prompt (str): the prompt to complete
-            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
-        Returns:
-            Dict[str, Any]: a response object containing the generated text.
-        """
-        request = self._make_completion_request(
-            prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop
-        )
-        single_response = self._request("post", request, "v1/fim/completions")
-
-        async for response in single_response:
-            return ChatCompletionResponse(**response)
-
-        raise MistralException("No response received")
-
-    async def completion_stream(
-        self,
-        model: str,
-        prompt: str,
-        suffix: Optional[str] = None,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        random_seed: Optional[int] = None,
-        stop: Optional[List[str]] = None,
-    ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
-        """An asynchronous completion endpoint that returns a streaming response.
-
-        Args:
-            model (str): model the name of the model to get completions with, e.g. codestral-latest
-            prompt (str): the prompt to complete
-            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
-            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
-            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
-            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
-                Defaults to None.
-            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
-            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
-
-        Returns:
-            Dict[str, Any]: a response object containing the generated text.
-        """
-        request = self._make_completion_request(
-            prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop, stream=True
-        )
-        async_response = self._request("post", request, "v1/fim/completions", stream=True)
-
-        async for json_response in async_response:
-            yield ChatCompletionStreamResponse(**json_response)
+        raise NotImplementedError(MIGRATION_MESSAGE)