mistralai 1.9.11__py3-none-any.whl → 1.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_hooks/registration.py +5 -0
- mistralai/_hooks/tracing.py +75 -0
- mistralai/_version.py +2 -2
- mistralai/accesses.py +8 -8
- mistralai/agents.py +29 -17
- mistralai/chat.py +41 -29
- mistralai/classifiers.py +13 -1
- mistralai/conversations.py +294 -62
- mistralai/documents.py +19 -3
- mistralai/embeddings.py +13 -7
- mistralai/extra/README.md +1 -1
- mistralai/extra/mcp/auth.py +10 -11
- mistralai/extra/mcp/base.py +17 -16
- mistralai/extra/mcp/sse.py +13 -15
- mistralai/extra/mcp/stdio.py +5 -6
- mistralai/extra/observability/__init__.py +15 -0
- mistralai/extra/observability/otel.py +372 -0
- mistralai/extra/run/context.py +33 -43
- mistralai/extra/run/result.py +29 -30
- mistralai/extra/run/tools.py +34 -23
- mistralai/extra/struct_chat.py +15 -8
- mistralai/extra/utils/response_format.py +5 -3
- mistralai/files.py +6 -0
- mistralai/fim.py +17 -5
- mistralai/mistral_agents.py +229 -1
- mistralai/mistral_jobs.py +39 -13
- mistralai/models/__init__.py +99 -3
- mistralai/models/agent.py +15 -2
- mistralai/models/agentconversation.py +11 -3
- mistralai/models/agentcreationrequest.py +6 -2
- mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
- mistralai/models/agents_api_v1_agents_getop.py +40 -3
- mistralai/models/agents_api_v1_agents_listop.py +72 -2
- mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
- mistralai/models/agents_api_v1_conversations_listop.py +39 -2
- mistralai/models/agentscompletionrequest.py +21 -6
- mistralai/models/agentscompletionstreamrequest.py +21 -6
- mistralai/models/agentupdaterequest.py +18 -2
- mistralai/models/audioencoding.py +13 -0
- mistralai/models/audioformat.py +19 -0
- mistralai/models/audiotranscriptionrequest.py +2 -0
- mistralai/models/batchjobin.py +26 -5
- mistralai/models/batchjobout.py +5 -0
- mistralai/models/batchrequest.py +48 -0
- mistralai/models/chatcompletionrequest.py +22 -5
- mistralai/models/chatcompletionstreamrequest.py +22 -5
- mistralai/models/classificationrequest.py +37 -3
- mistralai/models/conversationrequest.py +15 -4
- mistralai/models/conversationrestartrequest.py +50 -2
- mistralai/models/conversationrestartstreamrequest.py +50 -2
- mistralai/models/conversationstreamrequest.py +15 -4
- mistralai/models/documentout.py +26 -10
- mistralai/models/documentupdatein.py +24 -3
- mistralai/models/embeddingrequest.py +19 -11
- mistralai/models/files_api_routes_list_filesop.py +7 -0
- mistralai/models/fimcompletionrequest.py +8 -9
- mistralai/models/fimcompletionstreamrequest.py +8 -9
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
- mistralai/models/libraries_documents_list_v1op.py +15 -2
- mistralai/models/libraryout.py +10 -7
- mistralai/models/listfilesout.py +35 -4
- mistralai/models/modelcapabilities.py +13 -4
- mistralai/models/modelconversation.py +8 -2
- mistralai/models/ocrpageobject.py +26 -5
- mistralai/models/ocrrequest.py +17 -1
- mistralai/models/ocrtableobject.py +31 -0
- mistralai/models/prediction.py +4 -0
- mistralai/models/requestsource.py +7 -0
- mistralai/models/responseformat.py +4 -2
- mistralai/models/responseformats.py +0 -1
- mistralai/models/sharingdelete.py +36 -5
- mistralai/models/sharingin.py +36 -5
- mistralai/models/sharingout.py +3 -3
- mistralai/models/toolexecutiondeltaevent.py +13 -4
- mistralai/models/toolexecutiondoneevent.py +13 -4
- mistralai/models/toolexecutionentry.py +9 -4
- mistralai/models/toolexecutionstartedevent.py +13 -4
- mistralai/models/toolfilechunk.py +11 -4
- mistralai/models/toolreferencechunk.py +13 -4
- mistralai/models_.py +2 -14
- mistralai/ocr.py +18 -0
- mistralai/transcriptions.py +4 -4
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/METADATA +162 -152
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/RECORD +168 -144
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +15 -5
- mistralai_azure/chat.py +59 -98
- mistralai_azure/models/__init__.py +50 -3
- mistralai_azure/models/chatcompletionrequest.py +16 -4
- mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
- mistralai_azure/models/httpvalidationerror.py +11 -6
- mistralai_azure/models/mistralazureerror.py +26 -0
- mistralai_azure/models/no_response_error.py +13 -0
- mistralai_azure/models/prediction.py +4 -0
- mistralai_azure/models/responseformat.py +4 -2
- mistralai_azure/models/responseformats.py +0 -1
- mistralai_azure/models/responsevalidationerror.py +25 -0
- mistralai_azure/models/sdkerror.py +30 -14
- mistralai_azure/models/systemmessage.py +7 -3
- mistralai_azure/models/systemmessagecontentchunks.py +21 -0
- mistralai_azure/models/thinkchunk.py +35 -0
- mistralai_azure/ocr.py +15 -36
- mistralai_azure/utils/__init__.py +18 -5
- mistralai_azure/utils/eventstreaming.py +10 -0
- mistralai_azure/utils/serializers.py +3 -2
- mistralai_azure/utils/unmarshal_json_response.py +24 -0
- mistralai_gcp/_hooks/types.py +7 -0
- mistralai_gcp/_version.py +4 -4
- mistralai_gcp/basesdk.py +27 -25
- mistralai_gcp/chat.py +75 -98
- mistralai_gcp/fim.py +39 -74
- mistralai_gcp/httpclient.py +6 -16
- mistralai_gcp/models/__init__.py +321 -116
- mistralai_gcp/models/assistantmessage.py +1 -1
- mistralai_gcp/models/chatcompletionrequest.py +36 -7
- mistralai_gcp/models/chatcompletionresponse.py +6 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
- mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
- mistralai_gcp/models/deltamessage.py +1 -1
- mistralai_gcp/models/fimcompletionrequest.py +3 -9
- mistralai_gcp/models/fimcompletionresponse.py +6 -6
- mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
- mistralai_gcp/models/httpvalidationerror.py +11 -6
- mistralai_gcp/models/imageurl.py +1 -1
- mistralai_gcp/models/jsonschema.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +26 -0
- mistralai_gcp/models/mistralpromptmode.py +8 -0
- mistralai_gcp/models/no_response_error.py +13 -0
- mistralai_gcp/models/prediction.py +4 -0
- mistralai_gcp/models/responseformat.py +5 -3
- mistralai_gcp/models/responseformats.py +0 -1
- mistralai_gcp/models/responsevalidationerror.py +25 -0
- mistralai_gcp/models/sdkerror.py +30 -14
- mistralai_gcp/models/systemmessage.py +7 -3
- mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
- mistralai_gcp/models/thinkchunk.py +35 -0
- mistralai_gcp/models/toolmessage.py +1 -1
- mistralai_gcp/models/usageinfo.py +71 -8
- mistralai_gcp/models/usermessage.py +1 -1
- mistralai_gcp/sdk.py +12 -10
- mistralai_gcp/sdkconfiguration.py +0 -7
- mistralai_gcp/types/basemodel.py +3 -3
- mistralai_gcp/utils/__init__.py +143 -45
- mistralai_gcp/utils/datetimes.py +23 -0
- mistralai_gcp/utils/enums.py +67 -27
- mistralai_gcp/utils/eventstreaming.py +10 -0
- mistralai_gcp/utils/forms.py +49 -28
- mistralai_gcp/utils/serializers.py +33 -3
- mistralai_gcp/utils/unmarshal_json_response.py +24 -0
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
mistralai_azure/models/httpvalidationerror.py
CHANGED

@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 from .validationerror import ValidationError
-
+import httpx
+from mistralai_azure.models import MistralAzureError
 from mistralai_azure.types import BaseModel
 from typing import List, Optional
 
@@ -11,11 +12,15 @@ class HTTPValidationErrorData(BaseModel):
     detail: Optional[List[ValidationError]] = None
 
 
-class HTTPValidationError(
+class HTTPValidationError(MistralAzureError):
     data: HTTPValidationErrorData
 
-    def __init__(
+    def __init__(
+        self,
+        data: HTTPValidationErrorData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        message = body or raw_response.text
+        super().__init__(message, raw_response, body)
         self.data = data
-
-    def __str__(self) -> str:
-        return utils.marshal_json(self.data, HTTPValidationErrorData)
mistralai_azure/models/mistralazureerror.py
ADDED

@@ -0,0 +1,26 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+
+
+class MistralAzureError(Exception):
+    """The base class for all HTTP error responses."""
+
+    message: str
+    status_code: int
+    body: str
+    headers: httpx.Headers
+    raw_response: httpx.Response
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        self.message = message
+        self.status_code = raw_response.status_code
+        self.body = body if body is not None else raw_response.text
+        self.headers = raw_response.headers
+        self.raw_response = raw_response
+
+    def __str__(self):
+        return self.message
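Note (not part of the diff): a minimal sketch of what this new base class exposes to callers. The attribute names come from the class above; the httpx.Response is fabricated here for illustration, whereas in practice it is the failed HTTP response from an SDK call.

import httpx
from mistralai_azure.models import MistralAzureError

# Illustrative response; in the SDK, raw_response comes from the failed request.
res = httpx.Response(429, headers={"content-type": "application/json"}, text='{"detail": "rate limited"}')
err = MistralAzureError("API error occurred", res)

print(err.status_code)               # 429
print(err.body)                      # '{"detail": "rate limited"}'
print(err.headers["content-type"])   # application/json
print(str(err))                      # "API error occurred"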
mistralai_azure/models/no_response_error.py
ADDED

@@ -0,0 +1,13 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+class NoResponseError(Exception):
+    """Error raised when no HTTP response is received from the server."""
+
+    message: str
+
+    def __init__(self, message: str = "No response received"):
+        self.message = message
+        super().__init__(message)
+
+    def __str__(self):
+        return self.message
mistralai_azure/models/prediction.py
CHANGED

@@ -10,11 +10,15 @@ from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 class PredictionTypedDict(TypedDict):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     type: Literal["content"]
     content: NotRequired[str]
 
 
 class Prediction(BaseModel):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     TYPE: Annotated[
         Annotated[
             Optional[Literal["content"]], AfterValidator(validate_const("content"))
mistralai_azure/models/responseformat.py
CHANGED

@@ -16,14 +16,16 @@ from typing_extensions import NotRequired, TypedDict
 
 
 class ResponseFormatTypedDict(TypedDict):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: NotRequired[ResponseFormats]
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
     json_schema: NotRequired[Nullable[JSONSchemaTypedDict]]
 
 
 class ResponseFormat(BaseModel):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: Optional[ResponseFormats] = None
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
 
     json_schema: OptionalNullable[JSONSchema] = UNSET
 
mistralai_azure/models/responseformats.py
CHANGED

@@ -5,4 +5,3 @@ from typing import Literal
 
 
 ResponseFormats = Literal["text", "json_object", "json_schema"]
-r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
mistralai_azure/models/responsevalidationerror.py
ADDED

@@ -0,0 +1,25 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+
+from mistralai_azure.models import MistralAzureError
+
+
+class ResponseValidationError(MistralAzureError):
+    """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""
+
+    def __init__(
+        self,
+        message: str,
+        raw_response: httpx.Response,
+        cause: Exception,
+        body: Optional[str] = None,
+    ):
+        message = f"{message}: {cause}"
+        super().__init__(message, raw_response, body)
+
+    @property
+    def cause(self):
+        """Normally the Pydantic ValidationError"""
+        return self.__cause__
mistralai_azure/models/sdkerror.py
CHANGED

@@ -1,22 +1,38 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
-from dataclasses import dataclass
-from typing import Optional
 import httpx
+from typing import Optional
+
+from mistralai_azure.models import MistralAzureError
+
+MAX_MESSAGE_LEN = 10_000
+
+
+class SDKError(MistralAzureError):
+    """The fallback error class if no more specific error class is matched."""
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        body_display = body or raw_response.text or '""'
 
+        if message:
+            message += ": "
+        message += f"Status {raw_response.status_code}"
 
-
-
-
+        headers = raw_response.headers
+        content_type = headers.get("content-type", '""')
+        if content_type != "application/json":
+            if " " in content_type:
+                content_type = f'"{content_type}"'
+            message += f" Content-Type {content_type}"
 
-
-
-
-
+        if len(body_display) > MAX_MESSAGE_LEN:
+            truncated = body_display[:MAX_MESSAGE_LEN]
+            remaining = len(body_display) - MAX_MESSAGE_LEN
+            body_display = f"{truncated}...and {remaining} more chars"
 
-
-
-        if len(self.body) > 0:
-            body = f"\n{self.body}"
+        message += f". Body: {body_display}"
+        message = message.strip()
 
-
+        super().__init__(message, raw_response, body)
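Note (not part of the diff): a sketch of the message the reworked SDKError now builds from a response. The class and its constructor are taken from the hunk above; the response is fabricated for illustration.

import httpx
from mistralai_azure.models import SDKError

# Illustrative failed response; in the SDK this comes from the HTTP call.
res = httpx.Response(404, headers={"content-type": "text/plain"}, text="not found")
err = SDKError("API error occurred", res, res.text)

print(str(err))
# API error occurred: Status 404 Content-Type text/plain. Body: not found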
mistralai_azure/models/systemmessage.py
CHANGED

@@ -1,19 +1,23 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from .
+from .systemmessagecontentchunks import (
+    SystemMessageContentChunks,
+    SystemMessageContentChunksTypedDict,
+)
 from mistralai_azure.types import BaseModel
 from typing import List, Literal, Optional, Union
 from typing_extensions import NotRequired, TypeAliasType, TypedDict
 
 
 SystemMessageContentTypedDict = TypeAliasType(
-    "SystemMessageContentTypedDict",
+    "SystemMessageContentTypedDict",
+    Union[str, List[SystemMessageContentChunksTypedDict]],
 )
 
 
 SystemMessageContent = TypeAliasType(
-    "SystemMessageContent", Union[str, List[
+    "SystemMessageContent", Union[str, List[SystemMessageContentChunks]]
 )
 
 
mistralai_azure/models/systemmessagecontentchunks.py
ADDED

@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .textchunk import TextChunk, TextChunkTypedDict
+from .thinkchunk import ThinkChunk, ThinkChunkTypedDict
+from mistralai_azure.utils import get_discriminator
+from pydantic import Discriminator, Tag
+from typing import Union
+from typing_extensions import Annotated, TypeAliasType
+
+
+SystemMessageContentChunksTypedDict = TypeAliasType(
+    "SystemMessageContentChunksTypedDict",
+    Union[TextChunkTypedDict, ThinkChunkTypedDict],
+)
+
+
+SystemMessageContentChunks = Annotated[
+    Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
mistralai_azure/models/thinkchunk.py
ADDED

@@ -0,0 +1,35 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
+from .textchunk import TextChunk, TextChunkTypedDict
+from mistralai_azure.types import BaseModel
+from typing import List, Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ThinkingTypedDict = TypeAliasType(
+    "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict]
+)
+
+
+Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk])
+
+
+ThinkChunkType = Literal["thinking"]
+
+
+class ThinkChunkTypedDict(TypedDict):
+    thinking: List[ThinkingTypedDict]
+    closed: NotRequired[bool]
+    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
+    type: NotRequired[ThinkChunkType]
+
+
+class ThinkChunk(BaseModel):
+    thinking: List[Thinking]
+
+    closed: Optional[bool] = None
+    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
+
+    type: Optional[ThinkChunkType] = "thinking"
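Note (not part of the diff): a small sketch of how these new chunk types compose into system message content. It assumes the existing TextChunk model exposes a `text` field and is exported from mistralai_azure.models, which this diff does not show.

from mistralai_azure.models import TextChunk, ThinkChunk

# System message content can now mix plain text chunks with "thinking" chunks.
chunk = ThinkChunk(thinking=[TextChunk(text="reason step by step")], closed=True)
system_content = [TextChunk(text="You are a helpful assistant."), chunk]

print(chunk.type)    # "thinking"
print(chunk.closed)  # True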
mistralai_azure/ocr.py
CHANGED
@@ -4,6 +4,7 @@ from .basesdk import BaseSDK
 from mistralai_azure import models, utils
 from mistralai_azure._hooks import HookContext
 from mistralai_azure.types import Nullable, OptionalNullable, UNSET
+from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response
 from typing import Any, List, Mapping, Optional, Union
 
 
@@ -113,31 +114,20 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return
+            return unmarshal_json_response(Optional[models.OCRResponse], http_res)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
 
     async def process_async(
         self,
@@ -244,28 +234,17 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return
+            return unmarshal_json_response(Optional[models.OCRResponse], http_res)
        if utils.match_response(http_res, "422", "application/json"):
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
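Note (not part of the diff): a hedged sketch of what these ocr.py changes mean for callers. The 200 branch now returns a parsed models.OCRResponse instead of returning nothing, and failures raise the typed errors introduced above. The call shape and arguments below are assumed for illustration, not taken from this diff.

from mistralai_azure import MistralAzure, models

client = MistralAzure(azure_api_key="...", azure_endpoint="https://example.invalid")

try:
    ocr_response = client.ocr.process(
        model="mistral-ocr-latest",  # hypothetical arguments
        document={"type": "document_url", "document_url": "https://example.invalid/doc.pdf"},
    )
except models.HTTPValidationError as e:
    print(e.status_code, e.data.detail)   # 422 with structured validation details
except models.ResponseValidationError as e:
    print(e.cause)                        # the underlying Pydantic validation error
except models.MistralAzureError as e:
    print(e.status_code, e.message)       # SDKError and any other HTTP error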
mistralai_azure/utils/__init__.py
CHANGED

@@ -2,6 +2,8 @@
 
 from typing import TYPE_CHECKING
 from importlib import import_module
+import builtins
+import sys
 
 if TYPE_CHECKING:
     from .annotations import get_discriminator
@@ -158,6 +160,18 @@ _dynamic_imports: dict[str, str] = {
 }
 
 
+def dynamic_import(modname, retries=3):
+    for attempt in range(retries):
+        try:
+            return import_module(modname, __package__)
+        except KeyError:
+            # Clear any half-initialized module and retry
+            sys.modules.pop(modname, None)
+            if attempt == retries - 1:
+                break
+    raise KeyError(f"Failed to import module '{modname}' after {retries} attempts")
+
+
 def __getattr__(attr_name: str) -> object:
     module_name = _dynamic_imports.get(attr_name)
     if module_name is None:
@@ -166,9 +180,8 @@ def __getattr__(attr_name: str) -> object:
         )
 
     try:
-        module =
-
-        return result
+        module = dynamic_import(module_name)
+        return getattr(module, attr_name)
     except ImportError as e:
         raise ImportError(
             f"Failed to import {attr_name} from {module_name}: {e}"
@@ -180,5 +193,5 @@ def __getattr__(attr_name: str) -> object:
 
 
 def __dir__():
-    lazy_attrs = list(_dynamic_imports.keys())
-    return sorted(lazy_attrs)
+    lazy_attrs = builtins.list(_dynamic_imports.keys())
+    return builtins.sorted(lazy_attrs)
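Note (not part of the diff): a self-contained sketch of the lazy-import pattern this file uses, showing how the _dynamic_imports map, the module-level __getattr__, and the new retry helper fit together. The package and submodule names are invented for illustration.

# lazy_pkg/__init__.py -- illustrative package, not part of mistralai
import builtins
import sys
from importlib import import_module

_dynamic_imports = {"helper": ".helpers"}  # exported name -> submodule


def dynamic_import(modname, retries=3):
    for attempt in range(retries):
        try:
            return import_module(modname, __package__)
        except KeyError:
            # Clear any half-initialized module and retry
            sys.modules.pop(modname, None)
            if attempt == retries - 1:
                break
    raise KeyError(f"Failed to import module '{modname}' after {retries} attempts")


def __getattr__(attr_name):  # PEP 562: called only for names not found normally
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {attr_name!r}")
    return getattr(dynamic_import(module_name), attr_name)


def __dir__():
    return builtins.sorted(builtins.list(_dynamic_imports.keys()))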
@@ -17,6 +17,9 @@ T = TypeVar("T")
|
|
|
17
17
|
|
|
18
18
|
|
|
19
19
|
class EventStream(Generic[T]):
|
|
20
|
+
# Holds a reference to the SDK client to avoid it being garbage collected
|
|
21
|
+
# and cause termination of the underlying httpx client.
|
|
22
|
+
client_ref: Optional[object]
|
|
20
23
|
response: httpx.Response
|
|
21
24
|
generator: Generator[T, None, None]
|
|
22
25
|
|
|
@@ -25,9 +28,11 @@ class EventStream(Generic[T]):
|
|
|
25
28
|
response: httpx.Response,
|
|
26
29
|
decoder: Callable[[str], T],
|
|
27
30
|
sentinel: Optional[str] = None,
|
|
31
|
+
client_ref: Optional[object] = None,
|
|
28
32
|
):
|
|
29
33
|
self.response = response
|
|
30
34
|
self.generator = stream_events(response, decoder, sentinel)
|
|
35
|
+
self.client_ref = client_ref
|
|
31
36
|
|
|
32
37
|
def __iter__(self):
|
|
33
38
|
return self
|
|
@@ -43,6 +48,9 @@ class EventStream(Generic[T]):
|
|
|
43
48
|
|
|
44
49
|
|
|
45
50
|
class EventStreamAsync(Generic[T]):
|
|
51
|
+
# Holds a reference to the SDK client to avoid it being garbage collected
|
|
52
|
+
# and cause termination of the underlying httpx client.
|
|
53
|
+
client_ref: Optional[object]
|
|
46
54
|
response: httpx.Response
|
|
47
55
|
generator: AsyncGenerator[T, None]
|
|
48
56
|
|
|
@@ -51,9 +59,11 @@ class EventStreamAsync(Generic[T]):
|
|
|
51
59
|
response: httpx.Response,
|
|
52
60
|
decoder: Callable[[str], T],
|
|
53
61
|
sentinel: Optional[str] = None,
|
|
62
|
+
client_ref: Optional[object] = None,
|
|
54
63
|
):
|
|
55
64
|
self.response = response
|
|
56
65
|
self.generator = stream_events_async(response, decoder, sentinel)
|
|
66
|
+
self.client_ref = client_ref
|
|
57
67
|
|
|
58
68
|
def __aiter__(self):
|
|
59
69
|
return self
|
|
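Note (not part of the diff): the new client_ref lets the SDK hand a stream a reference to the client that created it, so a pattern like the sketch below no longer risks the underlying httpx client being closed by garbage collection while the stream is still being consumed. The call shape and arguments are assumed for illustration.

from mistralai_azure import MistralAzure

def open_stream():
    # The client created here goes out of scope when the function returns;
    # the returned EventStream keeps it alive via client_ref.
    client = MistralAzure(azure_api_key="...", azure_endpoint="https://example.invalid")
    return client.chat.stream(
        model="mistral-large-latest",  # hypothetical arguments
        messages=[{"role": "user", "content": "Hello"}],
    )

for event in open_stream():
    print(event.data)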
@@ -192,7 +192,9 @@ def is_union(obj: object) -> bool:
|
|
|
192
192
|
"""
|
|
193
193
|
Returns True if the given object is a typing.Union or typing_extensions.Union.
|
|
194
194
|
"""
|
|
195
|
-
return any(
|
|
195
|
+
return any(
|
|
196
|
+
obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")
|
|
197
|
+
)
|
|
196
198
|
|
|
197
199
|
|
|
198
200
|
def stream_to_text(stream: httpx.Response) -> str:
|
|
@@ -245,4 +247,3 @@ def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]:
|
|
|
245
247
|
f"Neither typing nor typing_extensions has an object called {name!r}"
|
|
246
248
|
)
|
|
247
249
|
return result
|
|
248
|
-
|
|
mistralai_azure/utils/unmarshal_json_response.py
ADDED

@@ -0,0 +1,24 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from typing import Any, Optional
+
+import httpx
+
+from .serializers import unmarshal_json
+from mistralai_azure import models
+
+
+def unmarshal_json_response(
+    typ: Any, http_res: httpx.Response, body: Optional[str] = None
+) -> Any:
+    if body is None:
+        body = http_res.text
+    try:
+        return unmarshal_json(body, typ)
+    except Exception as e:
+        raise models.ResponseValidationError(
+            "Response validation failed",
+            http_res,
+            e,
+            body,
+        ) from e
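Note (not part of the diff): a minimal sketch of how this helper surfaces payloads that do not match the expected model, assuming the mistralai_azure package is installed. The response object is fabricated for illustration.

import httpx
from mistralai_azure import models
from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response

# A 200 response whose body does not match the expected model.
res = httpx.Response(200, headers={"content-type": "application/json"}, text='{"unexpected": true}')

try:
    unmarshal_json_response(models.OCRResponse, res)
except models.ResponseValidationError as e:
    print(e.status_code)  # 200 -- the HTTP call itself succeeded
    print(e.cause)        # the underlying Pydantic ValidationError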
mistralai_gcp/_hooks/types.py
CHANGED
@@ -3,10 +3,12 @@
 from abc import ABC, abstractmethod
 import httpx
 from mistralai_gcp.httpclient import HttpClient
+from mistralai_gcp.sdkconfiguration import SDKConfiguration
 from typing import Any, Callable, List, Optional, Tuple, Union
 
 
 class HookContext:
+    config: SDKConfiguration
     base_url: str
     operation_id: str
     oauth2_scopes: Optional[List[str]] = None
@@ -14,11 +16,13 @@ class HookContext:
 
     def __init__(
         self,
+        config: SDKConfiguration,
         base_url: str,
         operation_id: str,
         oauth2_scopes: Optional[List[str]],
         security_source: Optional[Union[Any, Callable[[], Any]]],
     ):
+        self.config = config
         self.base_url = base_url
         self.operation_id = operation_id
         self.oauth2_scopes = oauth2_scopes
@@ -28,6 +32,7 @@ class HookContext:
 class BeforeRequestContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
         super().__init__(
+            hook_ctx.config,
             hook_ctx.base_url,
             hook_ctx.operation_id,
             hook_ctx.oauth2_scopes,
@@ -38,6 +43,7 @@ class BeforeRequestContext(HookContext):
 class AfterSuccessContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
         super().__init__(
+            hook_ctx.config,
             hook_ctx.base_url,
             hook_ctx.operation_id,
             hook_ctx.oauth2_scopes,
@@ -48,6 +54,7 @@ class AfterSuccessContext(HookContext):
 class AfterErrorContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
         super().__init__(
+            hook_ctx.config,
            hook_ctx.base_url,
            hook_ctx.operation_id,
            hook_ctx.oauth2_scopes,
mistralai_gcp/_version.py
CHANGED
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai-gcp"
-__version__: str = "1.
-__openapi_doc_version__: str = "0.0
-__gen_version__: str = "2.
-__user_agent__: str = "speakeasy-sdk/python 1.
+__version__: str = "1.7.0"
+__openapi_doc_version__: str = "1.0.0"
+__gen_version__: str = "2.687.13"
+__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.687.13 1.0.0 mistralai-gcp"
 
 try:
     if __package__ is not None: