mistralai 1.2.2__py3-none-any.whl → 1.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +1 -1
- mistralai/agents.py +5 -5
- mistralai/chat.py +5 -5
- mistralai/files.py +166 -0
- mistralai/fim.py +5 -5
- mistralai/httpclient.py +6 -0
- mistralai/jobs.py +2 -2
- mistralai/models/__init__.py +22 -3
- mistralai/models/agentscompletionrequest.py +23 -11
- mistralai/models/agentscompletionstreamrequest.py +23 -13
- mistralai/models/apiendpoint.py +11 -3
- mistralai/models/assistantmessage.py +7 -3
- mistralai/models/batchjobin.py +4 -2
- mistralai/models/chatclassificationrequest.py +26 -17
- mistralai/models/chatcompletionrequest.py +19 -11
- mistralai/models/chatcompletionstreamrequest.py +23 -13
- mistralai/models/classificationrequest.py +7 -3
- mistralai/models/contentchunk.py +9 -3
- mistralai/models/deltamessage.py +5 -3
- mistralai/models/detailedjobout.py +2 -3
- mistralai/models/embeddingrequest.py +3 -3
- mistralai/models/files_api_routes_get_signed_urlop.py +25 -0
- mistralai/models/filesignedurl.py +13 -0
- mistralai/models/fimcompletionrequest.py +7 -3
- mistralai/models/fimcompletionstreamrequest.py +7 -3
- mistralai/models/functioncall.py +3 -3
- mistralai/models/imageurlchunk.py +9 -14
- mistralai/models/jobin.py +2 -3
- mistralai/models/jobout.py +2 -3
- mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +9 -4
- mistralai/models/modellist.py +4 -2
- mistralai/models/referencechunk.py +20 -0
- mistralai/models/retrieve_model_v1_models_model_id_getop.py +5 -4
- mistralai/models/systemmessage.py +7 -3
- mistralai/models/textchunk.py +3 -9
- mistralai/models/toolmessage.py +14 -5
- mistralai/models/usermessage.py +5 -3
- mistralai/models/validationerror.py +3 -3
- mistralai/sdk.py +14 -0
- mistralai/sdkconfiguration.py +3 -3
- mistralai/utils/annotations.py +42 -17
- mistralai/utils/eventstreaming.py +61 -1
- {mistralai-1.2.2.dist-info → mistralai-1.2.4.dist-info}/METADATA +181 -176
- {mistralai-1.2.2.dist-info → mistralai-1.2.4.dist-info}/RECORD +88 -84
- mistralai_azure/_version.py +1 -1
- mistralai_azure/chat.py +5 -5
- mistralai_azure/httpclient.py +6 -0
- mistralai_azure/models/__init__.py +13 -1
- mistralai_azure/models/assistantmessage.py +7 -3
- mistralai_azure/models/chatcompletionrequest.py +23 -11
- mistralai_azure/models/chatcompletionstreamrequest.py +19 -13
- mistralai_azure/models/contentchunk.py +14 -2
- mistralai_azure/models/deltamessage.py +5 -3
- mistralai_azure/models/functioncall.py +3 -3
- mistralai_azure/models/referencechunk.py +20 -0
- mistralai_azure/models/systemmessage.py +7 -3
- mistralai_azure/models/textchunk.py +3 -9
- mistralai_azure/models/toolmessage.py +14 -5
- mistralai_azure/models/usermessage.py +5 -3
- mistralai_azure/models/validationerror.py +3 -3
- mistralai_azure/sdkconfiguration.py +3 -3
- mistralai_azure/utils/annotations.py +42 -17
- mistralai_azure/utils/eventstreaming.py +61 -1
- mistralai_gcp/_version.py +1 -1
- mistralai_gcp/chat.py +5 -5
- mistralai_gcp/fim.py +5 -5
- mistralai_gcp/httpclient.py +6 -0
- mistralai_gcp/models/__init__.py +13 -1
- mistralai_gcp/models/assistantmessage.py +7 -3
- mistralai_gcp/models/chatcompletionrequest.py +23 -11
- mistralai_gcp/models/chatcompletionstreamrequest.py +19 -13
- mistralai_gcp/models/contentchunk.py +14 -2
- mistralai_gcp/models/deltamessage.py +5 -3
- mistralai_gcp/models/fimcompletionrequest.py +7 -3
- mistralai_gcp/models/fimcompletionstreamrequest.py +7 -3
- mistralai_gcp/models/functioncall.py +3 -3
- mistralai_gcp/models/referencechunk.py +20 -0
- mistralai_gcp/models/systemmessage.py +7 -3
- mistralai_gcp/models/textchunk.py +3 -9
- mistralai_gcp/models/toolmessage.py +14 -5
- mistralai_gcp/models/usermessage.py +5 -3
- mistralai_gcp/models/validationerror.py +3 -3
- mistralai_gcp/sdk.py +5 -4
- mistralai_gcp/sdkconfiguration.py +3 -3
- mistralai_gcp/utils/annotations.py +42 -17
- mistralai_gcp/utils/eventstreaming.py +61 -1
- mistralai/models/finetuneablemodel.py +0 -14
- {mistralai-1.2.2.dist-info → mistralai-1.2.4.dist-info}/LICENSE +0 -0
- {mistralai-1.2.2.dist-info → mistralai-1.2.4.dist-info}/WHEEL +0 -0
mistralai_azure/models/deltamessage.py
CHANGED

@@ -12,13 +12,15 @@ from mistralai_azure.types import (
 )
 from pydantic import model_serializer
 from typing import List, Union
-from typing_extensions import NotRequired, TypedDict
+from typing_extensions import NotRequired, TypeAliasType, TypedDict


-ContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+ContentTypedDict = TypeAliasType(
+    "ContentTypedDict", Union[str, List[ContentChunkTypedDict]]
+)


-Content = Union[str, List[ContentChunk]]
+Content = TypeAliasType("Content", Union[str, List[ContentChunk]])


 class DeltaMessageTypedDict(TypedDict):
mistralai_azure/models/functioncall.py
CHANGED

@@ -3,13 +3,13 @@
 from __future__ import annotations
 from mistralai_azure.types import BaseModel
 from typing import Any, Dict, Union
-from typing_extensions import TypedDict
+from typing_extensions import TypeAliasType, TypedDict


-ArgumentsTypedDict = Union[Dict[str, Any], str]
+ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str])


-Arguments = Union[Dict[str, Any], str]
+Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str])


 class FunctionCallTypedDict(TypedDict):
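The recurring change across these model files replaces bare Union aliases with typing_extensions.TypeAliasType, so each alias survives as a named type instead of flattening into the underlying Union. A minimal standalone sketch of the difference, reusing the Arguments alias from the hunk above (no SDK install needed):

from typing import Any, Dict, Union

from typing_extensions import TypeAliasType

# Before: the alias is just the Union object itself.
ArgumentsOld = Union[Dict[str, Any], str]

# After: a named alias; pydantic and type checkers can reference it by name,
# and forward/recursive references become possible.
Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str])

print(ArgumentsOld)         # typing.Union[typing.Dict[str, typing.Any], str]
print(Arguments)            # Arguments
print(Arguments.__value__)  # the underlying Union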
mistralai_azure/models/referencechunk.py
ADDED

@@ -0,0 +1,20 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai_azure.types import BaseModel
+from typing import List, Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ReferenceChunkType = Literal["reference"]
+
+
+class ReferenceChunkTypedDict(TypedDict):
+    reference_ids: List[int]
+    type: NotRequired[ReferenceChunkType]
+
+
+class ReferenceChunk(BaseModel):
+    reference_ids: List[int]
+
+    type: Optional[ReferenceChunkType] = "reference"
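ReferenceChunk is a new content-chunk variant that carries integer reference IDs. A hypothetical construction, assuming this release of the SDK is installed (the main mistralai package gains the same model per the file list above):

from mistralai.models import ReferenceChunk

chunk = ReferenceChunk(reference_ids=[0, 2])
print(chunk.type)               # "reference" (defaulted by the model)
print(chunk.model_dump_json())  # serializes reference_ids and type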
mistralai_azure/models/systemmessage.py
CHANGED

@@ -4,13 +4,17 @@ from __future__ import annotations
 from .textchunk import TextChunk, TextChunkTypedDict
 from mistralai_azure.types import BaseModel
 from typing import List, Literal, Optional, Union
-from typing_extensions import NotRequired, TypedDict
+from typing_extensions import NotRequired, TypeAliasType, TypedDict


-SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]]
+SystemMessageContentTypedDict = TypeAliasType(
+    "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]]
+)


-SystemMessageContent = Union[str, List[TextChunk]]
+SystemMessageContent = TypeAliasType(
+    "SystemMessageContent", Union[str, List[TextChunk]]
+)


 Role = Literal["system"]
mistralai_azure/models/textchunk.py
CHANGED

@@ -2,11 +2,8 @@

 from __future__ import annotations
 from mistralai_azure.types import BaseModel
-from mistralai_azure.utils import validate_const
-import pydantic
-from pydantic.functional_validators import AfterValidator
 from typing import Literal, Optional
-from typing_extensions import
+from typing_extensions import NotRequired, TypedDict


 Type = Literal["text"]

@@ -14,13 +11,10 @@ Type = Literal["text"]

 class TextChunkTypedDict(TypedDict):
     text: str
-    type: Type
+    type: NotRequired[Type]


 class TextChunk(BaseModel):
     text: str

-    TYPE: Annotated[
-        Annotated[Optional[Type], AfterValidator(validate_const("text"))],
-        pydantic.Field(alias="type"),
-    ] = "text"
+    type: Optional[Type] = "text"
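The TextChunk change drops the const-validated, aliased field machinery: type is now a plain optional literal defaulting to "text", and the TypedDict form may omit it. A small illustration (assumes mistralai_azure from this release):

from mistralai_azure.models import TextChunk, TextChunkTypedDict

chunk = TextChunk(text="hello")
print(chunk.type)  # "text", a plain default with no AfterValidator involved

payload: TextChunkTypedDict = {"text": "hello"}  # "type" is now NotRequired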
mistralai_azure/models/toolmessage.py
CHANGED

@@ -1,6 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .contentchunk import ContentChunk, ContentChunkTypedDict
 from mistralai_azure.types import (
     BaseModel,
     Nullable,

@@ -9,22 +10,30 @@ from mistralai_azure.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
+from typing import List, Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ToolMessageContentTypedDict = TypeAliasType(
+    "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
+)
+
+
+ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]])


 ToolMessageRole = Literal["tool"]


 class ToolMessageTypedDict(TypedDict):
-    content: str
+    content: Nullable[ToolMessageContentTypedDict]
     tool_call_id: NotRequired[Nullable[str]]
     name: NotRequired[Nullable[str]]
     role: NotRequired[ToolMessageRole]


 class ToolMessage(BaseModel):
-    content: str
+    content: Nullable[ToolMessageContent]

     tool_call_id: OptionalNullable[str] = UNSET

@@ -35,7 +44,7 @@ class ToolMessage(BaseModel):
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["tool_call_id", "name", "role"]
-        nullable_fields = ["tool_call_id", "name"]
+        nullable_fields = ["content", "tool_call_id", "name"]
         null_default_fields = []

         serialized = handler(self)
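The net effect for callers: a tool result may now be a plain string, a list of content chunks, or null, and content takes part in nullable-field serialization. A hypothetical construction (assumes mistralai_azure from this release):

from mistralai_azure.models import TextChunk, ToolMessage

msg = ToolMessage(content=[TextChunk(text="42")], tool_call_id="call_0")
print(msg.role)  # "tool" (defaulted)
print(msg.model_dump_json())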
mistralai_azure/models/usermessage.py
CHANGED

@@ -5,13 +5,15 @@ from .contentchunk import ContentChunk, ContentChunkTypedDict
 from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL
 from pydantic import model_serializer
 from typing import List, Literal, Optional, Union
-from typing_extensions import NotRequired, TypedDict
+from typing_extensions import NotRequired, TypeAliasType, TypedDict


-UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+UserMessageContentTypedDict = TypeAliasType(
+    "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
+)


-UserMessageContent = Union[str, List[ContentChunk]]
+UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]])


 UserMessageRole = Literal["user"]
mistralai_azure/models/validationerror.py
CHANGED

@@ -3,13 +3,13 @@
 from __future__ import annotations
 from mistralai_azure.types import BaseModel
 from typing import List, Union
-from typing_extensions import TypedDict
+from typing_extensions import TypeAliasType, TypedDict


-LocTypedDict = Union[str, int]
+LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int])


-Loc = Union[str, int]
+Loc = TypeAliasType("Loc", Union[str, int])


 class ValidationErrorTypedDict(TypedDict):
mistralai_azure/sdkconfiguration.py
CHANGED

@@ -28,9 +28,9 @@ class SDKConfiguration:
     server: Optional[str] = ""
     language: str = "python"
     openapi_doc_version: str = "0.0.2"
-    sdk_version: str = "1.2.
-    gen_version: str = "2.
-    user_agent: str = "speakeasy-sdk/python 1.2.
+    sdk_version: str = "1.2.3"
+    gen_version: str = "2.470.1"
+    user_agent: str = "speakeasy-sdk/python 1.2.3 2.470.1 0.0.2 mistralai_azure"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None
mistralai_azure/utils/annotations.py
CHANGED

@@ -1,30 +1,55 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from enum import Enum
-from typing import Any
+from typing import Any, Optional

 def get_discriminator(model: Any, fieldname: str, key: str) -> str:
-    if isinstance(model, dict):
-        try:
-            return f'{model.get(key)}'
-        except AttributeError as e:
-            raise ValueError(f'Could not find discriminator key {key} in {model}') from e
+    """
+    Recursively search for the discriminator attribute in a model.

-    if hasattr(model, fieldname):
-        attr = getattr(model, fieldname)
+    Args:
+        model (Any): The model to search within.
+        fieldname (str): The name of the field to search for.
+        key (str): The key to search for in dictionaries.

-        if isinstance(attr, Enum):
-            return f'{attr.value}'
+    Returns:
+        str: The name of the discriminator attribute.

-        return f'{attr}'
+    Raises:
+        ValueError: If the discriminator attribute is not found.
+    """
+    upper_fieldname = fieldname.upper()

-    fieldname = fieldname.upper()
-    if hasattr(model, fieldname):
-        attr = getattr(model, fieldname)
+    def get_field_discriminator(field: Any) -> Optional[str]:
+        """Search for the discriminator attribute in a given field."""

-        if isinstance(attr, Enum):
-            return f'{attr.value}'
+        if isinstance(field, dict):
+            if key in field:
+                return f'{field[key]}'

-        return f'{attr}'
+        if hasattr(field, fieldname):
+            attr = getattr(field, fieldname)
+            if isinstance(attr, Enum):
+                return f'{attr.value}'
+            return f'{attr}'
+
+        if hasattr(field, upper_fieldname):
+            attr = getattr(field, upper_fieldname)
+            if isinstance(attr, Enum):
+                return f'{attr.value}'
+            return f'{attr}'
+
+        return None
+
+    if isinstance(model, list):
+        for field in model:
+            discriminator = get_field_discriminator(field)
+            if discriminator is not None:
+                return discriminator
+
+    discriminator = get_field_discriminator(model)
+    if discriminator is not None:
+        return discriminator

     raise ValueError(f'Could not find discriminator field {fieldname} in {model}')
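The rewrite factors the lookup into a nested helper and, new in this version, also scans list-valued fields, returning the first discriminator found. A standalone sketch of the observable behavior (assumes this release; dict and list inputs shown):

from mistralai_azure.utils import get_discriminator

print(get_discriminator({"type": "text"}, "type", "type"))         # text
print(get_discriminator([{"type": "reference"}], "type", "type"))  # reference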
mistralai_azure/utils/eventstreaming.py
CHANGED

@@ -2,12 +2,72 @@

 import re
 import json
-from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple
+from typing import (
+    Callable,
+    Generic,
+    TypeVar,
+    Optional,
+    Generator,
+    AsyncGenerator,
+    Tuple,
+)
 import httpx

 T = TypeVar("T")


+class EventStream(Generic[T]):
+    response: httpx.Response
+    generator: Generator[T, None, None]
+
+    def __init__(
+        self,
+        response: httpx.Response,
+        decoder: Callable[[str], T],
+        sentinel: Optional[str] = None,
+    ):
+        self.response = response
+        self.generator = stream_events(response, decoder, sentinel)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return next(self.generator)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.response.close()
+
+
+class EventStreamAsync(Generic[T]):
+    response: httpx.Response
+    generator: AsyncGenerator[T, None]
+
+    def __init__(
+        self,
+        response: httpx.Response,
+        decoder: Callable[[str], T],
+        sentinel: Optional[str] = None,
+    ):
+        self.response = response
+        self.generator = stream_events_async(response, decoder, sentinel)
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        return await self.generator.__anext__()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.response.aclose()
+
+
 class ServerEvent:
     id: Optional[str] = None
     event: Optional[str] = None
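EventStream and EventStreamAsync wrap the existing stream_events generators in an iterator that is also a context manager, so the underlying httpx.Response is closed deterministically. A minimal sketch of the synchronous interface against a canned response; the identity decoder and hand-rolled SSE body are illustrative only, and real callers receive the stream from the SDK rather than constructing it (whether a pre-buffered Response suffices depends on stream_events internals):

import httpx

from mistralai_azure.utils.eventstreaming import EventStream

# Two data-only server-sent events followed by the terminating sentinel.
body = b'data: {"n": 1}\n\ndata: {"n": 2}\n\ndata: [DONE]\n\n'
response = httpx.Response(200, content=body)

# decoder would normally unmarshal JSON into a model; identity here.
with EventStream(response, decoder=lambda raw: raw, sentinel="[DONE]") as stream:
    for event in stream:
        print(event)  # {"n": 1} then {"n": 2}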
mistralai_gcp/_version.py
CHANGED
mistralai_gcp/chat.py
CHANGED
@@ -5,7 +5,7 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
-from typing import Any,
+from typing import Any, List, Optional, Union


 class Chat(BaseSDK):

@@ -40,7 +40,7 @@ class Chat(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[
+    ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
         r"""Stream chat completion

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

@@ -132,7 +132,7 @@ class Chat(BaseSDK):

         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.
+            return eventstreaming.EventStream(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",

@@ -185,7 +185,7 @@ class Chat(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[
+    ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
         r"""Stream chat completion

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

@@ -277,7 +277,7 @@ class Chat(BaseSDK):

         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.
+            return eventstreaming.EventStreamAsync(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
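For SDK users the visible change is the streaming return type: an EventStream of CompletionEvent rather than a bare generator, so the response can be scoped with a with-block. A hypothetical call through the main mistralai package, which receives the same change per the file list (model name and prompt are placeholders):

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Say hello"}],
)
if res is not None:
    with res as event_stream:  # closes the HTTP response on exit
        for event in event_stream:
            print(event.data.choices[0].delta.content or "", end="")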
mistralai_gcp/fim.py
CHANGED
@@ -5,7 +5,7 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
-from typing import Any,
+from typing import Any, Optional, Union


 class Fim(BaseSDK):

@@ -32,7 +32,7 @@ class Fim(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[
+    ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
         r"""Stream fim completion

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

@@ -112,7 +112,7 @@ class Fim(BaseSDK):

         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.
+            return eventstreaming.EventStream(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",

@@ -157,7 +157,7 @@ class Fim(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[
+    ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
         r"""Stream fim completion

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

@@ -237,7 +237,7 @@ class Fim(BaseSDK):

         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.
+            return eventstreaming.EventStreamAsync(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
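The async variants return EventStreamAsync, which supports async with and async for. A hypothetical fill-in-the-middle stream (again via the main package; model and prompt are placeholders):

import asyncio
import os

from mistralai import Mistral

async def main() -> None:
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    res = await client.fim.stream_async(
        model="codestral-latest",
        prompt="def fibonacci(n):",
    )
    if res is not None:
        async with res as event_stream:
            async for event in event_stream:
                print(event.data.choices[0].delta.content or "", end="")

asyncio.run(main())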
mistralai_gcp/httpclient.py
CHANGED
@@ -41,6 +41,9 @@ class HttpClient(Protocol):
     ) -> httpx.Request:
         pass

+    def close(self) -> None:
+        pass
+

 @runtime_checkable
 class AsyncHttpClient(Protocol):

@@ -76,3 +79,6 @@ class AsyncHttpClient(Protocol):
         extensions: Optional[httpx._types.RequestExtensions] = None,
     ) -> httpx.Request:
         pass
+
+    async def aclose(self) -> None:
+        pass
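Adding close()/aclose() to the client protocols lets the SDK release a transport it owns. Because these are structural Protocols, httpx's own clients already conform; a quick illustrative check (assumes HttpClient is runtime_checkable, as AsyncHttpClient is in the hunk above):

import httpx

from mistralai_gcp.httpclient import HttpClient

client: HttpClient = httpx.Client(timeout=30.0)
print(isinstance(client, HttpClient))  # True if the Protocol is runtime_checkable
client.close()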
mistralai_gcp/models/__init__.py
CHANGED
@@ -67,6 +67,7 @@ from .functioncall import (
 )
 from .functionname import FunctionName, FunctionNameTypedDict
 from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
+from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .responseformats import ResponseFormats
 from .sdkerror import SDKError

@@ -83,7 +84,13 @@ from .tool import Tool, ToolTypedDict
 from .toolcall import ToolCall, ToolCallTypedDict
 from .toolchoice import ToolChoice, ToolChoiceTypedDict
 from .toolchoiceenum import ToolChoiceEnum
-from .toolmessage import
+from .toolmessage import (
+    ToolMessage,
+    ToolMessageContent,
+    ToolMessageContentTypedDict,
+    ToolMessageRole,
+    ToolMessageTypedDict,
+)
 from .tooltypes import ToolTypes
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from .usermessage import (

@@ -160,6 +167,9 @@ __all__ = [
     "LocTypedDict",
     "Messages",
     "MessagesTypedDict",
+    "ReferenceChunk",
+    "ReferenceChunkType",
+    "ReferenceChunkTypedDict",
     "ResponseFormat",
     "ResponseFormatTypedDict",
     "ResponseFormats",

@@ -182,6 +192,8 @@ __all__ = [
     "ToolChoiceEnum",
     "ToolChoiceTypedDict",
     "ToolMessage",
+    "ToolMessageContent",
+    "ToolMessageContentTypedDict",
     "ToolMessageRole",
     "ToolMessageTypedDict",
     "ToolTypedDict",
mistralai_gcp/models/assistantmessage.py
CHANGED

@@ -12,13 +12,17 @@ from mistralai_gcp.types import (
 )
 from pydantic import model_serializer
 from typing import List, Literal, Optional, Union
-from typing_extensions import NotRequired, TypedDict
+from typing_extensions import NotRequired, TypeAliasType, TypedDict


-AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+AssistantMessageContentTypedDict = TypeAliasType(
+    "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
+)


-AssistantMessageContent = Union[str, List[ContentChunk]]
+AssistantMessageContent = TypeAliasType(
+    "AssistantMessageContent", Union[str, List[ContentChunk]]
+)


 AssistantMessageRole = Literal["assistant"]
mistralai_gcp/models/chatcompletionrequest.py
CHANGED

@@ -19,23 +19,30 @@ from mistralai_gcp.types import (
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
 from typing import List, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypedDict
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


-ChatCompletionRequestStopTypedDict = Union[str, List[str]]
+ChatCompletionRequestStopTypedDict = TypeAliasType(
+    "ChatCompletionRequestStopTypedDict", Union[str, List[str]]
+)
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


-ChatCompletionRequestStop = Union[str, List[str]]
+ChatCompletionRequestStop = TypeAliasType(
+    "ChatCompletionRequestStop", Union[str, List[str]]
+)
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


-ChatCompletionRequestMessagesTypedDict = Union[
-    SystemMessageTypedDict,
-    UserMessageTypedDict,
-    AssistantMessageTypedDict,
-    ToolMessageTypedDict,
-]
+ChatCompletionRequestMessagesTypedDict = TypeAliasType(
+    "ChatCompletionRequestMessagesTypedDict",
+    Union[
+        SystemMessageTypedDict,
+        UserMessageTypedDict,
+        AssistantMessageTypedDict,
+        ToolMessageTypedDict,
+    ],
+)


 ChatCompletionRequestMessages = Annotated[

@@ -49,10 +56,15 @@ ChatCompletionRequestMessages = Annotated[
 ]


-ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum]
+ChatCompletionRequestToolChoiceTypedDict = TypeAliasType(
+    "ChatCompletionRequestToolChoiceTypedDict",
+    Union[ToolChoiceTypedDict, ToolChoiceEnum],
+)


-ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]
+ChatCompletionRequestToolChoice = TypeAliasType(
+    "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
+)


 class ChatCompletionRequestTypedDict(TypedDict):
mistralai_gcp/models/chatcompletionstreamrequest.py
CHANGED

@@ -19,23 +19,26 @@ from mistralai_gcp.types import (
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
 from typing import List, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypedDict
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


-StopTypedDict = Union[str, List[str]]
+StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]])
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


-Stop = Union[str, List[str]]
+Stop = TypeAliasType("Stop", Union[str, List[str]])
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


-MessagesTypedDict = Union[
-    SystemMessageTypedDict,
-    UserMessageTypedDict,
-    AssistantMessageTypedDict,
-    ToolMessageTypedDict,
-]
+MessagesTypedDict = TypeAliasType(
+    "MessagesTypedDict",
+    Union[
+        SystemMessageTypedDict,
+        UserMessageTypedDict,
+        AssistantMessageTypedDict,
+        ToolMessageTypedDict,
+    ],
+)


 Messages = Annotated[

@@ -49,12 +52,15 @@ Messages = Annotated[
 ]


-ChatCompletionStreamRequestToolChoiceTypedDict = Union[
-    ToolChoiceTypedDict, ToolChoiceEnum
-]
+ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
+    "ChatCompletionStreamRequestToolChoiceTypedDict",
+    Union[ToolChoiceTypedDict, ToolChoiceEnum],
+)


-ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]
+ChatCompletionStreamRequestToolChoice = TypeAliasType(
+    "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
+)


 class ChatCompletionStreamRequestTypedDict(TypedDict):
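With the aliases wrapped in TypeAliasType, the TypedDict halves of these unions stay importable and usable in payload literals under their own names. A small typing sketch (assumes mistralai_gcp from this release):

from mistralai_gcp.models import MessagesTypedDict

# A plain dict literal still satisfies the (now named) Messages alias.
message: MessagesTypedDict = {"role": "user", "content": "Hello!"}
print(message)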