mistralai 1.0.0rc2__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/agents.py +33 -33
- mistralai/chat.py +4 -4
- mistralai/client.py +1 -1
- mistralai/jobs.py +24 -34
- mistralai/models/__init__.py +22 -22
- mistralai/models/agentscompletionrequest.py +14 -14
- mistralai/models/agentscompletionstreamrequest.py +41 -39
- mistralai/models/archiveftmodelout.py +4 -2
- mistralai/models/chatcompletionchoice.py +3 -4
- mistralai/models/chatcompletionrequest.py +16 -16
- mistralai/models/chatcompletionstreamrequest.py +16 -16
- mistralai/models/delete_model_v1_models_model_id_deleteop.py +2 -0
- mistralai/models/deltamessage.py +6 -6
- mistralai/models/detailedjobout.py +19 -5
- mistralai/models/embeddingrequest.py +8 -8
- mistralai/models/files_api_routes_upload_fileop.py +7 -4
- mistralai/models/fileschema.py +8 -3
- mistralai/models/fimcompletionrequest.py +8 -8
- mistralai/models/fimcompletionstreamrequest.py +8 -8
- mistralai/models/ftmodelout.py +4 -2
- mistralai/models/functioncall.py +9 -3
- mistralai/models/githubrepositoryin.py +4 -2
- mistralai/models/githubrepositoryout.py +4 -2
- mistralai/models/jobin.py +16 -4
- mistralai/models/jobout.py +20 -5
- mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +2 -0
- mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +1 -54
- mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +2 -0
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +2 -0
- mistralai/models/jobsout.py +4 -2
- mistralai/models/legacyjobmetadataout.py +4 -2
- mistralai/models/retrieve_model_v1_models_model_id_getop.py +2 -0
- mistralai/models/retrievefileout.py +8 -3
- mistralai/models/systemmessage.py +6 -6
- mistralai/models/tool.py +9 -5
- mistralai/models/toolcall.py +8 -4
- mistralai/models/trainingparameters.py +6 -2
- mistralai/models/trainingparametersin.py +10 -2
- mistralai/models/unarchiveftmodelout.py +4 -2
- mistralai/models/uploadfileout.py +8 -3
- mistralai/models/usermessage.py +6 -6
- mistralai/models/validationerror.py +6 -6
- mistralai/models/wandbintegration.py +4 -2
- mistralai/models/wandbintegrationout.py +4 -2
- mistralai/models_.py +10 -10
- mistralai/sdk.py +2 -2
- mistralai/sdkconfiguration.py +3 -3
- mistralai/utils/__init__.py +2 -2
- mistralai/utils/forms.py +10 -9
- mistralai/utils/headers.py +8 -8
- mistralai/utils/logger.py +8 -0
- mistralai/utils/queryparams.py +16 -14
- mistralai/utils/serializers.py +17 -8
- mistralai/utils/url.py +13 -8
- mistralai/utils/values.py +6 -0
- mistralai/version.py +7 -0
- {mistralai-1.0.0rc2.dist-info → mistralai-1.0.2.dist-info}/METADATA +40 -18
- {mistralai-1.0.0rc2.dist-info → mistralai-1.0.2.dist-info}/RECORD +87 -86
- mistralai_azure/models/__init__.py +4 -4
- mistralai_azure/models/chatcompletionchoice.py +3 -4
- mistralai_azure/models/chatcompletionrequest.py +14 -14
- mistralai_azure/models/chatcompletionstreamrequest.py +14 -14
- mistralai_azure/models/deltamessage.py +6 -6
- mistralai_azure/models/functioncall.py +9 -3
- mistralai_azure/models/systemmessage.py +6 -6
- mistralai_azure/models/tool.py +9 -5
- mistralai_azure/models/toolcall.py +8 -4
- mistralai_azure/models/usermessage.py +6 -6
- mistralai_azure/models/validationerror.py +6 -6
- mistralai_azure/sdkconfiguration.py +3 -3
- mistralai_gcp/chat.py +4 -4
- mistralai_gcp/models/__init__.py +4 -4
- mistralai_gcp/models/chatcompletionchoice.py +3 -4
- mistralai_gcp/models/chatcompletionrequest.py +16 -16
- mistralai_gcp/models/chatcompletionstreamrequest.py +16 -16
- mistralai_gcp/models/deltamessage.py +6 -6
- mistralai_gcp/models/fimcompletionrequest.py +8 -8
- mistralai_gcp/models/fimcompletionstreamrequest.py +8 -8
- mistralai_gcp/models/functioncall.py +9 -3
- mistralai_gcp/models/systemmessage.py +6 -6
- mistralai_gcp/models/tool.py +9 -5
- mistralai_gcp/models/toolcall.py +8 -4
- mistralai_gcp/models/usermessage.py +6 -6
- mistralai_gcp/models/validationerror.py +6 -6
- mistralai_gcp/sdkconfiguration.py +3 -3
- {mistralai-1.0.0rc2.dist-info → mistralai-1.0.2.dist-info}/LICENSE +0 -0
- {mistralai-1.0.0rc2.dist-info → mistralai-1.0.2.dist-info}/WHEEL +0 -0
mistralai_azure/sdkconfiguration.py
CHANGED

@@ -29,9 +29,9 @@ class SDKConfiguration:
     server: Optional[str] = ""
     language: str = "python"
     openapi_doc_version: str = "0.0.2"
-    sdk_version: str = "1.0.
-    gen_version: str = "2.
-    user_agent: str = "speakeasy-sdk/python 1.0.
+    sdk_version: str = "1.0.1"
+    gen_version: str = "2.399.0"
+    user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai_azure"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None
mistralai_gcp/chat.py
CHANGED

@@ -33,7 +33,7 @@ class Chat(BaseSDK):

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-        :param model: ID of the model to use. You can use the [List Available Models](/api
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -146,7 +146,7 @@ class Chat(BaseSDK):

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-        :param model: ID of the model to use. You can use the [List Available Models](/api
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -257,7 +257,7 @@ class Chat(BaseSDK):
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion

-        :param model: ID of the model to use. You can use the [List Available Models](/api
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -367,7 +367,7 @@ class Chat(BaseSDK):
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion

-        :param model: ID of the model to use. You can use the [List Available Models](/api
+        :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
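For context, the corrected docstrings above describe the standard chat-completion call shape. A minimal usage sketch against the top-level client shipped in this wheel (the model name is a placeholder, `MISTRAL_API_KEY` is assumed to be set, and only one of `temperature`/`top_p` is passed, as the docstring recommends):

```python
import os

from mistralai import Mistral

# Sketch only: a non-streaming chat completion using the parameters documented above.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

response = client.chat.complete(
    model="mistral-small-latest",  # placeholder; list available models via the Models API
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    temperature=0.2,  # set temperature *or* top_p, not both
)

if response is not None:
    print(response.choices[0].message.content)
```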
mistralai_gcp/models/__init__.py
CHANGED

@@ -14,18 +14,18 @@ from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop
 from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
 from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict
 from .function import Function, FunctionTypedDict
-from .functioncall import FunctionCall, FunctionCallTypedDict
+from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict
 from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
 from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats
 from .sdkerror import SDKError
 from .security import Security, SecurityTypedDict
 from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
-from .tool import Tool, ToolTypedDict
-from .toolcall import ToolCall, ToolCallTypedDict
+from .tool import Tool, ToolToolTypes, ToolTypedDict
+from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes
 from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict
 from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict

-__all__ = ["AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
+__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
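The newly re-exported names can be imported straight from the models package, e.g. (trivial sketch):

```python
# The new Arguments/ToolTypes aliases are now part of the public models namespace.
from mistralai_gcp.models import Arguments, ArgumentsTypedDict, ToolToolTypes, ToolTypes

raw_args: Arguments = '{"city": "Paris"}'  # either a JSON string or an already-parsed dict
```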
mistralai_gcp/models/chatcompletionchoice.py
CHANGED

@@ -3,20 +3,19 @@
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import Literal,
-from typing_extensions import NotRequired
+from typing import Literal, TypedDict


 ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]

 class ChatCompletionChoiceTypedDict(TypedDict):
     index: int
+    message: AssistantMessageTypedDict
     finish_reason: ChatCompletionChoiceFinishReason
-    message: NotRequired[AssistantMessageTypedDict]


 class ChatCompletionChoice(BaseModel):
     index: int
+    message: AssistantMessage
     finish_reason: ChatCompletionChoiceFinishReason
-    message: Optional[AssistantMessage] = None

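`message` on `ChatCompletionChoice` changed from optional to required. A small construction sketch against the revised model:

```python
from mistralai_gcp.models import AssistantMessage, ChatCompletionChoice

# `message` must now always be supplied when building a choice by hand.
choice = ChatCompletionChoice(
    index=0,
    message=AssistantMessage(content="Hello!"),
    finish_reason="stop",
)
print(choice.message.content)  # -> "Hello!"
```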
mistralai_gcp/models/chatcompletionrequest.py
CHANGED

@@ -14,11 +14,25 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import Annotated, NotRequired


+ChatCompletionRequestStopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ChatCompletionRequestStop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
+
 ChatCompletionRequestToolChoice = Literal["auto", "none", "any"]

 class ChatCompletionRequestTypedDict(TypedDict):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionRequestMessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: NotRequired[float]
@@ -42,7 +56,7 @@ class ChatCompletionRequestTypedDict(TypedDict):

 class ChatCompletionRequest(BaseModel):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: Optional[float] = 0.7
@@ -89,17 +103,3 @@ class ChatCompletionRequest(BaseModel):

         return m

-
-ChatCompletionRequestStopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-ChatCompletionRequestStop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
-
-
-ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
-
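The `messages` field validates against the role-discriminated union that now sits at the top of the module; typed message models (or plain dicts carrying a `role` key) are both accepted. A minimal construction sketch, with a placeholder model ID:

```python
from mistralai_gcp.models import ChatCompletionRequest, SystemMessage, UserMessage

# Sketch: the discriminator routes each message by its "role" field.
request = ChatCompletionRequest(
    model="mistral-small-latest",  # placeholder model ID
    messages=[
        SystemMessage(content="You are a concise assistant."),
        UserMessage(content="Summarize RFC 2119 in one sentence."),
    ],
    stop=["\n\n"],  # ChatCompletionRequestStop: a string or a list of strings
)
print(request.model_dump(exclude_none=True))
```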
mistralai_gcp/models/chatcompletionstreamrequest.py
CHANGED

@@ -14,11 +14,25 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import Annotated, NotRequired


+StopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+Stop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
+
 ToolChoice = Literal["auto", "none", "any"]

 class ChatCompletionStreamRequestTypedDict(TypedDict):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: NotRequired[float]
@@ -41,7 +55,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):

 class ChatCompletionStreamRequest(BaseModel):
     model: Nullable[str]
-    r"""ID of the model to use. You can use the [List Available Models](/api
+    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
     temperature: Optional[float] = 0.7
@@ -87,17 +101,3 @@ class ChatCompletionStreamRequest(BaseModel):

         return m

-
-StopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-Stop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
-
-
-Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
-
mistralai_gcp/models/deltamessage.py
CHANGED

@@ -4,25 +4,25 @@ from __future__ import annotations
 from .toolcall import ToolCall, ToolCallTypedDict
 from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
-from typing import Optional, TypedDict
+from typing import List, Optional, TypedDict
 from typing_extensions import NotRequired


 class DeltaMessageTypedDict(TypedDict):
     role: NotRequired[str]
-    content: NotRequired[str]
-    tool_calls: NotRequired[Nullable[ToolCallTypedDict]]
+    content: NotRequired[Nullable[str]]
+    tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]


 class DeltaMessage(BaseModel):
     role: Optional[str] = None
-    content:
-    tool_calls: OptionalNullable[ToolCall] = UNSET
+    content: OptionalNullable[str] = UNSET
+    tool_calls: OptionalNullable[List[ToolCall]] = UNSET

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["role", "content", "tool_calls"]
-        nullable_fields = ["tool_calls"]
+        nullable_fields = ["content", "tool_calls"]
         null_default_fields = []

         serialized = handler(self)
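`content` is now nullable and `tool_calls` is a list of `ToolCall` rather than a single call, so streaming consumers should guard both. A small handling sketch (the helper name is illustrative, not part of the SDK):

```python
from mistralai_gcp.models import DeltaMessage

def handle_delta(delta: DeltaMessage) -> None:
    # content may be None (or unset) on tool-call chunks
    if isinstance(delta.content, str):
        print(delta.content, end="")
    # tool_calls is now a list, and may also be None or unset
    if isinstance(delta.tool_calls, list):
        for call in delta.tool_calls:
            print(f"\n[tool call] {call.function.name}({call.function.arguments})")
```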
mistralai_gcp/models/fimcompletionrequest.py
CHANGED

@@ -7,6 +7,14 @@ from typing import List, Optional, TypedDict, Union
 from typing_extensions import NotRequired


+FIMCompletionRequestStopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+FIMCompletionRequestStop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
 class FIMCompletionRequestTypedDict(TypedDict):
     model: Nullable[str]
     r"""ID of the model to use. Only compatible for now with:
@@ -84,11 +92,3 @@ class FIMCompletionRequest(BaseModel):

         return m

-
-FIMCompletionRequestStopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-FIMCompletionRequestStop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
mistralai_gcp/models/fimcompletionstreamrequest.py
CHANGED

@@ -7,6 +7,14 @@ from typing import List, Optional, TypedDict, Union
 from typing_extensions import NotRequired


+FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+FIMCompletionStreamRequestStop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
 class FIMCompletionStreamRequestTypedDict(TypedDict):
     model: Nullable[str]
     r"""ID of the model to use. Only compatible for now with:
@@ -82,11 +90,3 @@ class FIMCompletionStreamRequest(BaseModel):

         return m

-
-FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-FIMCompletionStreamRequestStop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
mistralai_gcp/models/functioncall.py
CHANGED

@@ -2,15 +2,21 @@

 from __future__ import annotations
 from mistralai_gcp.types import BaseModel
-from typing import TypedDict
+from typing import Any, Dict, TypedDict, Union
+
+
+ArgumentsTypedDict = Union[Dict[str, Any], str]
+
+
+Arguments = Union[Dict[str, Any], str]


 class FunctionCallTypedDict(TypedDict):
     name: str
-    arguments:
+    arguments: ArgumentsTypedDict


 class FunctionCall(BaseModel):
     name: str
-    arguments:
+    arguments: Arguments

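`arguments` is now typed as `Union[Dict[str, Any], str]`, so a caller may receive either an already-parsed mapping or a raw JSON string. A small normalization sketch (the helper is illustrative, not part of the SDK):

```python
import json
from typing import Any, Dict

from mistralai_gcp.models import FunctionCall

def arguments_as_dict(call: FunctionCall) -> Dict[str, Any]:
    # FunctionCall.arguments may be a dict or a JSON-encoded string.
    if isinstance(call.arguments, str):
        return json.loads(call.arguments)
    return dict(call.arguments)
```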
mistralai_gcp/models/systemmessage.py
CHANGED

@@ -7,6 +7,12 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import NotRequired


+ContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+
+
+Content = Union[str, List[ContentChunk]]
+
+
 Role = Literal["system"]

 class SystemMessageTypedDict(TypedDict):
@@ -18,9 +24,3 @@ class SystemMessage(BaseModel):
     content: Content
     role: Optional[Role] = "system"

-
-ContentTypedDict = Union[str, List[ContentChunkTypedDict]]
-
-
-Content = Union[str, List[ContentChunk]]
-
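`Content` (a plain string or a list of content chunks) is now declared ahead of the classes that use it. Either form validates, e.g. (a sketch that assumes `TextChunk` is an accepted chunk type, as its export in `models/__init__.py` suggests):

```python
from mistralai_gcp.models import SystemMessage, TextChunk

# Content accepts a plain string...
plain = SystemMessage(content="You are a terse assistant.")
# ...or a list of content chunks.
chunked = SystemMessage(content=[TextChunk(text="You are a terse assistant.")])
```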
mistralai_gcp/models/tool.py
CHANGED

@@ -2,17 +2,21 @@

 from __future__ import annotations
 from .function import Function, FunctionTypedDict
-from mistralai_gcp.types import BaseModel
-import
-from
-from
+from mistralai_gcp.types import BaseModel, UnrecognizedStr
+from mistralai_gcp.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
+from typing_extensions import Annotated, NotRequired


+ToolToolTypes = Union[Literal["function"], UnrecognizedStr]
+
 class ToolTypedDict(TypedDict):
     function: FunctionTypedDict
+    type: NotRequired[ToolToolTypes]


 class Tool(BaseModel):
     function: Function
-
+    type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function"

mistralai_gcp/models/toolcall.py
CHANGED

@@ -2,19 +2,23 @@

 from __future__ import annotations
 from .functioncall import FunctionCall, FunctionCallTypedDict
-from mistralai_gcp.types import BaseModel
-import
-from
+from mistralai_gcp.types import BaseModel, UnrecognizedStr
+from mistralai_gcp.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
 from typing_extensions import Annotated, NotRequired


+ToolTypes = Union[Literal["function"], UnrecognizedStr]
+
 class ToolCallTypedDict(TypedDict):
     function: FunctionCallTypedDict
     id: NotRequired[str]
+    type: NotRequired[ToolTypes]


 class ToolCall(BaseModel):
     function: FunctionCall
     id: Optional[str] = "null"
-
+    type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function"

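`type` on `ToolCall` (and on `Tool` above) is now an open enum: it defaults to `"function"`, and unrecognized strings returned by the API are preserved instead of failing validation. A construction sketch:

```python
from mistralai_gcp.models import FunctionCall, ToolCall

call = ToolCall(
    id="call_0",
    function=FunctionCall(name="get_weather", arguments={"city": "Paris"}),
)
print(call.type)  # "function" by default; unknown values are kept as raw strings
```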
mistralai_gcp/models/usermessage.py
CHANGED

@@ -7,6 +7,12 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import NotRequired


+UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]]
+
+
+UserMessageContent = Union[str, List[TextChunk]]
+
+
 UserMessageRole = Literal["user"]

 class UserMessageTypedDict(TypedDict):
@@ -18,9 +24,3 @@ class UserMessage(BaseModel):
     content: UserMessageContent
     role: Optional[UserMessageRole] = "user"

-
-UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]]
-
-
-UserMessageContent = Union[str, List[TextChunk]]
-
mistralai_gcp/models/validationerror.py
CHANGED

@@ -5,6 +5,12 @@ from mistralai_gcp.types import BaseModel
 from typing import List, TypedDict, Union


+LocTypedDict = Union[str, int]
+
+
+Loc = Union[str, int]
+
+
 class ValidationErrorTypedDict(TypedDict):
     loc: List[LocTypedDict]
     msg: str
@@ -16,9 +22,3 @@ class ValidationError(BaseModel):
     msg: str
     type: str

-
-LocTypedDict = Union[str, int]
-
-
-Loc = Union[str, int]
-
mistralai_gcp/sdkconfiguration.py
CHANGED

@@ -29,9 +29,9 @@ class SDKConfiguration:
     server: Optional[str] = ""
     language: str = "python"
     openapi_doc_version: str = "0.0.2"
-    sdk_version: str = "1.0.
-    gen_version: str = "2.
-    user_agent: str = "speakeasy-sdk/python 1.0.
+    sdk_version: str = "1.0.1"
+    gen_version: str = "2.399.0"
+    user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai-gcp"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None

{mistralai-1.0.0rc2.dist-info → mistralai-1.0.2.dist-info}/LICENSE
File without changes

{mistralai-1.0.0rc2.dist-info → mistralai-1.0.2.dist-info}/WHEEL
File without changes