mistralai 1.1.0__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/__init__.py +4 -0
- mistralai/_version.py +12 -0
- mistralai/agents.py +56 -22
- mistralai/batch.py +17 -0
- mistralai/chat.py +64 -30
- mistralai/classifiers.py +396 -0
- mistralai/embeddings.py +10 -6
- mistralai/files.py +252 -19
- mistralai/fim.py +40 -30
- mistralai/jobs.py +40 -20
- mistralai/mistral_jobs.py +733 -0
- mistralai/models/__init__.py +108 -18
- mistralai/models/agentscompletionrequest.py +27 -10
- mistralai/models/agentscompletionstreamrequest.py +27 -10
- mistralai/models/apiendpoint.py +9 -0
- mistralai/models/archiveftmodelout.py +11 -5
- mistralai/models/assistantmessage.py +11 -6
- mistralai/models/basemodelcard.py +22 -6
- mistralai/models/batcherror.py +17 -0
- mistralai/models/batchjobin.py +58 -0
- mistralai/models/batchjobout.py +117 -0
- mistralai/models/batchjobsout.py +30 -0
- mistralai/models/batchjobstatus.py +15 -0
- mistralai/models/chatclassificationrequest.py +104 -0
- mistralai/models/chatcompletionchoice.py +9 -4
- mistralai/models/chatcompletionrequest.py +32 -13
- mistralai/models/chatcompletionresponse.py +2 -2
- mistralai/models/chatcompletionstreamrequest.py +32 -13
- mistralai/models/checkpointout.py +1 -1
- mistralai/models/classificationobject.py +21 -0
- mistralai/models/classificationrequest.py +59 -0
- mistralai/models/classificationresponse.py +21 -0
- mistralai/models/completionchunk.py +2 -2
- mistralai/models/completionevent.py +1 -1
- mistralai/models/completionresponsestreamchoice.py +11 -5
- mistralai/models/delete_model_v1_models_model_id_deleteop.py +1 -2
- mistralai/models/deletefileout.py +1 -1
- mistralai/models/deletemodelout.py +2 -2
- mistralai/models/deltamessage.py +14 -7
- mistralai/models/detailedjobout.py +11 -5
- mistralai/models/embeddingrequest.py +5 -5
- mistralai/models/embeddingresponse.py +2 -1
- mistralai/models/embeddingresponsedata.py +2 -2
- mistralai/models/eventout.py +2 -2
- mistralai/models/filepurpose.py +8 -0
- mistralai/models/files_api_routes_delete_fileop.py +1 -2
- mistralai/models/files_api_routes_download_fileop.py +16 -0
- mistralai/models/files_api_routes_list_filesop.py +96 -0
- mistralai/models/files_api_routes_retrieve_fileop.py +1 -2
- mistralai/models/files_api_routes_upload_fileop.py +9 -9
- mistralai/models/fileschema.py +7 -21
- mistralai/models/fimcompletionrequest.py +20 -13
- mistralai/models/fimcompletionresponse.py +2 -2
- mistralai/models/fimcompletionstreamrequest.py +20 -13
- mistralai/models/ftmodelcapabilitiesout.py +2 -2
- mistralai/models/ftmodelcard.py +24 -6
- mistralai/models/ftmodelout.py +9 -5
- mistralai/models/function.py +2 -2
- mistralai/models/functioncall.py +2 -1
- mistralai/models/functionname.py +1 -1
- mistralai/models/githubrepositoryin.py +11 -5
- mistralai/models/githubrepositoryout.py +11 -5
- mistralai/models/httpvalidationerror.py +0 -2
- mistralai/models/imageurl.py +1 -2
- mistralai/models/imageurlchunk.py +11 -5
- mistralai/models/jobin.py +2 -2
- mistralai/models/jobmetadataout.py +1 -2
- mistralai/models/jobout.py +10 -5
- mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +16 -0
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +16 -0
- mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +95 -0
- mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +2 -2
- mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +1 -2
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +1 -2
- mistralai/models/jobsout.py +9 -5
- mistralai/models/legacyjobmetadataout.py +12 -5
- mistralai/models/listfilesout.py +5 -1
- mistralai/models/metricout.py +1 -2
- mistralai/models/modelcapabilities.py +2 -2
- mistralai/models/modellist.py +2 -2
- mistralai/models/responseformat.py +2 -2
- mistralai/models/retrieve_model_v1_models_model_id_getop.py +2 -2
- mistralai/models/retrievefileout.py +10 -21
- mistralai/models/sampletype.py +6 -2
- mistralai/models/security.py +2 -2
- mistralai/models/source.py +3 -2
- mistralai/models/systemmessage.py +6 -6
- mistralai/models/textchunk.py +9 -5
- mistralai/models/tool.py +2 -2
- mistralai/models/toolcall.py +2 -2
- mistralai/models/toolchoice.py +2 -2
- mistralai/models/toolmessage.py +2 -2
- mistralai/models/trainingfile.py +2 -2
- mistralai/models/trainingparameters.py +7 -2
- mistralai/models/trainingparametersin.py +7 -2
- mistralai/models/unarchiveftmodelout.py +11 -5
- mistralai/models/updateftmodelin.py +1 -2
- mistralai/models/uploadfileout.py +7 -21
- mistralai/models/usageinfo.py +1 -1
- mistralai/models/usermessage.py +36 -5
- mistralai/models/validationerror.py +2 -1
- mistralai/models/wandbintegration.py +11 -5
- mistralai/models/wandbintegrationout.py +12 -6
- mistralai/models_.py +48 -24
- mistralai/sdk.py +7 -0
- mistralai/sdkconfiguration.py +7 -7
- mistralai/utils/__init__.py +8 -0
- mistralai/utils/annotations.py +13 -2
- mistralai/utils/serializers.py +25 -0
- {mistralai-1.1.0.dist-info → mistralai-1.2.1.dist-info}/METADATA +90 -14
- mistralai-1.2.1.dist-info/RECORD +276 -0
- {mistralai-1.1.0.dist-info → mistralai-1.2.1.dist-info}/WHEEL +1 -1
- mistralai_azure/__init__.py +4 -0
- mistralai_azure/_version.py +12 -0
- mistralai_azure/chat.py +64 -30
- mistralai_azure/models/__init__.py +9 -3
- mistralai_azure/models/assistantmessage.py +11 -6
- mistralai_azure/models/chatcompletionchoice.py +10 -5
- mistralai_azure/models/chatcompletionrequest.py +32 -13
- mistralai_azure/models/chatcompletionresponse.py +2 -2
- mistralai_azure/models/chatcompletionstreamrequest.py +32 -13
- mistralai_azure/models/completionchunk.py +2 -2
- mistralai_azure/models/completionevent.py +1 -1
- mistralai_azure/models/completionresponsestreamchoice.py +9 -4
- mistralai_azure/models/deltamessage.py +14 -7
- mistralai_azure/models/function.py +2 -2
- mistralai_azure/models/functioncall.py +2 -1
- mistralai_azure/models/functionname.py +1 -1
- mistralai_azure/models/httpvalidationerror.py +0 -2
- mistralai_azure/models/responseformat.py +2 -2
- mistralai_azure/models/security.py +1 -2
- mistralai_azure/models/systemmessage.py +6 -6
- mistralai_azure/models/textchunk.py +9 -5
- mistralai_azure/models/tool.py +2 -2
- mistralai_azure/models/toolcall.py +2 -2
- mistralai_azure/models/toolchoice.py +2 -2
- mistralai_azure/models/toolmessage.py +2 -2
- mistralai_azure/models/usageinfo.py +1 -1
- mistralai_azure/models/usermessage.py +36 -5
- mistralai_azure/models/validationerror.py +2 -1
- mistralai_azure/sdkconfiguration.py +7 -7
- mistralai_azure/utils/__init__.py +8 -0
- mistralai_azure/utils/annotations.py +13 -2
- mistralai_azure/utils/serializers.py +25 -0
- mistralai_gcp/__init__.py +4 -0
- mistralai_gcp/_version.py +12 -0
- mistralai_gcp/chat.py +64 -30
- mistralai_gcp/fim.py +40 -30
- mistralai_gcp/models/__init__.py +9 -3
- mistralai_gcp/models/assistantmessage.py +11 -6
- mistralai_gcp/models/chatcompletionchoice.py +10 -5
- mistralai_gcp/models/chatcompletionrequest.py +32 -13
- mistralai_gcp/models/chatcompletionresponse.py +2 -2
- mistralai_gcp/models/chatcompletionstreamrequest.py +32 -13
- mistralai_gcp/models/completionchunk.py +2 -2
- mistralai_gcp/models/completionevent.py +1 -1
- mistralai_gcp/models/completionresponsestreamchoice.py +9 -4
- mistralai_gcp/models/deltamessage.py +14 -7
- mistralai_gcp/models/fimcompletionrequest.py +20 -13
- mistralai_gcp/models/fimcompletionresponse.py +2 -2
- mistralai_gcp/models/fimcompletionstreamrequest.py +20 -13
- mistralai_gcp/models/function.py +2 -2
- mistralai_gcp/models/functioncall.py +2 -1
- mistralai_gcp/models/functionname.py +1 -1
- mistralai_gcp/models/httpvalidationerror.py +0 -2
- mistralai_gcp/models/responseformat.py +2 -2
- mistralai_gcp/models/security.py +1 -2
- mistralai_gcp/models/systemmessage.py +6 -6
- mistralai_gcp/models/textchunk.py +9 -5
- mistralai_gcp/models/tool.py +2 -2
- mistralai_gcp/models/toolcall.py +2 -2
- mistralai_gcp/models/toolchoice.py +2 -2
- mistralai_gcp/models/toolmessage.py +2 -2
- mistralai_gcp/models/usageinfo.py +1 -1
- mistralai_gcp/models/usermessage.py +36 -5
- mistralai_gcp/models/validationerror.py +2 -1
- mistralai_gcp/sdk.py +20 -11
- mistralai_gcp/sdkconfiguration.py +7 -7
- mistralai_gcp/utils/__init__.py +8 -0
- mistralai_gcp/utils/annotations.py +13 -2
- mistralai_gcp/utils/serializers.py +25 -0
- mistralai-1.1.0.dist-info/RECORD +0 -254
- {mistralai-1.1.0.dist-info → mistralai-1.2.1.dist-info}/LICENSE +0 -0
mistralai_gcp/models/__init__.py
CHANGED
@@ -2,6 +2,8 @@
 
 from .assistantmessage import (
     AssistantMessage,
+    AssistantMessageContent,
+    AssistantMessageContentTypedDict,
     AssistantMessageRole,
     AssistantMessageTypedDict,
 )
@@ -42,7 +44,7 @@ from .completionresponsestreamchoice import (
     FinishReason,
 )
 from .contentchunk import ContentChunk, ContentChunkTypedDict
-from .deltamessage import DeltaMessage, DeltaMessageTypedDict
+from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict
 from .fimcompletionrequest import (
     FIMCompletionRequest,
     FIMCompletionRequestStop,
@@ -70,10 +72,10 @@ from .responseformats import ResponseFormats
 from .sdkerror import SDKError
 from .security import Security, SecurityTypedDict
 from .systemmessage import (
-    Content,
-    ContentTypedDict,
     Role,
     SystemMessage,
+    SystemMessageContent,
+    SystemMessageContentTypedDict,
     SystemMessageTypedDict,
 )
 from .textchunk import TextChunk, TextChunkTypedDict, Type
@@ -102,6 +104,8 @@ __all__ = [
     "Arguments",
     "ArgumentsTypedDict",
     "AssistantMessage",
+    "AssistantMessageContent",
+    "AssistantMessageContentTypedDict",
     "AssistantMessageRole",
     "AssistantMessageTypedDict",
     "ChatCompletionChoice",
@@ -166,6 +170,8 @@ __all__ = [
     "Stop",
     "StopTypedDict",
     "SystemMessage",
+    "SystemMessageContent",
+    "SystemMessageContentTypedDict",
     "SystemMessageTypedDict",
     "TextChunk",
     "TextChunkTypedDict",
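The export renames above affect direct imports from mistralai_gcp.models. A hypothetical migration sketch, based only on the renamed exports shown in this hunk (not an official migration guide):

# Hypothetical sketch: in 1.1.0, Content/ContentTypedDict were the system-message
# content aliases; in 1.2.1 they are re-exported from deltamessage, while the
# system-message aliases are now SystemMessageContent/SystemMessageContentTypedDict.
from mistralai_gcp.models import (
    AssistantMessageContent,   # new export in 1.2.1
    Content,                   # now the delta-message content union
    SystemMessageContent,      # replaces the old Content export
)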
mistralai_gcp/models/assistantmessage.py
CHANGED
@@ -1,6 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .contentchunk import ContentChunk, ContentChunkTypedDict
 from .toolcall import ToolCall, ToolCallTypedDict
 from mistralai_gcp.types import (
     BaseModel,
@@ -10,28 +11,32 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Literal, Optional,
-from typing_extensions import NotRequired
+from typing import List, Literal, Optional, Union
+from typing_extensions import NotRequired, TypedDict
+
+
+AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+
+
+AssistantMessageContent = Union[str, List[ContentChunk]]
 
 
 AssistantMessageRole = Literal["assistant"]
 
 
 class AssistantMessageTypedDict(TypedDict):
-    content: NotRequired[Nullable[
+    content: NotRequired[Nullable[AssistantMessageContentTypedDict]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
     prefix: NotRequired[bool]
-    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
     role: NotRequired[AssistantMessageRole]
 
 
 class AssistantMessage(BaseModel):
-    content: OptionalNullable[
+    content: OptionalNullable[AssistantMessageContent] = UNSET
 
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
 
     prefix: Optional[bool] = False
-    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
 
     role: Optional[AssistantMessageRole] = "assistant"
 
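With the new AssistantMessageContent union, assistant message content can be either a plain string or a list of content chunks. A minimal, hypothetical construction sketch, assuming TextChunk from textchunk.py keeps its text field:

# Sketch only, not from the package docs: both forms should validate against
# the AssistantMessageContent union (str | list of content chunks).
from mistralai_gcp.models import AssistantMessage, TextChunk

as_string = AssistantMessage(content="plain string content")
as_chunks = AssistantMessage(content=[TextChunk(text="chunked content")])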
mistralai_gcp/models/chatcompletionchoice.py
CHANGED
@@ -2,12 +2,15 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
-from mistralai_gcp.types import BaseModel
-from
+from mistralai_gcp.types import BaseModel, UnrecognizedStr
+from mistralai_gcp.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Union
+from typing_extensions import Annotated, TypedDict
 
 
-ChatCompletionChoiceFinishReason =
-    "stop", "length", "model_length", "error", "tool_calls"
+ChatCompletionChoiceFinishReason = Union[
+    Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr
 ]
 
 
@@ -22,4 +25,6 @@ class ChatCompletionChoice(BaseModel):
 
     message: AssistantMessage
 
-    finish_reason:
+    finish_reason: Annotated[
+        ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False))
+    ]
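finish_reason is now an open enum: a value outside the documented literals is accepted as an unrecognized string instead of failing validation. A small defensive-handling sketch (standalone, not taken from the SDK):

# Sketch only: with the open enum, downstream code should not assume membership
# in the documented set of finish reasons.
KNOWN_FINISH_REASONS = {"stop", "length", "model_length", "error", "tool_calls"}

def describe_finish_reason(reason: str) -> str:
    if reason in KNOWN_FINISH_REASONS:
        return f"documented finish reason: {reason}"
    return f"unrecognized finish reason (kept as a plain string): {reason}"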
mistralai_gcp/models/chatcompletionrequest.py
CHANGED
@@ -18,8 +18,8 @@ from mistralai_gcp.types import (
 )
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List, Optional,
-from typing_extensions import Annotated, NotRequired
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 ChatCompletionRequestStopTypedDict = Union[str, List[str]]
@@ -60,14 +60,12 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionRequestMessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
     stop: NotRequired[ChatCompletionRequestStopTypedDict]
@@ -77,6 +75,12 @@
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
     tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict]
+    presence_penalty: NotRequired[float]
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    frequency_penalty: NotRequired[float]
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    n: NotRequired[Nullable[int]]
+    r"""Number of completions to return for each request, input tokens are only billed once."""
 
 
 class ChatCompletionRequest(BaseModel):
@@ -86,8 +90,8 @@ class ChatCompletionRequest(BaseModel):
     messages: List[ChatCompletionRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
 
-    temperature:
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
 
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
@@ -95,9 +99,6 @@ class ChatCompletionRequest(BaseModel):
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
 
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
-
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
 
@@ -113,21 +114,39 @@
 
     tool_choice: Optional[ChatCompletionRequestToolChoice] = None
 
+    presence_penalty: Optional[float] = 0
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+
+    frequency_penalty: Optional[float] = 0
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""Number of completions to return for each request, input tokens are only billed once."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
             "temperature",
             "top_p",
             "max_tokens",
-            "min_tokens",
             "stream",
             "stop",
             "random_seed",
             "response_format",
             "tools",
             "tool_choice",
+            "presence_penalty",
+            "frequency_penalty",
+            "n",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
         ]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
         null_default_fields = []
 
         serialized = handler(self)
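The same presence_penalty, frequency_penalty and n fields are added to the top-level mistralai chat request models (see the summary list above). A hedged usage sketch, assuming the 1.2.x client forwards these request fields as keyword arguments; parameter semantics are taken from the field docstrings in the hunk above:

import os
from mistralai import Mistral

# Sketch only; model name and prompt are illustrative.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
response = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Suggest three names for a diff tool."}],
    presence_penalty=0.2,    # discourage reusing words already present
    frequency_penalty=0.2,   # penalize frequent repetition
    n=1,                     # completions per request; input tokens billed once
    # temperature left unset: it is now nullable and the default is model-specific
)
print(response.choices[0].message.content)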
mistralai_gcp/models/chatcompletionresponse.py
CHANGED
@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List, Optional
-from typing_extensions import NotRequired
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
 
 
 class ChatCompletionResponseTypedDict(TypedDict):
mistralai_gcp/models/chatcompletionstreamrequest.py
CHANGED
@@ -18,8 +18,8 @@ from mistralai_gcp.types import (
 )
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List, Optional,
-from typing_extensions import Annotated, NotRequired
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 StopTypedDict = Union[str, List[str]]
@@ -62,14 +62,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     stop: NotRequired[StopTypedDict]
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
@@ -78,6 +76,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
     tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
+    presence_penalty: NotRequired[float]
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    frequency_penalty: NotRequired[float]
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    n: NotRequired[Nullable[int]]
+    r"""Number of completions to return for each request, input tokens are only billed once."""
 
 
 class ChatCompletionStreamRequest(BaseModel):
@@ -87,8 +91,8 @@ class ChatCompletionStreamRequest(BaseModel):
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
 
-    temperature:
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
 
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
@@ -96,9 +100,6 @@ class ChatCompletionStreamRequest(BaseModel):
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
 
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
-
     stream: Optional[bool] = True
 
     stop: Optional[Stop] = None
@@ -113,21 +114,39 @@
 
     tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
 
+    presence_penalty: Optional[float] = 0
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+
+    frequency_penalty: Optional[float] = 0
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""Number of completions to return for each request, input tokens are only billed once."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
             "temperature",
             "top_p",
             "max_tokens",
-            "min_tokens",
             "stream",
             "stop",
             "random_seed",
             "response_format",
             "tools",
             "tool_choice",
+            "presence_penalty",
+            "frequency_penalty",
+            "n",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
         ]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
        null_default_fields = []
 
         serialized = handler(self)
mistralai_gcp/models/completionchunk.py
CHANGED
@@ -7,8 +7,8 @@ from .completionresponsestreamchoice import (
 )
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List, Optional
-from typing_extensions import NotRequired
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
 
 
 class CompletionChunkTypedDict(TypedDict):
mistralai_gcp/models/completionevent.py
CHANGED
@@ -3,7 +3,7 @@
 from __future__ import annotations
 from .completionchunk import CompletionChunk, CompletionChunkTypedDict
 from mistralai_gcp.types import BaseModel
-from
+from typing_extensions import TypedDict
 
 
 class CompletionEventTypedDict(TypedDict):
mistralai_gcp/models/completionresponsestreamchoice.py
CHANGED
@@ -2,12 +2,15 @@
 
 from __future__ import annotations
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
-from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr
+from mistralai_gcp.utils import validate_open_enum
 from pydantic import model_serializer
-from
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Union
+from typing_extensions import Annotated, TypedDict
 
 
-FinishReason = Literal["stop", "length", "error", "tool_calls"]
+FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr]
 
 
 class CompletionResponseStreamChoiceTypedDict(TypedDict):
@@ -21,7 +24,9 @@ class CompletionResponseStreamChoice(BaseModel):
 
     delta: DeltaMessage
 
-    finish_reason:
+    finish_reason: Annotated[
+        Nullable[FinishReason], PlainValidator(validate_open_enum(False))
+    ]
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
mistralai_gcp/models/deltamessage.py
CHANGED
@@ -1,6 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .contentchunk import ContentChunk, ContentChunkTypedDict
 from .toolcall import ToolCall, ToolCallTypedDict
 from mistralai_gcp.types import (
     BaseModel,
@@ -10,27 +11,33 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List,
-from typing_extensions import NotRequired
+from typing import List, Union
+from typing_extensions import NotRequired, TypedDict
+
+
+ContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+
+
+Content = Union[str, List[ContentChunk]]
 
 
 class DeltaMessageTypedDict(TypedDict):
-    role: NotRequired[str]
-    content: NotRequired[Nullable[
+    role: NotRequired[Nullable[str]]
+    content: NotRequired[Nullable[ContentTypedDict]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
 
 
 class DeltaMessage(BaseModel):
-    role:
+    role: OptionalNullable[str] = UNSET
 
-    content: OptionalNullable[
+    content: OptionalNullable[Content] = UNSET
 
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["role", "content", "tool_calls"]
-        nullable_fields = ["content", "tool_calls"]
+        nullable_fields = ["role", "content", "tool_calls"]
         null_default_fields = []
 
         serialized = handler(self)
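Because role and content on DeltaMessage are now nullable and content may be a chunk list, streaming consumers should guard against all three shapes. A hedged sketch using the top-level client's streaming call (assuming it exposes the same event/data/choices structure as the models above):

import os
from mistralai import Mistral

# Sketch only: delta.content may be None, a string, or a list of content chunks
# under the 1.2.x models, so normalize before printing.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
stream = client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Stream a short haiku."}],
)
for event in stream:
    content = event.data.choices[0].delta.content
    if content is None:
        continue
    if isinstance(content, str):
        print(content, end="")
    else:  # list of content chunks
        for chunk in content:
            print(getattr(chunk, "text", ""), end="")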
mistralai_gcp/models/fimcompletionrequest.py
CHANGED
@@ -9,8 +9,8 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Optional,
-from typing_extensions import NotRequired
+from typing import List, Optional, Union
+from typing_extensions import NotRequired, TypedDict
 
 
 FIMCompletionRequestStopTypedDict = Union[str, List[str]]
@@ -29,14 +29,12 @@ class FIMCompletionRequestTypedDict(TypedDict):
     """
     prompt: str
     r"""The text/code to complete."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
     stop: NotRequired[FIMCompletionRequestStopTypedDict]
@@ -45,6 +43,8 @@ class FIMCompletionRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+    min_tokens: NotRequired[Nullable[int]]
+    r"""The minimum number of tokens to generate in the completion."""
 
 
 class FIMCompletionRequest(BaseModel):
@@ -57,8 +57,8 @@ class FIMCompletionRequest(BaseModel):
     prompt: str
     r"""The text/code to complete."""
 
-    temperature:
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
 
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
@@ -66,9 +66,6 @@ class FIMCompletionRequest(BaseModel):
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
 
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
-
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
 
@@ -81,19 +78,29 @@ class FIMCompletionRequest(BaseModel):
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
 
+    min_tokens: OptionalNullable[int] = UNSET
+    r"""The minimum number of tokens to generate in the completion."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
             "temperature",
             "top_p",
             "max_tokens",
-            "min_tokens",
             "stream",
             "stop",
             "random_seed",
             "suffix",
+            "min_tokens",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "suffix",
+            "min_tokens",
         ]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
         null_default_fields = []
 
         serialized = handler(self)
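For fill-in-the-middle, min_tokens is kept (now declared after suffix) and temperature becomes nullable here as well. A hedged FIM sketch with the top-level client; prompt, suffix and min_tokens semantics are taken from the docstrings above, and the model name is illustrative:

import os
from mistralai import Mistral

# Sketch only: completes the code between prompt and suffix.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
result = client.fim.complete(
    model="codestral-latest",
    prompt="def fibonacci(n):\n",
    suffix="\nprint(fibonacci(10))",
    min_tokens=8,   # lower bound on generated tokens
)
print(result.choices[0].message.content)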
mistralai_gcp/models/fimcompletionresponse.py
CHANGED
@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List, Optional
-from typing_extensions import NotRequired
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
 
 
 class FIMCompletionResponseTypedDict(TypedDict):
mistralai_gcp/models/fimcompletionstreamrequest.py
CHANGED
@@ -9,8 +9,8 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Optional,
-from typing_extensions import NotRequired
+from typing import List, Optional, Union
+from typing_extensions import NotRequired, TypedDict
 
 
 FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]]
@@ -29,14 +29,12 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
     """
     prompt: str
     r"""The text/code to complete."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     stop: NotRequired[FIMCompletionStreamRequestStopTypedDict]
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
@@ -44,6 +42,8 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+    min_tokens: NotRequired[Nullable[int]]
+    r"""The minimum number of tokens to generate in the completion."""
 
 
 class FIMCompletionStreamRequest(BaseModel):
@@ -56,8 +56,8 @@ class FIMCompletionStreamRequest(BaseModel):
     prompt: str
     r"""The text/code to complete."""
 
-    temperature:
-    r"""What sampling temperature to use, between 0.0 and
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
 
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
@@ -65,9 +65,6 @@ class FIMCompletionStreamRequest(BaseModel):
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
 
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
-
     stream: Optional[bool] = True
 
     stop: Optional[FIMCompletionStreamRequestStop] = None
@@ -79,19 +76,29 @@
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
 
+    min_tokens: OptionalNullable[int] = UNSET
+    r"""The minimum number of tokens to generate in the completion."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
             "temperature",
             "top_p",
             "max_tokens",
-            "min_tokens",
             "stream",
             "stop",
             "random_seed",
             "suffix",
+            "min_tokens",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "suffix",
+            "min_tokens",
         ]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
         null_default_fields = []
 
         serialized = handler(self)
mistralai_gcp/models/function.py
CHANGED
@@ -2,8 +2,8 @@
 
 from __future__ import annotations
 from mistralai_gcp.types import BaseModel
-from typing import Any, Dict, Optional
-from typing_extensions import NotRequired
+from typing import Any, Dict, Optional
+from typing_extensions import NotRequired, TypedDict
 
 
 class FunctionTypedDict(TypedDict):