mistralai 1.0.2__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_hooks/sdkhooks.py +23 -4
- mistralai/_hooks/types.py +27 -9
- mistralai/agents.py +286 -150
- mistralai/basesdk.py +90 -5
- mistralai/chat.py +260 -144
- mistralai/embeddings.py +73 -53
- mistralai/files.py +252 -192
- mistralai/fim.py +174 -110
- mistralai/fine_tuning.py +3 -2
- mistralai/jobs.py +372 -263
- mistralai/models/__init__.py +499 -46
- mistralai/models/agentscompletionrequest.py +47 -11
- mistralai/models/agentscompletionstreamrequest.py +49 -11
- mistralai/models/archiveftmodelout.py +6 -2
- mistralai/models/assistantmessage.py +11 -4
- mistralai/models/{modelcard.py → basemodelcard.py} +37 -14
- mistralai/models/chatcompletionchoice.py +4 -2
- mistralai/models/chatcompletionrequest.py +57 -11
- mistralai/models/chatcompletionresponse.py +6 -2
- mistralai/models/chatcompletionstreamrequest.py +59 -11
- mistralai/models/checkpointout.py +3 -2
- mistralai/models/completionchunk.py +10 -3
- mistralai/models/completionevent.py +1 -2
- mistralai/models/completionresponsestreamchoice.py +13 -5
- mistralai/models/contentchunk.py +13 -10
- mistralai/models/delete_model_v1_models_model_id_deleteop.py +4 -3
- mistralai/models/deletefileout.py +3 -2
- mistralai/models/deletemodelout.py +3 -2
- mistralai/models/deltamessage.py +9 -4
- mistralai/models/detailedjobout.py +59 -7
- mistralai/models/embeddingrequest.py +9 -4
- mistralai/models/embeddingresponse.py +5 -2
- mistralai/models/embeddingresponsedata.py +3 -2
- mistralai/models/eventout.py +9 -4
- mistralai/models/files_api_routes_delete_fileop.py +4 -3
- mistralai/models/files_api_routes_retrieve_fileop.py +4 -3
- mistralai/models/files_api_routes_upload_fileop.py +27 -8
- mistralai/models/fileschema.py +26 -5
- mistralai/models/fimcompletionrequest.py +26 -5
- mistralai/models/fimcompletionresponse.py +6 -2
- mistralai/models/fimcompletionstreamrequest.py +26 -5
- mistralai/models/finetuneablemodel.py +7 -1
- mistralai/models/ftmodelcapabilitiesout.py +4 -2
- mistralai/models/ftmodelcard.py +103 -0
- mistralai/models/ftmodelout.py +32 -6
- mistralai/models/function.py +3 -2
- mistralai/models/functioncall.py +2 -2
- mistralai/models/functionname.py +17 -0
- mistralai/models/githubrepositoryin.py +15 -4
- mistralai/models/githubrepositoryout.py +15 -4
- mistralai/models/httpvalidationerror.py +2 -2
- mistralai/models/imageurl.py +48 -0
- mistralai/models/imageurlchunk.py +32 -0
- mistralai/models/jobin.py +22 -5
- mistralai/models/jobmetadataout.py +31 -6
- mistralai/models/jobout.py +55 -7
- mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +4 -3
- mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +4 -3
- mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +3 -2
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +4 -3
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +83 -16
- mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +4 -3
- mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +4 -3
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +9 -4
- mistralai/models/jobsout.py +6 -2
- mistralai/models/legacyjobmetadataout.py +45 -6
- mistralai/models/listfilesout.py +2 -2
- mistralai/models/metricout.py +11 -6
- mistralai/models/modelcapabilities.py +7 -2
- mistralai/models/modellist.py +21 -7
- mistralai/models/responseformat.py +6 -7
- mistralai/models/responseformats.py +8 -0
- mistralai/models/retrieve_model_v1_models_model_id_getop.py +24 -5
- mistralai/models/retrievefileout.py +26 -5
- mistralai/models/security.py +12 -3
- mistralai/models/systemmessage.py +6 -5
- mistralai/models/textchunk.py +9 -4
- mistralai/models/tool.py +9 -8
- mistralai/models/toolcall.py +9 -7
- mistralai/models/toolchoice.py +29 -0
- mistralai/models/toolchoiceenum.py +7 -0
- mistralai/models/toolmessage.py +11 -4
- mistralai/models/tooltypes.py +8 -0
- mistralai/models/trainingfile.py +2 -2
- mistralai/models/trainingparameters.py +27 -6
- mistralai/models/trainingparametersin.py +29 -8
- mistralai/models/unarchiveftmodelout.py +6 -2
- mistralai/models/updateftmodelin.py +8 -4
- mistralai/models/uploadfileout.py +26 -5
- mistralai/models/usageinfo.py +3 -2
- mistralai/models/usermessage.py +6 -5
- mistralai/models/validationerror.py +3 -2
- mistralai/models/wandbintegration.py +14 -4
- mistralai/models/wandbintegrationout.py +13 -4
- mistralai/models_.py +392 -294
- mistralai/sdk.py +24 -19
- mistralai/sdkconfiguration.py +6 -8
- mistralai/utils/__init__.py +6 -1
- mistralai/utils/logger.py +4 -1
- mistralai/utils/retries.py +2 -1
- mistralai/utils/security.py +13 -6
- {mistralai-1.0.2.dist-info → mistralai-1.1.0.dist-info}/METADATA +103 -74
- mistralai-1.1.0.dist-info/RECORD +254 -0
- mistralai_azure/_hooks/sdkhooks.py +23 -4
- mistralai_azure/_hooks/types.py +27 -9
- mistralai_azure/basesdk.py +91 -6
- mistralai_azure/chat.py +252 -144
- mistralai_azure/models/__init__.py +157 -15
- mistralai_azure/models/assistantmessage.py +18 -5
- mistralai_azure/models/chatcompletionchoice.py +7 -3
- mistralai_azure/models/chatcompletionrequest.py +65 -12
- mistralai_azure/models/chatcompletionresponse.py +6 -2
- mistralai_azure/models/chatcompletionstreamrequest.py +67 -12
- mistralai_azure/models/completionchunk.py +10 -3
- mistralai_azure/models/completionevent.py +1 -2
- mistralai_azure/models/completionresponsestreamchoice.py +10 -4
- mistralai_azure/models/contentchunk.py +4 -11
- mistralai_azure/models/deltamessage.py +16 -5
- mistralai_azure/models/function.py +3 -2
- mistralai_azure/models/functioncall.py +2 -2
- mistralai_azure/models/functionname.py +17 -0
- mistralai_azure/models/httpvalidationerror.py +2 -2
- mistralai_azure/models/responseformat.py +6 -7
- mistralai_azure/models/responseformats.py +8 -0
- mistralai_azure/models/security.py +12 -3
- mistralai_azure/models/systemmessage.py +6 -5
- mistralai_azure/models/textchunk.py +9 -4
- mistralai_azure/models/tool.py +9 -8
- mistralai_azure/models/toolcall.py +9 -7
- mistralai_azure/models/toolchoice.py +29 -0
- mistralai_azure/models/toolchoiceenum.py +7 -0
- mistralai_azure/models/toolmessage.py +18 -5
- mistralai_azure/models/tooltypes.py +8 -0
- mistralai_azure/models/usageinfo.py +3 -2
- mistralai_azure/models/usermessage.py +6 -5
- mistralai_azure/models/validationerror.py +3 -2
- mistralai_azure/sdkconfiguration.py +6 -8
- mistralai_azure/utils/__init__.py +8 -3
- mistralai_azure/utils/forms.py +10 -9
- mistralai_azure/utils/headers.py +8 -8
- mistralai_azure/utils/logger.py +6 -0
- mistralai_azure/utils/queryparams.py +16 -14
- mistralai_azure/utils/retries.py +2 -1
- mistralai_azure/utils/security.py +12 -6
- mistralai_azure/utils/serializers.py +17 -8
- mistralai_azure/utils/url.py +13 -8
- mistralai_azure/utils/values.py +6 -0
- mistralai_gcp/_hooks/sdkhooks.py +23 -4
- mistralai_gcp/_hooks/types.py +27 -9
- mistralai_gcp/basesdk.py +91 -6
- mistralai_gcp/chat.py +252 -144
- mistralai_gcp/fim.py +166 -110
- mistralai_gcp/models/__init__.py +179 -17
- mistralai_gcp/models/assistantmessage.py +18 -5
- mistralai_gcp/models/chatcompletionchoice.py +7 -3
- mistralai_gcp/models/chatcompletionrequest.py +62 -12
- mistralai_gcp/models/chatcompletionresponse.py +6 -2
- mistralai_gcp/models/chatcompletionstreamrequest.py +64 -12
- mistralai_gcp/models/completionchunk.py +10 -3
- mistralai_gcp/models/completionevent.py +1 -2
- mistralai_gcp/models/completionresponsestreamchoice.py +10 -4
- mistralai_gcp/models/contentchunk.py +4 -11
- mistralai_gcp/models/deltamessage.py +16 -5
- mistralai_gcp/models/fimcompletionrequest.py +33 -6
- mistralai_gcp/models/fimcompletionresponse.py +6 -2
- mistralai_gcp/models/fimcompletionstreamrequest.py +33 -6
- mistralai_gcp/models/function.py +3 -2
- mistralai_gcp/models/functioncall.py +2 -2
- mistralai_gcp/models/functionname.py +17 -0
- mistralai_gcp/models/httpvalidationerror.py +2 -2
- mistralai_gcp/models/responseformat.py +6 -7
- mistralai_gcp/models/responseformats.py +8 -0
- mistralai_gcp/models/security.py +12 -3
- mistralai_gcp/models/systemmessage.py +6 -5
- mistralai_gcp/models/textchunk.py +9 -4
- mistralai_gcp/models/tool.py +9 -8
- mistralai_gcp/models/toolcall.py +9 -7
- mistralai_gcp/models/toolchoice.py +29 -0
- mistralai_gcp/models/toolchoiceenum.py +7 -0
- mistralai_gcp/models/toolmessage.py +18 -5
- mistralai_gcp/models/tooltypes.py +8 -0
- mistralai_gcp/models/usageinfo.py +3 -2
- mistralai_gcp/models/usermessage.py +6 -5
- mistralai_gcp/models/validationerror.py +3 -2
- mistralai_gcp/sdk.py +14 -10
- mistralai_gcp/sdkconfiguration.py +6 -8
- mistralai_gcp/utils/__init__.py +8 -3
- mistralai_gcp/utils/forms.py +10 -9
- mistralai_gcp/utils/headers.py +8 -8
- mistralai_gcp/utils/logger.py +6 -0
- mistralai_gcp/utils/queryparams.py +16 -14
- mistralai_gcp/utils/retries.py +2 -1
- mistralai_gcp/utils/security.py +12 -6
- mistralai_gcp/utils/serializers.py +17 -8
- mistralai_gcp/utils/url.py +13 -8
- mistralai_gcp/utils/values.py +6 -0
- mistralai-1.0.2.dist-info/RECORD +0 -236
- {mistralai-1.0.2.dist-info → mistralai-1.1.0.dist-info}/LICENSE +0 -0
- {mistralai-1.0.2.dist-info → mistralai-1.1.0.dist-info}/WHEEL +0 -0
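
Most of the churn in the listing above is formatting from the regenerated SDK, but several modules are brand new in 1.1.0: toolchoice.py, toolchoiceenum.py, functionname.py, responseformats.py, tooltypes.py, imageurl.py and imageurlchunk.py (plus ftmodelcard.py and the modelcard.py → basemodelcard.py rename). A rough sketch of the import surface this implies for the main package follows; the exported names are assumptions inferred from the new file names, since only the mistralai_gcp exports are spelled out later in this diff.

# Assumed new exports in mistralai 1.1.0, inferred from the added model files
# listed above; verify against mistralai/models/__init__.py before relying on them.
from mistralai.models import (
    FunctionName,     # mistralai/models/functionname.py (new)
    ImageURLChunk,    # mistralai/models/imageurlchunk.py (new, image content chunks)
    ResponseFormats,  # mistralai/models/responseformats.py (new)
    ToolChoice,       # mistralai/models/toolchoice.py (new)
    ToolChoiceEnum,   # mistralai/models/toolchoiceenum.py (new)
    ToolTypes,        # mistralai/models/tooltypes.py (new)
)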
mistralai_gcp/models/__init__.py
CHANGED
@@ -1,31 +1,193 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
-from .assistantmessage import
-
-
-
-
+from .assistantmessage import (
+    AssistantMessage,
+    AssistantMessageRole,
+    AssistantMessageTypedDict,
+)
+from .chatcompletionchoice import (
+    ChatCompletionChoice,
+    ChatCompletionChoiceFinishReason,
+    ChatCompletionChoiceTypedDict,
+)
+from .chatcompletionrequest import (
+    ChatCompletionRequest,
+    ChatCompletionRequestMessages,
+    ChatCompletionRequestMessagesTypedDict,
+    ChatCompletionRequestStop,
+    ChatCompletionRequestStopTypedDict,
+    ChatCompletionRequestToolChoice,
+    ChatCompletionRequestToolChoiceTypedDict,
+    ChatCompletionRequestTypedDict,
+)
+from .chatcompletionresponse import (
+    ChatCompletionResponse,
+    ChatCompletionResponseTypedDict,
+)
+from .chatcompletionstreamrequest import (
+    ChatCompletionStreamRequest,
+    ChatCompletionStreamRequestToolChoice,
+    ChatCompletionStreamRequestToolChoiceTypedDict,
+    ChatCompletionStreamRequestTypedDict,
+    Messages,
+    MessagesTypedDict,
+    Stop,
+    StopTypedDict,
+)
 from .completionchunk import CompletionChunk, CompletionChunkTypedDict
 from .completionevent import CompletionEvent, CompletionEventTypedDict
-from .completionresponsestreamchoice import
+from .completionresponsestreamchoice import (
+    CompletionResponseStreamChoice,
+    CompletionResponseStreamChoiceTypedDict,
+    FinishReason,
+)
 from .contentchunk import ContentChunk, ContentChunkTypedDict
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
-from .fimcompletionrequest import
+from .fimcompletionrequest import (
+    FIMCompletionRequest,
+    FIMCompletionRequestStop,
+    FIMCompletionRequestStopTypedDict,
+    FIMCompletionRequestTypedDict,
+)
 from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
-from .fimcompletionstreamrequest import
+from .fimcompletionstreamrequest import (
+    FIMCompletionStreamRequest,
+    FIMCompletionStreamRequestStop,
+    FIMCompletionStreamRequestStopTypedDict,
+    FIMCompletionStreamRequestTypedDict,
+)
 from .function import Function, FunctionTypedDict
-from .functioncall import
+from .functioncall import (
+    Arguments,
+    ArgumentsTypedDict,
+    FunctionCall,
+    FunctionCallTypedDict,
+)
+from .functionname import FunctionName, FunctionNameTypedDict
 from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
-from .responseformat import ResponseFormat, ResponseFormatTypedDict
+from .responseformat import ResponseFormat, ResponseFormatTypedDict
+from .responseformats import ResponseFormats
 from .sdkerror import SDKError
 from .security import Security, SecurityTypedDict
-from .systemmessage import
-
-
-
+from .systemmessage import (
+    Content,
+    ContentTypedDict,
+    Role,
+    SystemMessage,
+    SystemMessageTypedDict,
+)
+from .textchunk import TextChunk, TextChunkTypedDict, Type
+from .tool import Tool, ToolTypedDict
+from .toolcall import ToolCall, ToolCallTypedDict
+from .toolchoice import ToolChoice, ToolChoiceTypedDict
+from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict
+from .tooltypes import ToolTypes
 from .usageinfo import UsageInfo, UsageInfoTypedDict
-from .usermessage import
-
+from .usermessage import (
+    UserMessage,
+    UserMessageContent,
+    UserMessageContentTypedDict,
+    UserMessageRole,
+    UserMessageTypedDict,
+)
+from .validationerror import (
+    Loc,
+    LocTypedDict,
+    ValidationError,
+    ValidationErrorTypedDict,
+)
 
-__all__ = [
+__all__ = [
+    "Arguments",
+    "ArgumentsTypedDict",
+    "AssistantMessage",
+    "AssistantMessageRole",
+    "AssistantMessageTypedDict",
+    "ChatCompletionChoice",
+    "ChatCompletionChoiceFinishReason",
+    "ChatCompletionChoiceTypedDict",
+    "ChatCompletionRequest",
+    "ChatCompletionRequestMessages",
+    "ChatCompletionRequestMessagesTypedDict",
+    "ChatCompletionRequestStop",
+    "ChatCompletionRequestStopTypedDict",
+    "ChatCompletionRequestToolChoice",
+    "ChatCompletionRequestToolChoiceTypedDict",
+    "ChatCompletionRequestTypedDict",
+    "ChatCompletionResponse",
+    "ChatCompletionResponseTypedDict",
+    "ChatCompletionStreamRequest",
+    "ChatCompletionStreamRequestToolChoice",
+    "ChatCompletionStreamRequestToolChoiceTypedDict",
+    "ChatCompletionStreamRequestTypedDict",
+    "CompletionChunk",
+    "CompletionChunkTypedDict",
+    "CompletionEvent",
+    "CompletionEventTypedDict",
+    "CompletionResponseStreamChoice",
+    "CompletionResponseStreamChoiceTypedDict",
+    "Content",
+    "ContentChunk",
+    "ContentChunkTypedDict",
+    "ContentTypedDict",
+    "DeltaMessage",
+    "DeltaMessageTypedDict",
+    "FIMCompletionRequest",
+    "FIMCompletionRequestStop",
+    "FIMCompletionRequestStopTypedDict",
+    "FIMCompletionRequestTypedDict",
+    "FIMCompletionResponse",
+    "FIMCompletionResponseTypedDict",
+    "FIMCompletionStreamRequest",
+    "FIMCompletionStreamRequestStop",
+    "FIMCompletionStreamRequestStopTypedDict",
+    "FIMCompletionStreamRequestTypedDict",
+    "FinishReason",
+    "Function",
+    "FunctionCall",
+    "FunctionCallTypedDict",
+    "FunctionName",
+    "FunctionNameTypedDict",
+    "FunctionTypedDict",
+    "HTTPValidationError",
+    "HTTPValidationErrorData",
+    "Loc",
+    "LocTypedDict",
+    "Messages",
+    "MessagesTypedDict",
+    "ResponseFormat",
+    "ResponseFormatTypedDict",
+    "ResponseFormats",
+    "Role",
+    "SDKError",
+    "Security",
+    "SecurityTypedDict",
+    "Stop",
+    "StopTypedDict",
+    "SystemMessage",
+    "SystemMessageTypedDict",
+    "TextChunk",
+    "TextChunkTypedDict",
+    "Tool",
+    "ToolCall",
+    "ToolCallTypedDict",
+    "ToolChoice",
+    "ToolChoiceEnum",
+    "ToolChoiceTypedDict",
+    "ToolMessage",
+    "ToolMessageRole",
+    "ToolMessageTypedDict",
+    "ToolTypedDict",
+    "ToolTypes",
+    "Type",
+    "UsageInfo",
+    "UsageInfoTypedDict",
+    "UserMessage",
+    "UserMessageContent",
+    "UserMessageContentTypedDict",
+    "UserMessageRole",
+    "UserMessageTypedDict",
+    "ValidationError",
+    "ValidationErrorTypedDict",
+]
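
The regrouping above is mechanical (black-style multi-line imports), but the expanded `__all__` also confirms the public names added to the GCP variant in 1.1.0. A quick sanity check against an installed mistralai_gcp 1.1.0:

# Grounded in the expanded __all__ above: the new names are re-exported
# from the package's models namespace in 1.1.0.
from mistralai_gcp import models

for name in ("FunctionName", "ResponseFormats", "ToolChoice", "ToolChoiceEnum", "ToolTypes"):
    assert name in models.__all__, name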
mistralai_gcp/models/assistantmessage.py
CHANGED
@@ -2,7 +2,13 @@
 
 from __future__ import annotations
 from .toolcall import ToolCall, ToolCallTypedDict
-from mistralai_gcp.types import
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from pydantic import model_serializer
 from typing import List, Literal, Optional, TypedDict
 from typing_extensions import NotRequired
@@ -10,21 +16,25 @@ from typing_extensions import NotRequired
 
 AssistantMessageRole = Literal["assistant"]
 
+
 class AssistantMessageTypedDict(TypedDict):
     content: NotRequired[Nullable[str]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
     prefix: NotRequired[bool]
     r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
     role: NotRequired[AssistantMessageRole]
-
+
 
 class AssistantMessage(BaseModel):
     content: OptionalNullable[str] = UNSET
+
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
+
     prefix: Optional[bool] = False
     r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
+
     role: Optional[AssistantMessageRole] = "assistant"
-
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["content", "tool_calls", "prefix", "role"]
@@ -38,9 +48,13 @@ class AssistantMessage(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
 
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
 
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -50,4 +64,3 @@ class AssistantMessage(BaseModel):
                 m[k] = val
 
         return m
-
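
The functional change here is in `serialize_model`: each serialized key is now popped and only re-added when it carries a real value, so optional fields left at `UNSET` stay out of the payload. A small sketch of the effect; the exact dump shape is an assumption based on the serializer shown above.

from mistralai_gcp.models import AssistantMessage

msg = AssistantMessage(content="Paris is the capital of France.")

# tool_calls was never set, so serialize_model drops it; defaults that are
# real values (prefix=False, role="assistant") are kept.
# Expected (assumed) output:
# {'content': 'Paris is the capital of France.', 'prefix': False, 'role': 'assistant'}
print(msg.model_dump())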
mistralai_gcp/models/chatcompletionchoice.py
CHANGED
@@ -6,16 +6,20 @@ from mistralai_gcp.types import BaseModel
 from typing import Literal, TypedDict
 
 
-ChatCompletionChoiceFinishReason = Literal[
+ChatCompletionChoiceFinishReason = Literal[
+    "stop", "length", "model_length", "error", "tool_calls"
+]
+
 
 class ChatCompletionChoiceTypedDict(TypedDict):
     index: int
     message: AssistantMessageTypedDict
     finish_reason: ChatCompletionChoiceFinishReason
-
+
 
 class ChatCompletionChoice(BaseModel):
     index: int
+
     message: AssistantMessage
+
     finish_reason: ChatCompletionChoiceFinishReason
-
mistralai_gcp/models/chatcompletionrequest.py
CHANGED
@@ -5,12 +5,20 @@ from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
 from .tool import Tool, ToolTypedDict
+from .toolchoice import ToolChoice, ToolChoiceTypedDict
+from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
-from mistralai_gcp.types import
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List,
+from typing import List, Optional, TypedDict, Union
 from typing_extensions import Annotated, NotRequired
 
 
@@ -22,13 +30,30 @@ ChatCompletionRequestStop = Union[str, List[str]]
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
 
 
-ChatCompletionRequestMessagesTypedDict = Union[
+ChatCompletionRequestMessagesTypedDict = Union[
+    SystemMessageTypedDict,
+    UserMessageTypedDict,
+    AssistantMessageTypedDict,
+    ToolMessageTypedDict,
+]
 
 
-ChatCompletionRequestMessages = Annotated[
+ChatCompletionRequestMessages = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
 
 
-
+ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum]
+
+
+ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]
+
 
 class ChatCompletionRequestTypedDict(TypedDict):
     model: Nullable[str]
@@ -51,35 +76,57 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
-    tool_choice: NotRequired[
-
+    tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict]
+
 
 class ChatCompletionRequest(BaseModel):
     model: Nullable[str]
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+
     messages: List[ChatCompletionRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+
     min_tokens: OptionalNullable[int] = UNSET
     r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+
     stop: Optional[ChatCompletionRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     response_format: Optional[ResponseFormat] = None
+
     tools: OptionalNullable[List[Tool]] = UNSET
-
-
+
+    tool_choice: Optional[ChatCompletionRequestToolChoice] = None
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
+        optional_fields = [
+            "temperature",
+            "top_p",
+            "max_tokens",
+            "min_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "response_format",
+            "tools",
+            "tool_choice",
+        ]
         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
         null_default_fields = []
 
@@ -90,9 +137,13 @@ class ChatCompletionRequest(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
 
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
 
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -102,4 +153,3 @@ class ChatCompletionRequest(BaseModel):
                 m[k] = val
 
         return m
-
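
The visible API change is `tool_choice`: it now accepts either a `ToolChoice` model or a `ToolChoiceEnum` string (`ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]`), and messages are validated through a role-discriminated union. A hedged sketch of building a request; the model id is illustrative and `"auto"` is assumed to be one of the `ToolChoiceEnum` literals, since toolchoiceenum.py is not shown in this excerpt.

from mistralai_gcp.models import ChatCompletionRequest, SystemMessage, UserMessage

request = ChatCompletionRequest(
    model="mistral-large-2407",  # illustrative model id
    messages=[
        SystemMessage(content="You are a terse assistant."),
        UserMessage(content="List three French cheeses."),
    ],
    tool_choice="auto",  # plain ToolChoiceEnum value; a ToolChoice object also validates
)

# Unset nullable fields (max_tokens, min_tokens, random_seed, tools) are
# dropped by serialize_model, so the dump stays minimal.
print(request.model_dump())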
mistralai_gcp/models/chatcompletionresponse.py
CHANGED
@@ -15,13 +15,17 @@ class ChatCompletionResponseTypedDict(TypedDict):
     usage: UsageInfoTypedDict
     created: NotRequired[int]
     choices: NotRequired[List[ChatCompletionChoiceTypedDict]]
-
+
 
 class ChatCompletionResponse(BaseModel):
     id: str
+
     object: str
+
     model: str
+
     usage: UsageInfo
+
     created: Optional[int] = None
+
     choices: Optional[List[ChatCompletionChoice]] = None
-
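
Only spacing changed in the response model, so 1.0.2 payloads parse the same way in 1.1.0. A minimal round-trip sketch with made-up values, using only the fields shown above; the usage key names are assumed from the standard Mistral usage schema, since usageinfo.py is not shown here.

from mistralai_gcp.models import ChatCompletionResponse

payload = {  # illustrative values only
    "id": "cmpl-123",
    "object": "chat.completion",
    "model": "mistral-large-2407",
    "usage": {"prompt_tokens": 12, "completion_tokens": 7, "total_tokens": 19},
    "created": 1727000000,
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "Brie, Comté, Roquefort."},
            "finish_reason": "stop",
        }
    ],
}

response = ChatCompletionResponse.model_validate(payload)
print(response.choices[0].message.content)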
mistralai_gcp/models/chatcompletionstreamrequest.py
CHANGED
@@ -5,12 +5,20 @@ from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
 from .tool import Tool, ToolTypedDict
+from .toolchoice import ToolChoice, ToolChoiceTypedDict
+from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
-from mistralai_gcp.types import
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List,
+from typing import List, Optional, TypedDict, Union
 from typing_extensions import Annotated, NotRequired
 
 
@@ -22,13 +30,32 @@ Stop = Union[str, List[str]]
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
 
 
-MessagesTypedDict = Union[
+MessagesTypedDict = Union[
+    SystemMessageTypedDict,
+    UserMessageTypedDict,
+    AssistantMessageTypedDict,
+    ToolMessageTypedDict,
+]
 
 
-Messages = Annotated[
+Messages = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
 
 
-
+ChatCompletionStreamRequestToolChoiceTypedDict = Union[
+    ToolChoiceTypedDict, ToolChoiceEnum
+]
+
+
+ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]
+
 
 class ChatCompletionStreamRequestTypedDict(TypedDict):
     model: Nullable[str]
@@ -50,34 +77,56 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
-    tool_choice: NotRequired[
-
+    tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
+
 
 class ChatCompletionStreamRequest(BaseModel):
     model: Nullable[str]
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+
     min_tokens: OptionalNullable[int] = UNSET
     r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = True
+
     stop: Optional[Stop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     response_format: Optional[ResponseFormat] = None
+
     tools: OptionalNullable[List[Tool]] = UNSET
-
-
+
+    tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
+        optional_fields = [
+            "temperature",
+            "top_p",
+            "max_tokens",
+            "min_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "response_format",
+            "tools",
+            "tool_choice",
+        ]
         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
         null_default_fields = []
 
@@ -88,9 +137,13 @@ class ChatCompletionStreamRequest(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
 
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
 
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -100,4 +153,3 @@ class ChatCompletionStreamRequest(BaseModel):
                 m[k] = val
 
         return m
-
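
The streaming request mirrors the non-streaming one, with `stream` defaulting to `True` and the same `ToolChoice`/`ToolChoiceEnum` union for `tool_choice`. A sketch forcing a specific function call; the `ToolChoice` and `FunctionName` field names are assumptions, since toolchoice.py and functionname.py are new in 1.1.0 but their bodies are not shown in this excerpt.

from mistralai_gcp.models import (
    ChatCompletionStreamRequest,
    FunctionName,
    ToolChoice,
    UserMessage,
)

stream_request = ChatCompletionStreamRequest(
    model="mistral-large-2407",  # illustrative model id
    messages=[UserMessage(content="What is the weather in Paris?")],
    # Assumed shape: ToolChoice(type="function", function=FunctionName(name=...))
    tool_choice=ToolChoice(function=FunctionName(name="get_weather")),
)

print(stream_request.stream)  # True by default for the stream variant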
mistralai_gcp/models/completionchunk.py
CHANGED
@@ -1,7 +1,10 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from .completionresponsestreamchoice import
+from .completionresponsestreamchoice import (
+    CompletionResponseStreamChoice,
+    CompletionResponseStreamChoiceTypedDict,
+)
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
 from typing import List, Optional, TypedDict
@@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict):
     object: NotRequired[str]
     created: NotRequired[int]
     usage: NotRequired[UsageInfoTypedDict]
-
+
 
 class CompletionChunk(BaseModel):
     id: str
+
     model: str
+
     choices: List[CompletionResponseStreamChoice]
+
     object: Optional[str] = None
+
     created: Optional[int] = None
+
     usage: Optional[UsageInfo] = None
-
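
CompletionChunk keeps `id`, `model` and `choices` required, with `object`, `created` and `usage` optional. A sketch of validating one streamed chunk; the `delta` shape comes from DeltaMessage, which is only summarized in the file list above, so its fields are an assumption.

from mistralai_gcp.models import CompletionChunk

chunk = CompletionChunk.model_validate(
    {  # illustrative SSE chunk payload
        "id": "cmpl-123",
        "model": "mistral-large-2407",
        "choices": [
            {
                "index": 0,
                "delta": {"role": "assistant", "content": "Brie"},
                "finish_reason": "stop",
            }
        ],
    }
)

print(chunk.choices[0].delta.content)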