mistralai 1.9.11__py3-none-any.whl → 1.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_hooks/registration.py +5 -0
- mistralai/_hooks/tracing.py +75 -0
- mistralai/_version.py +2 -2
- mistralai/accesses.py +8 -8
- mistralai/agents.py +29 -17
- mistralai/chat.py +41 -29
- mistralai/classifiers.py +13 -1
- mistralai/conversations.py +294 -62
- mistralai/documents.py +19 -3
- mistralai/embeddings.py +13 -7
- mistralai/extra/README.md +1 -1
- mistralai/extra/mcp/auth.py +10 -11
- mistralai/extra/mcp/base.py +17 -16
- mistralai/extra/mcp/sse.py +13 -15
- mistralai/extra/mcp/stdio.py +5 -6
- mistralai/extra/observability/__init__.py +15 -0
- mistralai/extra/observability/otel.py +372 -0
- mistralai/extra/run/context.py +33 -43
- mistralai/extra/run/result.py +29 -30
- mistralai/extra/run/tools.py +34 -23
- mistralai/extra/struct_chat.py +15 -8
- mistralai/extra/utils/response_format.py +5 -3
- mistralai/files.py +6 -0
- mistralai/fim.py +17 -5
- mistralai/mistral_agents.py +229 -1
- mistralai/mistral_jobs.py +39 -13
- mistralai/models/__init__.py +99 -3
- mistralai/models/agent.py +15 -2
- mistralai/models/agentconversation.py +11 -3
- mistralai/models/agentcreationrequest.py +6 -2
- mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
- mistralai/models/agents_api_v1_agents_getop.py +40 -3
- mistralai/models/agents_api_v1_agents_listop.py +72 -2
- mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
- mistralai/models/agents_api_v1_conversations_listop.py +39 -2
- mistralai/models/agentscompletionrequest.py +21 -6
- mistralai/models/agentscompletionstreamrequest.py +21 -6
- mistralai/models/agentupdaterequest.py +18 -2
- mistralai/models/audioencoding.py +13 -0
- mistralai/models/audioformat.py +19 -0
- mistralai/models/audiotranscriptionrequest.py +2 -0
- mistralai/models/batchjobin.py +26 -5
- mistralai/models/batchjobout.py +5 -0
- mistralai/models/batchrequest.py +48 -0
- mistralai/models/chatcompletionrequest.py +22 -5
- mistralai/models/chatcompletionstreamrequest.py +22 -5
- mistralai/models/classificationrequest.py +37 -3
- mistralai/models/conversationrequest.py +15 -4
- mistralai/models/conversationrestartrequest.py +50 -2
- mistralai/models/conversationrestartstreamrequest.py +50 -2
- mistralai/models/conversationstreamrequest.py +15 -4
- mistralai/models/documentout.py +26 -10
- mistralai/models/documentupdatein.py +24 -3
- mistralai/models/embeddingrequest.py +19 -11
- mistralai/models/files_api_routes_list_filesop.py +7 -0
- mistralai/models/fimcompletionrequest.py +8 -9
- mistralai/models/fimcompletionstreamrequest.py +8 -9
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
- mistralai/models/libraries_documents_list_v1op.py +15 -2
- mistralai/models/libraryout.py +10 -7
- mistralai/models/listfilesout.py +35 -4
- mistralai/models/modelcapabilities.py +13 -4
- mistralai/models/modelconversation.py +8 -2
- mistralai/models/ocrpageobject.py +26 -5
- mistralai/models/ocrrequest.py +17 -1
- mistralai/models/ocrtableobject.py +31 -0
- mistralai/models/prediction.py +4 -0
- mistralai/models/requestsource.py +7 -0
- mistralai/models/responseformat.py +4 -2
- mistralai/models/responseformats.py +0 -1
- mistralai/models/sharingdelete.py +36 -5
- mistralai/models/sharingin.py +36 -5
- mistralai/models/sharingout.py +3 -3
- mistralai/models/toolexecutiondeltaevent.py +13 -4
- mistralai/models/toolexecutiondoneevent.py +13 -4
- mistralai/models/toolexecutionentry.py +9 -4
- mistralai/models/toolexecutionstartedevent.py +13 -4
- mistralai/models/toolfilechunk.py +11 -4
- mistralai/models/toolreferencechunk.py +13 -4
- mistralai/models_.py +2 -14
- mistralai/ocr.py +18 -0
- mistralai/transcriptions.py +4 -4
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/METADATA +162 -152
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/RECORD +168 -144
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +15 -5
- mistralai_azure/chat.py +59 -98
- mistralai_azure/models/__init__.py +50 -3
- mistralai_azure/models/chatcompletionrequest.py +16 -4
- mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
- mistralai_azure/models/httpvalidationerror.py +11 -6
- mistralai_azure/models/mistralazureerror.py +26 -0
- mistralai_azure/models/no_response_error.py +13 -0
- mistralai_azure/models/prediction.py +4 -0
- mistralai_azure/models/responseformat.py +4 -2
- mistralai_azure/models/responseformats.py +0 -1
- mistralai_azure/models/responsevalidationerror.py +25 -0
- mistralai_azure/models/sdkerror.py +30 -14
- mistralai_azure/models/systemmessage.py +7 -3
- mistralai_azure/models/systemmessagecontentchunks.py +21 -0
- mistralai_azure/models/thinkchunk.py +35 -0
- mistralai_azure/ocr.py +15 -36
- mistralai_azure/utils/__init__.py +18 -5
- mistralai_azure/utils/eventstreaming.py +10 -0
- mistralai_azure/utils/serializers.py +3 -2
- mistralai_azure/utils/unmarshal_json_response.py +24 -0
- mistralai_gcp/_hooks/types.py +7 -0
- mistralai_gcp/_version.py +4 -4
- mistralai_gcp/basesdk.py +27 -25
- mistralai_gcp/chat.py +75 -98
- mistralai_gcp/fim.py +39 -74
- mistralai_gcp/httpclient.py +6 -16
- mistralai_gcp/models/__init__.py +321 -116
- mistralai_gcp/models/assistantmessage.py +1 -1
- mistralai_gcp/models/chatcompletionrequest.py +36 -7
- mistralai_gcp/models/chatcompletionresponse.py +6 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
- mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
- mistralai_gcp/models/deltamessage.py +1 -1
- mistralai_gcp/models/fimcompletionrequest.py +3 -9
- mistralai_gcp/models/fimcompletionresponse.py +6 -6
- mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
- mistralai_gcp/models/httpvalidationerror.py +11 -6
- mistralai_gcp/models/imageurl.py +1 -1
- mistralai_gcp/models/jsonschema.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +26 -0
- mistralai_gcp/models/mistralpromptmode.py +8 -0
- mistralai_gcp/models/no_response_error.py +13 -0
- mistralai_gcp/models/prediction.py +4 -0
- mistralai_gcp/models/responseformat.py +5 -3
- mistralai_gcp/models/responseformats.py +0 -1
- mistralai_gcp/models/responsevalidationerror.py +25 -0
- mistralai_gcp/models/sdkerror.py +30 -14
- mistralai_gcp/models/systemmessage.py +7 -3
- mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
- mistralai_gcp/models/thinkchunk.py +35 -0
- mistralai_gcp/models/toolmessage.py +1 -1
- mistralai_gcp/models/usageinfo.py +71 -8
- mistralai_gcp/models/usermessage.py +1 -1
- mistralai_gcp/sdk.py +12 -10
- mistralai_gcp/sdkconfiguration.py +0 -7
- mistralai_gcp/types/basemodel.py +3 -3
- mistralai_gcp/utils/__init__.py +143 -45
- mistralai_gcp/utils/datetimes.py +23 -0
- mistralai_gcp/utils/enums.py +67 -27
- mistralai_gcp/utils/eventstreaming.py +10 -0
- mistralai_gcp/utils/forms.py +49 -28
- mistralai_gcp/utils/serializers.py +33 -3
- mistralai_gcp/utils/unmarshal_json_response.py +24 -0
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/models/__init__.py
CHANGED
```diff
@@ -1,122 +1,154 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
-from .assistantmessage import (
-    AssistantMessage,
-    AssistantMessageContent,
-    AssistantMessageContentTypedDict,
-    AssistantMessageRole,
-    AssistantMessageTypedDict,
-)
-from .chatcompletionchoice import (
-    ChatCompletionChoice,
-    ChatCompletionChoiceFinishReason,
-    ChatCompletionChoiceTypedDict,
-)
-from .chatcompletionrequest import (
-    ChatCompletionRequest,
-    ChatCompletionRequestMessages,
-    ChatCompletionRequestMessagesTypedDict,
-    ChatCompletionRequestStop,
-    ChatCompletionRequestStopTypedDict,
-    ChatCompletionRequestToolChoice,
-    ChatCompletionRequestToolChoiceTypedDict,
-    ChatCompletionRequestTypedDict,
-)
-from .chatcompletionresponse import (
-    ChatCompletionResponse,
-    ChatCompletionResponseTypedDict,
-)
-from .chatcompletionstreamrequest import (
-    ChatCompletionStreamRequest,
-    ChatCompletionStreamRequestToolChoice,
-    ChatCompletionStreamRequestToolChoiceTypedDict,
-    ChatCompletionStreamRequestTypedDict,
-    Messages,
-    MessagesTypedDict,
-    Stop,
-    StopTypedDict,
-)
-from .completionchunk import CompletionChunk, CompletionChunkTypedDict
-from .completionevent import CompletionEvent, CompletionEventTypedDict
-from .completionresponsestreamchoice import (
-    CompletionResponseStreamChoice,
-    CompletionResponseStreamChoiceTypedDict,
-    FinishReason,
-)
-from .contentchunk import ContentChunk, ContentChunkTypedDict
-from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict
-from .fimcompletionrequest import (
-    FIMCompletionRequest,
-    FIMCompletionRequestStop,
-    FIMCompletionRequestStopTypedDict,
-    FIMCompletionRequestTypedDict,
-)
-from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
-from .fimcompletionstreamrequest import (
-    FIMCompletionStreamRequest,
-    FIMCompletionStreamRequestStop,
-    FIMCompletionStreamRequestStopTypedDict,
-    FIMCompletionStreamRequestTypedDict,
-)
-from .function import Function, FunctionTypedDict
-from .functioncall import (
-    Arguments,
-    ArgumentsTypedDict,
-    FunctionCall,
-    FunctionCallTypedDict,
-)
-from .functionname import FunctionName, FunctionNameTypedDict
-from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
-from .imageurl import ImageURL, ImageURLTypedDict
-from .imageurlchunk import (
-    ImageURLChunk,
-    ImageURLChunkImageURL,
-    ImageURLChunkImageURLTypedDict,
-    ImageURLChunkType,
-    ImageURLChunkTypedDict,
-)
-from .jsonschema import JSONSchema, JSONSchemaTypedDict
-from .prediction import Prediction, PredictionTypedDict
-from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
-from .responseformat import ResponseFormat, ResponseFormatTypedDict
-from .responseformats import ResponseFormats
-from .sdkerror import SDKError
-from .security import Security, SecurityTypedDict
-from .systemmessage import (
-    Role,
-    SystemMessage,
-    SystemMessageContent,
-    SystemMessageContentTypedDict,
-    SystemMessageTypedDict,
-)
-from .textchunk import TextChunk, TextChunkTypedDict, Type
-from .tool import Tool, ToolTypedDict
-from .toolcall import ToolCall, ToolCallTypedDict
-from .toolchoice import ToolChoice, ToolChoiceTypedDict
-from .toolchoiceenum import ToolChoiceEnum
-from .toolmessage import (
-    ToolMessage,
-    ToolMessageContent,
-    ToolMessageContentTypedDict,
-    ToolMessageRole,
-    ToolMessageTypedDict,
-)
-from .tooltypes import ToolTypes
-from .usageinfo import UsageInfo, UsageInfoTypedDict
-from .usermessage import (
-    UserMessage,
-    UserMessageContent,
-    UserMessageContentTypedDict,
-    UserMessageRole,
-    UserMessageTypedDict,
-)
-from .validationerror import (
-    Loc,
-    LocTypedDict,
-    ValidationError,
-    ValidationErrorTypedDict,
-)
+from .mistralgcperror import MistralGcpError
+from typing import TYPE_CHECKING
+from importlib import import_module
+import builtins
+import sys
 
+if TYPE_CHECKING:
+    from .assistantmessage import (
+        AssistantMessage,
+        AssistantMessageContent,
+        AssistantMessageContentTypedDict,
+        AssistantMessageRole,
+        AssistantMessageTypedDict,
+    )
+    from .chatcompletionchoice import (
+        ChatCompletionChoice,
+        ChatCompletionChoiceFinishReason,
+        ChatCompletionChoiceTypedDict,
+    )
+    from .chatcompletionrequest import (
+        ChatCompletionRequest,
+        ChatCompletionRequestMessages,
+        ChatCompletionRequestMessagesTypedDict,
+        ChatCompletionRequestStop,
+        ChatCompletionRequestStopTypedDict,
+        ChatCompletionRequestToolChoice,
+        ChatCompletionRequestToolChoiceTypedDict,
+        ChatCompletionRequestTypedDict,
+    )
+    from .chatcompletionresponse import (
+        ChatCompletionResponse,
+        ChatCompletionResponseTypedDict,
+    )
+    from .chatcompletionstreamrequest import (
+        ChatCompletionStreamRequest,
+        ChatCompletionStreamRequestToolChoice,
+        ChatCompletionStreamRequestToolChoiceTypedDict,
+        ChatCompletionStreamRequestTypedDict,
+        Messages,
+        MessagesTypedDict,
+        Stop,
+        StopTypedDict,
+    )
+    from .completionchunk import CompletionChunk, CompletionChunkTypedDict
+    from .completionevent import CompletionEvent, CompletionEventTypedDict
+    from .completionresponsestreamchoice import (
+        CompletionResponseStreamChoice,
+        CompletionResponseStreamChoiceTypedDict,
+        FinishReason,
+    )
+    from .contentchunk import ContentChunk, ContentChunkTypedDict
+    from .deltamessage import (
+        Content,
+        ContentTypedDict,
+        DeltaMessage,
+        DeltaMessageTypedDict,
+    )
+    from .fimcompletionrequest import (
+        FIMCompletionRequest,
+        FIMCompletionRequestStop,
+        FIMCompletionRequestStopTypedDict,
+        FIMCompletionRequestTypedDict,
+    )
+    from .fimcompletionresponse import (
+        FIMCompletionResponse,
+        FIMCompletionResponseTypedDict,
+    )
+    from .fimcompletionstreamrequest import (
+        FIMCompletionStreamRequest,
+        FIMCompletionStreamRequestStop,
+        FIMCompletionStreamRequestStopTypedDict,
+        FIMCompletionStreamRequestTypedDict,
+    )
+    from .function import Function, FunctionTypedDict
+    from .functioncall import (
+        Arguments,
+        ArgumentsTypedDict,
+        FunctionCall,
+        FunctionCallTypedDict,
+    )
+    from .functionname import FunctionName, FunctionNameTypedDict
+    from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
+    from .imageurl import ImageURL, ImageURLTypedDict
+    from .imageurlchunk import (
+        ImageURLChunk,
+        ImageURLChunkImageURL,
+        ImageURLChunkImageURLTypedDict,
+        ImageURLChunkType,
+        ImageURLChunkTypedDict,
+    )
+    from .jsonschema import JSONSchema, JSONSchemaTypedDict
+    from .mistralpromptmode import MistralPromptMode
+    from .no_response_error import NoResponseError
+    from .prediction import Prediction, PredictionTypedDict
+    from .referencechunk import (
+        ReferenceChunk,
+        ReferenceChunkType,
+        ReferenceChunkTypedDict,
+    )
+    from .responseformat import ResponseFormat, ResponseFormatTypedDict
+    from .responseformats import ResponseFormats
+    from .responsevalidationerror import ResponseValidationError
+    from .sdkerror import SDKError
+    from .security import Security, SecurityTypedDict
+    from .systemmessage import (
+        Role,
+        SystemMessage,
+        SystemMessageContent,
+        SystemMessageContentTypedDict,
+        SystemMessageTypedDict,
+    )
+    from .systemmessagecontentchunks import (
+        SystemMessageContentChunks,
+        SystemMessageContentChunksTypedDict,
+    )
+    from .textchunk import TextChunk, TextChunkTypedDict, Type
+    from .thinkchunk import (
+        ThinkChunk,
+        ThinkChunkType,
+        ThinkChunkTypedDict,
+        Thinking,
+        ThinkingTypedDict,
+    )
+    from .tool import Tool, ToolTypedDict
+    from .toolcall import ToolCall, ToolCallTypedDict
+    from .toolchoice import ToolChoice, ToolChoiceTypedDict
+    from .toolchoiceenum import ToolChoiceEnum
+    from .toolmessage import (
+        ToolMessage,
+        ToolMessageContent,
+        ToolMessageContentTypedDict,
+        ToolMessageRole,
+        ToolMessageTypedDict,
+    )
+    from .tooltypes import ToolTypes
+    from .usageinfo import UsageInfo, UsageInfoTypedDict
+    from .usermessage import (
+        UserMessage,
+        UserMessageContent,
+        UserMessageContentTypedDict,
+        UserMessageRole,
+        UserMessageTypedDict,
+    )
+    from .validationerror import (
+        Loc,
+        LocTypedDict,
+        ValidationError,
+        ValidationErrorTypedDict,
+    )
 
 __all__ = [
     "Arguments",
@@ -187,6 +219,9 @@ __all__ = [
     "LocTypedDict",
     "Messages",
     "MessagesTypedDict",
+    "MistralGcpError",
+    "MistralPromptMode",
+    "NoResponseError",
     "Prediction",
     "PredictionTypedDict",
     "ReferenceChunk",
@@ -195,6 +230,7 @@ __all__ = [
     "ResponseFormat",
     "ResponseFormatTypedDict",
     "ResponseFormats",
+    "ResponseValidationError",
     "Role",
     "SDKError",
     "Security",
@@ -203,10 +239,17 @@ __all__ = [
     "StopTypedDict",
     "SystemMessage",
    "SystemMessageContent",
+    "SystemMessageContentChunks",
+    "SystemMessageContentChunksTypedDict",
     "SystemMessageContentTypedDict",
     "SystemMessageTypedDict",
     "TextChunk",
     "TextChunkTypedDict",
+    "ThinkChunk",
+    "ThinkChunkType",
+    "ThinkChunkTypedDict",
+    "Thinking",
+    "ThinkingTypedDict",
     "Tool",
     "ToolCall",
     "ToolCallTypedDict",
@@ -231,3 +274,165 @@ __all__ = [
     "ValidationError",
     "ValidationErrorTypedDict",
 ]
+
+_dynamic_imports: dict[str, str] = {
+    "AssistantMessage": ".assistantmessage",
+    "AssistantMessageContent": ".assistantmessage",
+    "AssistantMessageContentTypedDict": ".assistantmessage",
+    "AssistantMessageRole": ".assistantmessage",
+    "AssistantMessageTypedDict": ".assistantmessage",
+    "ChatCompletionChoice": ".chatcompletionchoice",
+    "ChatCompletionChoiceFinishReason": ".chatcompletionchoice",
+    "ChatCompletionChoiceTypedDict": ".chatcompletionchoice",
+    "ChatCompletionRequest": ".chatcompletionrequest",
+    "ChatCompletionRequestMessages": ".chatcompletionrequest",
+    "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest",
+    "ChatCompletionRequestStop": ".chatcompletionrequest",
+    "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest",
+    "ChatCompletionRequestToolChoice": ".chatcompletionrequest",
+    "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest",
+    "ChatCompletionRequestTypedDict": ".chatcompletionrequest",
+    "ChatCompletionResponse": ".chatcompletionresponse",
+    "ChatCompletionResponseTypedDict": ".chatcompletionresponse",
+    "ChatCompletionStreamRequest": ".chatcompletionstreamrequest",
+    "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest",
+    "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest",
+    "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest",
+    "Messages": ".chatcompletionstreamrequest",
+    "MessagesTypedDict": ".chatcompletionstreamrequest",
+    "Stop": ".chatcompletionstreamrequest",
+    "StopTypedDict": ".chatcompletionstreamrequest",
+    "CompletionChunk": ".completionchunk",
+    "CompletionChunkTypedDict": ".completionchunk",
+    "CompletionEvent": ".completionevent",
+    "CompletionEventTypedDict": ".completionevent",
+    "CompletionResponseStreamChoice": ".completionresponsestreamchoice",
+    "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice",
+    "FinishReason": ".completionresponsestreamchoice",
+    "ContentChunk": ".contentchunk",
+    "ContentChunkTypedDict": ".contentchunk",
+    "Content": ".deltamessage",
+    "ContentTypedDict": ".deltamessage",
+    "DeltaMessage": ".deltamessage",
+    "DeltaMessageTypedDict": ".deltamessage",
+    "FIMCompletionRequest": ".fimcompletionrequest",
+    "FIMCompletionRequestStop": ".fimcompletionrequest",
+    "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest",
+    "FIMCompletionRequestTypedDict": ".fimcompletionrequest",
+    "FIMCompletionResponse": ".fimcompletionresponse",
+    "FIMCompletionResponseTypedDict": ".fimcompletionresponse",
+    "FIMCompletionStreamRequest": ".fimcompletionstreamrequest",
+    "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest",
+    "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest",
+    "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest",
+    "Function": ".function",
+    "FunctionTypedDict": ".function",
+    "Arguments": ".functioncall",
+    "ArgumentsTypedDict": ".functioncall",
+    "FunctionCall": ".functioncall",
+    "FunctionCallTypedDict": ".functioncall",
+    "FunctionName": ".functionname",
+    "FunctionNameTypedDict": ".functionname",
+    "HTTPValidationError": ".httpvalidationerror",
+    "HTTPValidationErrorData": ".httpvalidationerror",
+    "ImageURL": ".imageurl",
+    "ImageURLTypedDict": ".imageurl",
+    "ImageURLChunk": ".imageurlchunk",
+    "ImageURLChunkImageURL": ".imageurlchunk",
+    "ImageURLChunkImageURLTypedDict": ".imageurlchunk",
+    "ImageURLChunkType": ".imageurlchunk",
+    "ImageURLChunkTypedDict": ".imageurlchunk",
+    "JSONSchema": ".jsonschema",
+    "JSONSchemaTypedDict": ".jsonschema",
+    "MistralPromptMode": ".mistralpromptmode",
+    "NoResponseError": ".no_response_error",
+    "Prediction": ".prediction",
+    "PredictionTypedDict": ".prediction",
+    "ReferenceChunk": ".referencechunk",
+    "ReferenceChunkType": ".referencechunk",
+    "ReferenceChunkTypedDict": ".referencechunk",
+    "ResponseFormat": ".responseformat",
+    "ResponseFormatTypedDict": ".responseformat",
+    "ResponseFormats": ".responseformats",
+    "ResponseValidationError": ".responsevalidationerror",
+    "SDKError": ".sdkerror",
+    "Security": ".security",
+    "SecurityTypedDict": ".security",
+    "Role": ".systemmessage",
+    "SystemMessage": ".systemmessage",
+    "SystemMessageContent": ".systemmessage",
+    "SystemMessageContentTypedDict": ".systemmessage",
+    "SystemMessageTypedDict": ".systemmessage",
+    "SystemMessageContentChunks": ".systemmessagecontentchunks",
+    "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks",
+    "TextChunk": ".textchunk",
+    "TextChunkTypedDict": ".textchunk",
+    "Type": ".textchunk",
+    "ThinkChunk": ".thinkchunk",
+    "ThinkChunkType": ".thinkchunk",
+    "ThinkChunkTypedDict": ".thinkchunk",
+    "Thinking": ".thinkchunk",
+    "ThinkingTypedDict": ".thinkchunk",
+    "Tool": ".tool",
+    "ToolTypedDict": ".tool",
+    "ToolCall": ".toolcall",
+    "ToolCallTypedDict": ".toolcall",
+    "ToolChoice": ".toolchoice",
+    "ToolChoiceTypedDict": ".toolchoice",
+    "ToolChoiceEnum": ".toolchoiceenum",
+    "ToolMessage": ".toolmessage",
+    "ToolMessageContent": ".toolmessage",
+    "ToolMessageContentTypedDict": ".toolmessage",
+    "ToolMessageRole": ".toolmessage",
+    "ToolMessageTypedDict": ".toolmessage",
+    "ToolTypes": ".tooltypes",
+    "UsageInfo": ".usageinfo",
+    "UsageInfoTypedDict": ".usageinfo",
+    "UserMessage": ".usermessage",
+    "UserMessageContent": ".usermessage",
+    "UserMessageContentTypedDict": ".usermessage",
+    "UserMessageRole": ".usermessage",
+    "UserMessageTypedDict": ".usermessage",
+    "Loc": ".validationerror",
+    "LocTypedDict": ".validationerror",
+    "ValidationError": ".validationerror",
+    "ValidationErrorTypedDict": ".validationerror",
+}
+
+
+def dynamic_import(modname, retries=3):
+    for attempt in range(retries):
+        try:
+            return import_module(modname, __package__)
+        except KeyError:
+            # Clear any half-initialized module and retry
+            sys.modules.pop(modname, None)
+            if attempt == retries - 1:
+                break
+    raise KeyError(f"Failed to import module '{modname}' after {retries} attempts")
+
+
+def __getattr__(attr_name: str) -> object:
+    module_name = _dynamic_imports.get(attr_name)
+    if module_name is None:
+        raise AttributeError(
+            f"No {attr_name} found in _dynamic_imports for module name -> {__name__} "
+        )
+
+    try:
+        module = dynamic_import(module_name)
+        result = getattr(module, attr_name)
+        return result
+    except ImportError as e:
+        raise ImportError(
+            f"Failed to import {attr_name} from {module_name}: {e}"
+        ) from e
+    except AttributeError as e:
+        raise AttributeError(
+            f"Failed to get {attr_name} from {module_name}: {e}"
+        ) from e
+
+
+def __dir__():
+    lazy_attrs = builtins.list(_dynamic_imports.keys())
+    return builtins.sorted(lazy_attrs)
```
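The regenerated `mistralai_gcp/models/__init__.py` swaps the eager submodule imports for a PEP 562 lazy-loading scheme: every public name is listed in `_dynamic_imports`, a module-level `__getattr__` imports the owning submodule on first access, and the `TYPE_CHECKING` block keeps the names visible to static type checkers. A minimal sketch of the pattern, with a single mapping entry taken from the diff and the generated retry/error-wrapping logic left out:

```python
# Minimal sketch of the PEP 562 lazy-import pattern used by the new
# models/__init__.py; only one mapping entry is shown here.
from importlib import import_module

_dynamic_imports: dict[str, str] = {
    "ChatCompletionRequest": ".chatcompletionrequest",
}


def __getattr__(attr_name: str) -> object:
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {attr_name!r}")
    # The submodule is imported only on first access to the attribute.
    module = import_module(module_name, __package__)
    return getattr(module, attr_name)


def __dir__() -> list[str]:
    # Expose the lazily importable names to dir() and autocompletion.
    return sorted(_dynamic_imports)
```

Existing imports such as `from mistralai_gcp.models import ChatCompletionRequest` should keep working unchanged; what moves is the point at which each submodule is actually imported, which trims package import cost when only a few models are used.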
mistralai_gcp/models/chatcompletionrequest.py
CHANGED
```diff
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -17,8 +18,9 @@ from mistralai_gcp.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_gcp.utils import get_discriminator
+from mistralai_gcp.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -61,11 +63,13 @@ ChatCompletionRequestToolChoiceTypedDict = TypeAliasType(
     "ChatCompletionRequestToolChoiceTypedDict",
     Union[ToolChoiceTypedDict, ToolChoiceEnum],
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
 
 ChatCompletionRequestToolChoice = TypeAliasType(
     "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
 
 class ChatCompletionRequestTypedDict(TypedDict):
@@ -86,16 +90,23 @@ class ChatCompletionRequestTypedDict(TypedDict):
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     response_format: NotRequired[ResponseFormatTypedDict]
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
     tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
     presence_penalty: NotRequired[float]
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
     frequency_penalty: NotRequired[float]
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
     parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
 
 class ChatCompletionRequest(BaseModel):
@@ -124,23 +135,33 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
 
     response_format: Optional[ResponseFormat] = None
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
 
     tools: OptionalNullable[List[Tool]] = UNSET
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
 
     tool_choice: Optional[ChatCompletionRequestToolChoice] = None
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
     presence_penalty: Optional[float] = None
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
 
     frequency_penalty: Optional[float] = None
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
 
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""
 
     prediction: Optional[Prediction] = None
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
 
     parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
+
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
@@ -159,15 +180,23 @@
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
+        ]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
```
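`ChatCompletionRequest` gains a `prompt_mode` field validated as an open enum, plus field docstrings carried over from the API spec. A hypothetical construction using the new field; the model id and message content are placeholders, and the exact set of required fields should be checked against the generated model:

```python
# Hypothetical use of the new prompt_mode field (placeholders throughout).
from mistralai_gcp.models import ChatCompletionRequest

request = ChatCompletionRequest(
    model="mistral-large-latest",  # placeholder model id
    messages=[{"role": "user", "content": "Prove that sqrt(2) is irrational."}],
    prompt_mode="reasoning",  # open enum: a plain string is accepted by the validator
)
print(request.prompt_mode)
```

When `prompt_mode` is left unset it stays out of the serialized payload; the updated `optional_fields` and `nullable_fields` lists in `serialize_model` show it is both optional and nullable.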
mistralai_gcp/models/chatcompletionresponse.py
CHANGED
```diff
@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List
-from typing_extensions import
+from typing import List
+from typing_extensions import TypedDict
 
 
 class ChatCompletionResponseTypedDict(TypedDict):
@@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict):
     object: str
     model: str
     usage: UsageInfoTypedDict
-    created:
-    choices:
+    created: int
+    choices: List[ChatCompletionChoiceTypedDict]
 
 
 class ChatCompletionResponse(BaseModel):
@@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel):
 
     usage: UsageInfo
 
-    created:
+    created: int
 
-    choices:
+    choices: List[ChatCompletionChoice]
```