orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +332 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +258 -0
- orq_ai_sdk/embeddings.py +238 -0
- orq_ai_sdk/generations.py +272 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5341 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1922 -384
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1375 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +1890 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +384 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +504 -0
- orq_ai_sdk/models/createimageop.py +208 -117
- orq_ai_sdk/models/createimagevariationop.py +486 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2748 -1252
- orq_ai_sdk/models/creatererankop.py +416 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +316 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +562 -0
- orq_ai_sdk/models/createtranslationop.py +540 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1690 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1673 -230
- orq_ai_sdk/models/getpromptversionop.py +1670 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +793 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1684 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1451 -197
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1428 -195
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1951 -404
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2844 -1450
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +660 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +232 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +251 -0
- orq_ai_sdk/transcriptions.py +326 -0
- orq_ai_sdk/translations.py +298 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +254 -0
- orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
- orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/models/createagentresponse.py

@@ -11,7 +11,7 @@ from orq_ai_sdk.types import (
 )
 import pydantic
 from pydantic import model_serializer
-from typing import List, Optional
+from typing import List, Literal, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
@@ -32,31 +32,30 @@ class PromptTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
+        optional_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
+        nullable_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
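This hunk, and the near-identical ones that follow, replace the generated serializer's list-based bookkeeping with sets plus an explicit `is_nullable_and_explicitly_set` check. A minimal, self-contained sketch of the same pattern (the toy model and field names are illustrative, and the SDK's UNSET sentinel is left out for brevity): unset optional fields are dropped from the dump, while a nullable field the caller explicitly set to None is kept.

```python
from typing import Optional

from pydantic import BaseModel, model_serializer


class TokenDetails(BaseModel):
    # Toy stand-in for PromptTokensDetails; not an orq_ai_sdk class.
    cached_tokens: Optional[int] = None
    audio_tokens: Optional[int] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"cached_tokens", "audio_tokens"}
        nullable_fields = {"cached_tokens"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # A nullable field only counts if the caller actually passed it,
            # even when the value it passed was None.
            is_nullable_and_explicitly_set = (
                k in nullable_fields and n in self.__pydantic_fields_set__
            )
            if val is not None or k not in optional_fields or is_nullable_and_explicitly_set:
                m[k] = val

        return m


print(TokenDetails().model_dump())                    # {}  (unset optionals omitted)
print(TokenDetails(cached_tokens=None).model_dump())  # {'cached_tokens': None}
print(TokenDetails(audio_tokens=3).model_dump())      # {'audio_tokens': 3}
```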
@@ -81,41 +80,40 @@ class CompletionTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -151,41 +149,94 @@ class Usage(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "completion_tokens",
+                "prompt_tokens",
+                "total_tokens",
+                "prompt_tokens_details",
+                "completion_tokens_details",
+            ]
+        )
+        nullable_fields = set(["prompt_tokens_details", "completion_tokens_details"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
-
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
+        return m
 
-
-
-
-
-
-
+
+FinishReason = Literal[
+    "stop",
+    "length",
+    "tool_calls",
+    "content_filter",
+    "function_call",
+    "max_iterations",
+    "max_time",
+]
+r"""The reason why the agent stopped generating"""
+
+
+CreateAgentResponseType = Literal["function",]
+
+
+class FunctionTypedDict(TypedDict):
+    name: NotRequired[str]
+    arguments: NotRequired[str]
+
+
+class Function(BaseModel):
+    name: Optional[str] = None
+
+    arguments: Optional[str] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
         return m
 
 
+class PendingToolCallsTypedDict(TypedDict):
+    id: str
+    type: CreateAgentResponseType
+    function: FunctionTypedDict
+
+
+class PendingToolCalls(BaseModel):
+    id: str
+
+    type: CreateAgentResponseType
+
+    function: Function
+
+
 class CreateAgentResponseTypedDict(TypedDict):
     r"""Response type from the create-response endpoint."""
 
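The new `FinishReason` literal and `PendingToolCalls` models describe runs that pause on a client-side tool call: per the docstrings above, `pending_tool_calls` carries the calls awaiting a user response when `finish_reason` is `"function_call"`. A hedged sketch of how a caller might branch on these fields, using plain dicts shaped like the TypedDicts above; the helper name and the assumption that `function.arguments` holds a JSON-encoded string are illustrative, not documented SDK behaviour.

```python
import json


def handle_agent_response(response: dict) -> None:
    # `response` mirrors CreateAgentResponseTypedDict as defined in the hunk above.
    finish_reason = response.get("finish_reason")

    if finish_reason == "function_call":
        # Tool calls awaiting a user/client-supplied result.
        for call in response.get("pending_tool_calls", []):
            fn = call.get("function", {})
            args = json.loads(fn.get("arguments") or "{}")  # assumes JSON-encoded arguments
            print(f"paused on tool {fn.get('name')!r} (call {call.get('id')}): {args}")
    elif finish_reason in ("max_iterations", "max_time"):
        print("agent stopped at an execution limit:", finish_reason)
    else:
        print("agent finished with reason:", finish_reason)


handle_agent_response(
    {
        "finish_reason": "function_call",
        "pending_tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {"name": "get_weather", "arguments": '{"city": "Berlin"}'},
            }
        ],
    }
)
```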
@@ -201,6 +252,10 @@ class CreateAgentResponseTypedDict(TypedDict):
     r"""Model used in provider/model format"""
     usage: NotRequired[Nullable[UsageTypedDict]]
     r"""Token usage from the agent execution"""
+    finish_reason: NotRequired[FinishReason]
+    r"""The reason why the agent stopped generating"""
+    pending_tool_calls: NotRequired[List[PendingToolCallsTypedDict]]
+    r"""Tool calls awaiting user response (when finish_reason is function_call)"""
 
 
 class CreateAgentResponse(BaseModel):
@@ -224,32 +279,33 @@ class CreateAgentResponse(BaseModel):
     usage: OptionalNullable[Usage] = UNSET
     r"""Token usage from the agent execution"""
 
+    finish_reason: Optional[FinishReason] = None
+    r"""The reason why the agent stopped generating"""
+
+    pending_tool_calls: Optional[List[PendingToolCalls]] = None
+    r"""Tool calls awaiting user response (when finish_reason is function_call)"""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["usage"]
-        nullable_fields = ["usage"]
-        null_default_fields = []
-
+        optional_fields = set(["usage", "finish_reason", "pending_tool_calls"])
+        nullable_fields = set(["usage"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
orq_ai_sdk/models/createagentresponserequestop.py

@@ -10,7 +10,7 @@ from .responsestreamingevent import (
 )
 from .textpart import TextPart, TextPartTypedDict
 from .toolresultpart import ToolResultPart, ToolResultPartTypedDict
-from orq_ai_sdk.types import BaseModel
+from orq_ai_sdk.types import BaseModel, UNSET_SENTINEL
 from orq_ai_sdk.utils import (
     FieldMetadata,
     PathParamMetadata,
@@ -19,7 +19,7 @@ from orq_ai_sdk.utils import (
     get_discriminator,
 )
 import pydantic
-from pydantic import Discriminator, Tag
+from pydantic import Discriminator, Tag, model_serializer
 from typing import Any, Dict, List, Literal, Optional, Union
 from typing_extensions import (
     Annotated,
@@ -97,6 +97,22 @@ class A2AMessage(BaseModel):
     message_id: Annotated[Optional[str], pydantic.Field(alias="messageId")] = None
     r"""Optional A2A message ID in ULID format"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["messageId"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class IdentityTypedDict(TypedDict):
     r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
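Note that the serializer added to `A2AMessage` keys `optional_fields` by the alias (`"messageId"`) rather than the Python field name, matching `k = f.alias or n` in the loop, which looks the value up under the aliased key when the model is dumped by alias. A small standalone sketch of that detail (toy class, not the SDK model):

```python
from typing import Optional

import pydantic
from pydantic import BaseModel, model_serializer


class Message(BaseModel):
    message_id: Optional[str] = pydantic.Field(default=None, alias="messageId")

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"messageId"}  # the alias, to match k below
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n          # "messageId", not "message_id"
            val = serialized.get(k)
            if val is not None or k not in optional_fields:
                m[k] = val

        return m


print(Message().model_dump(by_alias=True))                    # {}
print(Message(messageId="01JEXAMPLE").model_dump(by_alias=True))  # {'messageId': '01JEXAMPLE'}
```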
@@ -136,6 +152,22 @@ class Identity(BaseModel):
     tags: Optional[List[str]] = None
     r"""A list of tags associated with the contact"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 @deprecated(
     "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
@@ -181,6 +213,22 @@ class Contact(BaseModel):
     tags: Optional[List[str]] = None
     r"""A list of tags associated with the contact"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreateAgentResponseRequestThreadTypedDict(TypedDict):
     r"""Thread information to group related requests"""
@@ -200,6 +248,22 @@ class CreateAgentResponseRequestThread(BaseModel):
     tags: Optional[List[str]] = None
     r"""Optional tags to differentiate or categorize threads"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreateAgentResponseRequestMemoryTypedDict(TypedDict):
     r"""Memory configuration for the agent execution. Used to associate memory stores with specific entities like users or sessions."""
@@ -293,6 +357,35 @@ class CreateAgentResponseRequestRequestBody(BaseModel):
     conversation: Optional[Conversation] = None
     r"""Conversation context for chat studio integration"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "task_id",
+                "variables",
+                "identity",
+                "contact",
+                "thread",
+                "memory",
+                "metadata",
+                "background",
+                "stream",
+                "conversation",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreateAgentResponseRequestRequestTypedDict(TypedDict):
     agent_key: str
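For orientation, the `optional_fields` set in the hunk above doubles as an inventory of the request body's optional keys. A hedged sketch of a request-body payload built only from field names visible in these hunks; every value, and any required field not shown here, is an illustrative assumption rather than a documented example.

```python
request_body = {
    "variables": {"customer_tier": "pro"},
    "identity": {                     # shaped like Identity above
        "display_name": "Ada Lovelace",
        "email": "ada@example.com",
        "tags": ["beta"],
    },
    "thread": {"tags": ["support"]},  # shaped like CreateAgentResponseRequestThread above
    "metadata": {"source": "diff-example"},
    "background": False,
    "stream": True,
}
```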
@@ -325,6 +418,22 @@ class CreateAgentResponseRequestResponseBody(BaseModel):
     data: Optional[ResponseStreamingEvent] = None
     r"""Union of all possible streaming events. Each event has a type field for discrimination."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["data"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 CreateAgentResponseRequestResponseTypedDict = TypeAliasType(
     "CreateAgentResponseRequestResponseTypedDict",