orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +332 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +258 -0
- orq_ai_sdk/embeddings.py +238 -0
- orq_ai_sdk/generations.py +272 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5341 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1922 -384
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1375 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +1890 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +384 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +504 -0
- orq_ai_sdk/models/createimageop.py +208 -117
- orq_ai_sdk/models/createimagevariationop.py +486 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2748 -1252
- orq_ai_sdk/models/creatererankop.py +416 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +316 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +562 -0
- orq_ai_sdk/models/createtranslationop.py +540 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1690 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1673 -230
- orq_ai_sdk/models/getpromptversionop.py +1670 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +793 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1684 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1451 -197
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1428 -195
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1951 -404
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2844 -1450
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +660 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +232 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +251 -0
- orq_ai_sdk/transcriptions.py +326 -0
- orq_ai_sdk/translations.py +298 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +254 -0
- orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
- orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233

orq_ai_sdk/models/agentthoughtstreamingevent.py

@@ -83,6 +83,22 @@ class MessageDifference(BaseModel):
 
     metadata: Optional[Dict[str, Any]] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["metadata"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 AgentThoughtStreamingEventFinishReason = Literal[
     "stop",
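
The hunk above shows the pattern that repeats through the hunks below and across the regenerated model files listed earlier: each Pydantic model gains (or rewrites) a wrap-mode `serialize_model` that keeps its optional fields in a `set` and drops any optional field whose serialized value is `None`, while unset values are filtered out via the SDK's `UNSET_SENTINEL`. A minimal, self-contained sketch of the same behaviour, using plain `pydantic` and a local stand-in for the sentinel rather than the generated classes or the SDK's own imports:

from typing import Any, Dict, Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = "~?~unset~?~"  # stand-in for the SDK's unset marker


class MessageDifferenceSketch(BaseModel):
    # hypothetical simplified model mirroring the generated pattern
    metadata: Optional[Dict[str, Any]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["metadata"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                # optional fields are only emitted when they carry a value
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


print(MessageDifferenceSketch().model_dump())                   # {}
print(MessageDifferenceSketch(metadata={"a": 1}).model_dump())  # {'metadata': {'a': 1}}

In other words, an optional field such as `metadata` is simply omitted from the dump instead of being emitted as `null`.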
@@ -111,6 +127,22 @@ class AgentThoughtStreamingEventFunction(BaseModel):
     arguments: Optional[str] = None
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class AgentThoughtStreamingEventToolCallsTypedDict(TypedDict):
     index: NotRequired[float]
@@ -133,6 +165,22 @@ class AgentThoughtStreamingEventToolCalls(BaseModel):
     thought_signature: Optional[str] = None
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["index", "id", "type", "function", "thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 AgentThoughtStreamingEventDataRole = Literal["assistant",]
 
@@ -200,46 +248,39 @@ class AgentThoughtStreamingEventMessage(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "content",
-            "refusal",
-            "tool_calls",
-            "role",
-            "reasoning",
-            "reasoning_signature",
-            "redacted_reasoning",
-            "audio",
-        ]
-        nullable_fields = [
-            "content",
-            "refusal",
-            "reasoning",
-            "reasoning_signature",
-            "audio",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "content",
+                "refusal",
+                "tool_calls",
+                "role",
+                "reasoning",
+                "reasoning_signature",
+                "redacted_reasoning",
+                "audio",
+            ]
+        )
+        nullable_fields = set(
+            ["content", "refusal", "reasoning", "reasoning_signature", "audio"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
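
The `AgentThoughtStreamingEventMessage` hunk above (and the `Choice`, token-details, `Usage`, and `Metadata` hunks further down) use the variant of the pattern for models that also declare nullable fields: an optional-and-nullable field that the caller explicitly set to `None` is still emitted as `null`, while an optional field that was never set stays omitted. A self-contained sketch of that rule, again with a local stand-in sentinel and hypothetical field names rather than the generated model:

from typing import Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = "~?~unset~?~"  # stand-in for the SDK's unset marker


class MessageSketch(BaseModel):
    # hypothetical simplified model: "content" is optional and nullable
    content: Optional[str] = None
    role: Optional[str] = "assistant"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["content", "role"])
        nullable_fields = set(["content"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # a nullable field counts as "explicitly set" only if the caller assigned it
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


print(MessageSketch().model_dump())              # {'role': 'assistant'}
print(MessageSketch(content=None).model_dump())  # {'content': None, 'role': 'assistant'}

Tracking explicit assignment through `__pydantic_fields_set__` is what lets callers distinguish "clear this value" from "leave it out".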
@@ -265,30 +306,14 @@ class TopLogprobs(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
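
In the `TopLogprobs` hunk above and the similar `AgentThoughtStreamingEventContent`, `AgentThoughtStreamingEventTopLogprobs`, `Refusal`, and `Logprobs` hunks that follow, the change goes the other way: these classes declare no optional fields, so the removed optional/nullable bookkeeping always reduced to "emit everything that is not the unset marker", and the serializer collapses to a single `if val != UNSET_SENTINEL` check. A quick sanity check of that equivalence, with a stand-in sentinel and a condensed version of the removed branch (not the SDK's code):

UNSET_SENTINEL = "~?~unset~?~"  # stand-in for the SDK's unset marker


def old_keep(val, k, optional_fields=(), nullable_fields=("bytes",), is_set=False):
    # condensed form of the removed branch; optional_fields is empty for these classes
    optional_nullable = k in optional_fields and k in nullable_fields
    if val is not None and val != UNSET_SENTINEL:
        return True
    return val != UNSET_SENTINEL and (
        k not in optional_fields or (optional_nullable and is_set)
    )


def new_keep(val):
    return val != UNSET_SENTINEL


for v in (None, 0, "", "token", UNSET_SENTINEL):
    assert old_keep(v, "bytes") == new_keep(v)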
@@ -320,30 +345,14 @@ class AgentThoughtStreamingEventContent(BaseModel):
 
     @model_serializer(mode="wrap")
    def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
@@ -370,30 +379,14 @@ class AgentThoughtStreamingEventTopLogprobs(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
@@ -425,30 +418,14 @@ class Refusal(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
 
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
|
|
|
474
451
|
|
|
475
452
|
@model_serializer(mode="wrap")
|
|
476
453
|
def serialize_model(self, handler):
|
|
477
|
-
optional_fields = []
|
|
478
|
-
nullable_fields = ["content", "refusal"]
|
|
479
|
-
null_default_fields = []
|
|
480
|
-
|
|
481
454
|
serialized = handler(self)
|
|
482
|
-
|
|
483
455
|
m = {}
|
|
484
456
|
|
|
485
457
|
for n, f in type(self).model_fields.items():
|
|
486
458
|
k = f.alias or n
|
|
487
459
|
val = serialized.get(k)
|
|
488
|
-
serialized.pop(k, None)
|
|
489
|
-
|
|
490
|
-
optional_nullable = k in optional_fields and k in nullable_fields
|
|
491
|
-
is_set = (
|
|
492
|
-
self.__pydantic_fields_set__.intersection({n})
|
|
493
|
-
or k in null_default_fields
|
|
494
|
-
) # pylint: disable=no-member
|
|
495
460
|
|
|
496
|
-
if val
|
|
497
|
-
m[k] = val
|
|
498
|
-
elif val != UNSET_SENTINEL and (
|
|
499
|
-
not k in optional_fields or (optional_nullable and is_set)
|
|
500
|
-
):
|
|
461
|
+
if val != UNSET_SENTINEL:
|
|
501
462
|
m[k] = val
|
|
502
463
|
|
|
503
464
|
return m
|
|
@@ -529,31 +490,26 @@ class Choice(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["index", "logprobs"]
-        nullable_fields = ["finish_reason", "logprobs"]
-        null_default_fields = []
-
+        optional_fields = set(["index", "logprobs"])
+        nullable_fields = set(["finish_reason", "logprobs"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -575,31 +531,30 @@ class AgentThoughtStreamingEventPromptTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
-        nullable_fields = ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
-        null_default_fields = []
-
+        optional_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
+        nullable_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -624,41 +579,40 @@ class AgentThoughtStreamingEventCompletionTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "reasoning_tokens",
-            "accepted_prediction_tokens",
-            "rejected_prediction_tokens",
-            "audio_tokens",
-        ]
-        nullable_fields = [
-            "reasoning_tokens",
-            "accepted_prediction_tokens",
-            "rejected_prediction_tokens",
-            "audio_tokens",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -702,37 +656,34 @@ class AgentThoughtStreamingEventUsage(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "completion_tokens",
-            "prompt_tokens",
-            "total_tokens",
-            "prompt_tokens_details",
-            "completion_tokens_details",
-        ]
-        nullable_fields = ["prompt_tokens_details", "completion_tokens_details"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "completion_tokens",
+                "prompt_tokens",
+                "total_tokens",
+                "prompt_tokens_details",
+                "completion_tokens_details",
+            ]
+        )
+        nullable_fields = set(["prompt_tokens_details", "completion_tokens_details"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -767,6 +718,22 @@ class AgentThoughtStreamingEventData(BaseModel):
     usage: Optional[AgentThoughtStreamingEventUsage] = None
     r"""Usage statistics for the completion request."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["choice", "choiceIndex", "responseId", "usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class AgentThoughtStreamingEventTypedDict(TypedDict):
     r"""Emitted during agent reasoning. Contains the incremental message changes, model choices, iteration count, and token usage for this processing step."""
orq_ai_sdk/models/conversationresponse.py

@@ -47,31 +47,26 @@ class Metadata(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["generatingTitle", "entityId", "model"]
-        nullable_fields = ["entityId", "model"]
-        null_default_fields = []
-
+        optional_fields = set(["generatingTitle", "entityId", "model"])
+        nullable_fields = set(["entityId", "model"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -109,7 +104,7 @@ class ConversationResponse(BaseModel):
     r"""Unix timestamp in milliseconds when the conversation was last modified."""
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "conv_01kftttr2qac0v9jzsdskx8m5m"
     )
     r"""Unique conversation identifier with `conv_` prefix."""
 
@@ -121,3 +116,19 @@
 
     metadata: Optional[Metadata] = None
     r"""Additional conversation metadata."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "createdById", "updatedById", "metadata"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m