orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5746 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1945 -383
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1381 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +407 -128
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2759 -1251
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1696 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1679 -230
- orq_ai_sdk/models/getpromptversionop.py +1676 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +805 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1690 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1462 -196
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1439 -194
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1968 -397
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2854 -1448
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +364 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
- orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/models/parseop.py
CHANGED
@@ -76,6 +76,33 @@ class FastChunkerStrategy(BaseModel):
     forward_fallback: Optional[bool] = False
     r"""Search forward if no delimiter found in backward search window"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "metadata",
+                "return_type",
+                "target_size",
+                "delimiters",
+                "pattern",
+                "prefix",
+                "consecutive",
+                "forward_fallback",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ParseChunkingRequestChunkingRequestReturnType = Literal[
     "chunks",
@@ -133,6 +160,30 @@ class AgenticChunkerStrategy(BaseModel):
     min_characters_per_chunk: Optional[int] = 24
     r"""Minimum characters allowed per chunk"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "metadata",
+                "return_type",
+                "chunk_size",
+                "candidate_size",
+                "min_characters_per_chunk",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ParseChunkingRequestChunkingReturnType = Literal[
     "chunks",
@@ -223,6 +274,33 @@ class SemanticChunkerStrategy(BaseModel):
     similarity_window: Optional[int] = 1
     r"""Window size for similarity comparison"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "metadata",
+                "return_type",
+                "chunk_size",
+                "threshold",
+                "dimensions",
+                "max_tokens",
+                "mode",
+                "similarity_window",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ParseChunkingRequestReturnType = Literal[
     "chunks",
@@ -275,6 +353,30 @@ class RecursiveChunkerStrategy(BaseModel):
     min_characters_per_chunk: Optional[int] = 24
     r"""Minimum characters allowed per chunk"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "metadata",
+                "return_type",
+                "chunk_size",
+                "separators",
+                "min_characters_per_chunk",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ChunkingRequestReturnType = Literal[
     "chunks",
@@ -327,6 +429,30 @@ class SentenceChunkerStrategy(BaseModel):
     min_sentences_per_chunk: Optional[int] = 1
     r"""Minimum number of sentences per chunk"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "metadata",
+                "return_type",
+                "chunk_size",
+                "chunk_overlap",
+                "min_sentences_per_chunk",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ReturnType = Literal[
     "chunks",
@@ -374,6 +500,24 @@ class TokenChunkerStrategy(BaseModel):
     chunk_overlap: Optional[int] = 0
     r"""Number of tokens to overlap between chunks"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            ["metadata", "return_type", "chunk_size", "chunk_overlap"]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ParseChunkingRequestTypedDict = TypeAliasType(
     "ParseChunkingRequestTypedDict",
@@ -418,30 +562,14 @@ class ParseMetadata(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["start_index", "end_index", "token_count"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
@@ -464,6 +592,22 @@ class Chunks(BaseModel):
 
     metadata: Optional[ParseMetadata] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["metadata"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ParseResponseBodyTypedDict(TypedDict):
     r"""Text successfully chunked"""
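The serializer blocks added throughout parseop.py all follow the same wrap-mode pydantic pattern: optional fields that serialize to None are dropped from the output, everything else passes through. The sketch below is a minimal, standalone illustration of that pattern, assuming pydantic v2; the class and field names are illustrative only and are not part of orq_ai_sdk, and the UNSET_SENTINEL check from the generated code is omitted for brevity.

from typing import Optional

from pydantic import BaseModel, model_serializer


class ExampleStrategy(BaseModel):
    chunk_size: int = 512
    chunk_overlap: Optional[int] = 0
    metadata: Optional[dict] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Optional fields whose serialized value is None are omitted;
        # required fields and non-None values always pass through.
        optional_fields = {"chunk_overlap", "metadata"}
        serialized = handler(self)
        m = {}
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            if val is not None or k not in optional_fields:
                m[k] = val
        return m


print(ExampleStrategy().model_dump())
# {'chunk_size': 512, 'chunk_overlap': 0} -- 'metadata' (None) is dropped
print(ExampleStrategy(metadata={"source": "demo"}).model_dump())
# {'chunk_size': 512, 'chunk_overlap': 0, 'metadata': {'source': 'demo'}}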
orq_ai_sdk/models/partdoneevent.py
CHANGED
@@ -1,8 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from orq_ai_sdk.types import BaseModel
+from orq_ai_sdk.types import BaseModel, UNSET_SENTINEL
 import pydantic
+from pydantic import model_serializer
 from typing import Any, Dict, Literal, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -39,7 +40,7 @@ class PartReasoningPart(BaseModel):
     r"""The reasoning or thought process behind the response. Used for chain-of-thought or extended thinking."""
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "reasoning_01kg2rzq2dydmjdrwpfgrfzpy3"
     )
     r"""Unique identifier for the part. Format: reasoning_{ulid} (e.g., reasoning_01hxyz...)"""
 
@@ -49,6 +50,22 @@ class PartReasoningPart(BaseModel):
     signature: Optional[str] = None
     r"""Optional cryptographic signature to verify the authenticity and integrity of the reasoning content"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "metadata", "signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class PartDoneEventDataTypedDict(TypedDict):
     part_id: str
orq_ai_sdk/models/post_v2_router_ocrop.py
ADDED
@@ -0,0 +1,408 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from orq_ai_sdk.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from orq_ai_sdk.utils import get_discriminator
+from pydantic import Discriminator, Tag, model_serializer
+from typing import List, Literal, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+PostV2RouterOcrDocumentType = Literal["image_url",]
+
+
+class ImageURL2TypedDict(TypedDict):
+    r"""URL of the image to process"""
+
+    url: str
+    detail: NotRequired[str]
+
+
+class ImageURL2(BaseModel):
+    r"""URL of the image to process"""
+
+    url: str
+
+    detail: Optional[str] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["detail"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+DocumentImageURLTypedDict = TypeAliasType(
+    "DocumentImageURLTypedDict", Union[ImageURL2TypedDict, str]
+)
+
+
+DocumentImageURL = TypeAliasType("DocumentImageURL", Union[ImageURL2, str])
+
+
+class Document2TypedDict(TypedDict):
+    type: PostV2RouterOcrDocumentType
+    image_url: DocumentImageURLTypedDict
+
+
+class Document2(BaseModel):
+    type: PostV2RouterOcrDocumentType
+
+    image_url: DocumentImageURL
+
+
+DocumentType = Literal["document_url",]
+
+
+class Document1TypedDict(TypedDict):
+    type: DocumentType
+    document_url: str
+    r"""URL of the document to process"""
+    document_name: NotRequired[str]
+    r"""The name of the document"""
+
+
+class Document1(BaseModel):
+    type: DocumentType
+
+    document_url: str
+    r"""URL of the document to process"""
+
+    document_name: Optional[str] = None
+    r"""The name of the document"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["document_name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+DocumentTypedDict = TypeAliasType(
+    "DocumentTypedDict", Union[Document2TypedDict, Document1TypedDict]
+)
+r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+
+
+Document = Annotated[
+    Union[
+        Annotated[Document1, Tag("document_url")],
+        Annotated[Document2, Tag("image_url")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+
+
+class OcrSettingsTypedDict(TypedDict):
+    r"""Optional settings for the OCR run"""
+
+    include_image_base64: NotRequired[Nullable[bool]]
+    r"""Whether to include image Base64 in the response. Null for default."""
+    max_images_to_include: NotRequired[int]
+    r"""Maximum number of images to extract. Null for no limit."""
+    image_min_size: NotRequired[int]
+    r"""Minimum height and width of image to extract. Null for no minimum."""
+
+
+class OcrSettings(BaseModel):
+    r"""Optional settings for the OCR run"""
+
+    include_image_base64: OptionalNullable[bool] = UNSET
+    r"""Whether to include image Base64 in the response. Null for default."""
+
+    max_images_to_include: Optional[int] = None
+    r"""Maximum number of images to extract. Null for no limit."""
+
+    image_min_size: Optional[int] = None
+    r"""Minimum height and width of image to extract. Null for no minimum."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            ["include_image_base64", "max_images_to_include", "image_min_size"]
+        )
+        nullable_fields = set(["include_image_base64"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class PostV2RouterOcrRequestBodyTypedDict(TypedDict):
+    r"""input"""
+
+    model: str
+    r"""ID of the model to use for OCR."""
+    document: DocumentTypedDict
+    r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+    pages: NotRequired[Nullable[List[int]]]
+    r"""Specific pages to process. Can be a single number, range, or list. Starts from 0. Null for all pages."""
+    ocr_settings: NotRequired[OcrSettingsTypedDict]
+    r"""Optional settings for the OCR run"""
+
+
+class PostV2RouterOcrRequestBody(BaseModel):
+    r"""input"""
+
+    model: str
+    r"""ID of the model to use for OCR."""
+
+    document: Document
+    r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+
+    pages: OptionalNullable[List[int]] = UNSET
+    r"""Specific pages to process. Can be a single number, range, or list. Starts from 0. Null for all pages."""
+
+    ocr_settings: Optional[OcrSettings] = None
+    r"""Optional settings for the OCR run"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["pages", "ocr_settings"])
+        nullable_fields = set(["pages"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class PostV2RouterOcrImagesTypedDict(TypedDict):
+    id: str
+    r"""The id of the image"""
+    image_base64: NotRequired[Nullable[str]]
+    r"""The base64 encoded image"""
+
+
+class PostV2RouterOcrImages(BaseModel):
+    id: str
+    r"""The id of the image"""
+
+    image_base64: OptionalNullable[str] = UNSET
+    r"""The base64 encoded image"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["image_base64"])
+        nullable_fields = set(["image_base64"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class DimensionsTypedDict(TypedDict):
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+    dpi: int
+    r"""Dots per inch of the page-image"""
+    height: int
+    r"""Height of the image in pixels"""
+    width: int
+    r"""Width of the image in pixels"""
+
+
+class Dimensions(BaseModel):
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+    dpi: int
+    r"""Dots per inch of the page-image"""
+
+    height: int
+    r"""Height of the image in pixels"""
+
+    width: int
+    r"""Width of the image in pixels"""
+
+
+class PagesTypedDict(TypedDict):
+    index: float
+    r"""The page index in a pdf document starting from 0"""
+    markdown: str
+    r"""The markdown string response of the page"""
+    images: List[PostV2RouterOcrImagesTypedDict]
+    dimensions: NotRequired[Nullable[DimensionsTypedDict]]
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+
+class Pages(BaseModel):
+    index: float
+    r"""The page index in a pdf document starting from 0"""
+
+    markdown: str
+    r"""The markdown string response of the page"""
+
+    images: List[PostV2RouterOcrImages]
+
+    dimensions: OptionalNullable[Dimensions] = UNSET
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["dimensions"])
+        nullable_fields = set(["dimensions"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+PostV2RouterOcrUsageType = Literal["tokens",]
+
+
+class Usage2TypedDict(TypedDict):
+    r"""The usage information for the OCR run counted as tokens processed"""
+
+    type: PostV2RouterOcrUsageType
+    tokens_processed: int
+    r"""The number of tokens processed"""
+
+
+class Usage2(BaseModel):
+    r"""The usage information for the OCR run counted as tokens processed"""
+
+    type: PostV2RouterOcrUsageType
+
+    tokens_processed: int
+    r"""The number of tokens processed"""
+
+
+UsageType = Literal["pages",]
+
+
+class Usage1TypedDict(TypedDict):
+    r"""The usage information for the OCR run counted as pages processed"""
+
+    type: UsageType
+    pages_processed: int
+    r"""The number of pages processed"""
+
+
+class Usage1(BaseModel):
+    r"""The usage information for the OCR run counted as pages processed"""
+
+    type: UsageType
+
+    pages_processed: int
+    r"""The number of pages processed"""
+
+
+PostV2RouterOcrUsageTypedDict = TypeAliasType(
+    "PostV2RouterOcrUsageTypedDict", Union[Usage1TypedDict, Usage2TypedDict]
+)
+
+
+PostV2RouterOcrUsage = Annotated[
+    Union[Annotated[Usage1, Tag("pages")], Annotated[Usage2, Tag("tokens")]],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+
+
+class PostV2RouterOcrResponseBodyTypedDict(TypedDict):
+    r"""Represents an OCR response from the API."""
+
+    model: str
+    r"""ID of the model used for OCR."""
+    pages: List[PagesTypedDict]
+    usage: PostV2RouterOcrUsageTypedDict
+
+
+class PostV2RouterOcrResponseBody(BaseModel):
+    r"""Represents an OCR response from the API."""
+
+    model: str
+    r"""ID of the model used for OCR."""
+
+    pages: List[Pages]
+
+    usage: PostV2RouterOcrUsage
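The new OCR models above use a second variant of the serializer for nullable fields: an optional nullable field (for example include_image_base64 or pages) stays in the output when the caller explicitly set it, even to None, and is dropped when left untouched. Below is a minimal standalone sketch of that rule, again in plain pydantic v2 with illustrative names rather than the SDK's own types and sentinels.

from typing import Optional

from pydantic import BaseModel, model_serializer


class OcrSettingsSketch(BaseModel):
    include_image_base64: Optional[bool] = None   # optional and nullable
    max_images_to_include: Optional[int] = None   # optional only

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"include_image_base64", "max_images_to_include"}
        nullable_fields = {"include_image_base64"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # A nullable field counts as "set" only when the caller passed it
            # explicitly, i.e. it appears in __pydantic_fields_set__.
            explicitly_set = k in nullable_fields and n in self.__pydantic_fields_set__

            if val is not None or k not in optional_fields or explicitly_set:
                m[k] = val

        return m


print(OcrSettingsSketch().model_dump())
# {} -- nothing was set, so both optional fields are dropped
print(OcrSettingsSketch(include_image_base64=None).model_dump())
# {'include_image_base64': None} -- explicit None on a nullable field is kept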