orq-ai-sdk 4.2.0rc49__py3-none-any.whl → 4.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/chat.py +22 -0
  5. orq_ai_sdk/completions.py +438 -0
  6. orq_ai_sdk/contacts.py +43 -886
  7. orq_ai_sdk/deployments.py +61 -0
  8. orq_ai_sdk/edits.py +364 -0
  9. orq_ai_sdk/embeddings.py +344 -0
  10. orq_ai_sdk/generations.py +370 -0
  11. orq_ai_sdk/images.py +28 -0
  12. orq_ai_sdk/models/__init__.py +3839 -424
  13. orq_ai_sdk/models/conversationresponse.py +1 -1
  14. orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
  15. orq_ai_sdk/models/createagentrequestop.py +768 -12
  16. orq_ai_sdk/models/createagentresponse.py +68 -2
  17. orq_ai_sdk/models/createchatcompletionop.py +538 -313
  18. orq_ai_sdk/models/createcompletionop.py +2078 -0
  19. orq_ai_sdk/models/createcontactop.py +5 -10
  20. orq_ai_sdk/models/createconversationop.py +1 -1
  21. orq_ai_sdk/models/createconversationresponseop.py +2 -2
  22. orq_ai_sdk/models/createdatasetitemop.py +4 -4
  23. orq_ai_sdk/models/createdatasetop.py +1 -1
  24. orq_ai_sdk/models/createdatasourceop.py +1 -1
  25. orq_ai_sdk/models/createembeddingop.py +579 -0
  26. orq_ai_sdk/models/createevalop.py +14 -14
  27. orq_ai_sdk/models/createidentityop.py +1 -1
  28. orq_ai_sdk/models/createimageeditop.py +715 -0
  29. orq_ai_sdk/models/createimageop.py +228 -82
  30. orq_ai_sdk/models/createimagevariationop.py +706 -0
  31. orq_ai_sdk/models/creatememoryop.py +4 -2
  32. orq_ai_sdk/models/createmoderationop.py +521 -0
  33. orq_ai_sdk/models/createpromptop.py +375 -6
  34. orq_ai_sdk/models/creatererankop.py +608 -0
  35. orq_ai_sdk/models/createresponseop.py +2567 -0
  36. orq_ai_sdk/models/createspeechop.py +466 -0
  37. orq_ai_sdk/models/createtoolop.py +6 -6
  38. orq_ai_sdk/models/createtranscriptionop.py +732 -0
  39. orq_ai_sdk/models/createtranslationop.py +702 -0
  40. orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
  41. orq_ai_sdk/models/deploymentsop.py +1 -0
  42. orq_ai_sdk/models/deploymentstreamop.py +7 -0
  43. orq_ai_sdk/models/filegetop.py +1 -1
  44. orq_ai_sdk/models/filelistop.py +1 -1
  45. orq_ai_sdk/models/fileuploadop.py +1 -1
  46. orq_ai_sdk/models/generateconversationnameop.py +1 -1
  47. orq_ai_sdk/models/getallmemoriesop.py +4 -2
  48. orq_ai_sdk/models/getallpromptsop.py +188 -3
  49. orq_ai_sdk/models/getalltoolsop.py +6 -6
  50. orq_ai_sdk/models/getevalsop.py +17 -17
  51. orq_ai_sdk/models/getonepromptop.py +188 -3
  52. orq_ai_sdk/models/getpromptversionop.py +188 -3
  53. orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
  54. orq_ai_sdk/models/listagentsop.py +372 -0
  55. orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
  56. orq_ai_sdk/models/listdatasetsop.py +1 -1
  57. orq_ai_sdk/models/listdatasourcesop.py +1 -1
  58. orq_ai_sdk/models/listidentitiesop.py +1 -1
  59. orq_ai_sdk/models/listmodelsop.py +1 -0
  60. orq_ai_sdk/models/listpromptversionsop.py +188 -3
  61. orq_ai_sdk/models/partdoneevent.py +1 -1
  62. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  63. orq_ai_sdk/models/publiccontact.py +9 -3
  64. orq_ai_sdk/models/publicidentity.py +62 -0
  65. orq_ai_sdk/models/reasoningpart.py +1 -1
  66. orq_ai_sdk/models/responsedoneevent.py +14 -11
  67. orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
  68. orq_ai_sdk/models/retrievedatapointop.py +4 -4
  69. orq_ai_sdk/models/retrievedatasetop.py +1 -1
  70. orq_ai_sdk/models/retrievedatasourceop.py +1 -1
  71. orq_ai_sdk/models/retrieveidentityop.py +1 -1
  72. orq_ai_sdk/models/retrievememoryop.py +4 -2
  73. orq_ai_sdk/models/retrievetoolop.py +6 -6
  74. orq_ai_sdk/models/runagentop.py +379 -9
  75. orq_ai_sdk/models/streamrunagentop.py +385 -9
  76. orq_ai_sdk/models/updateagentop.py +770 -12
  77. orq_ai_sdk/models/updateconversationop.py +1 -1
  78. orq_ai_sdk/models/updatedatapointop.py +4 -4
  79. orq_ai_sdk/models/updatedatasetop.py +1 -1
  80. orq_ai_sdk/models/updatedatasourceop.py +1 -1
  81. orq_ai_sdk/models/updateevalop.py +14 -14
  82. orq_ai_sdk/models/updateidentityop.py +1 -1
  83. orq_ai_sdk/models/updatememoryop.py +4 -2
  84. orq_ai_sdk/models/updatepromptop.py +375 -6
  85. orq_ai_sdk/models/updatetoolop.py +7 -7
  86. orq_ai_sdk/moderations.py +218 -0
  87. orq_ai_sdk/orq_completions.py +666 -0
  88. orq_ai_sdk/orq_responses.py +398 -0
  89. orq_ai_sdk/rerank.py +330 -0
  90. orq_ai_sdk/router.py +89 -641
  91. orq_ai_sdk/speech.py +333 -0
  92. orq_ai_sdk/transcriptions.py +416 -0
  93. orq_ai_sdk/translations.py +384 -0
  94. orq_ai_sdk/variations.py +364 -0
  95. orq_ai_sdk-4.2.12.dist-info/METADATA +888 -0
  96. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.12.dist-info}/RECORD +98 -75
  97. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.12.dist-info}/WHEEL +1 -1
  98. orq_ai_sdk/models/deletecontactop.py +0 -44
  99. orq_ai_sdk/models/listcontactsop.py +0 -265
  100. orq_ai_sdk/models/retrievecontactop.py +0 -142
  101. orq_ai_sdk/models/updatecontactop.py +0 -233
  102. orq_ai_sdk-4.2.0rc49.dist-info/METADATA +0 -788
  103. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.12.dist-info}/top_level.txt +0 -0
@@ -111,6 +111,7 @@ ListPromptVersionsModelType = Literal[
     "tts",
     "stt",
     "rerank",
+    "ocr",
     "moderation",
     "vision",
 ]
@@ -717,7 +718,7 @@ ListPromptVersionsContent = TypeAliasType(
 r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
 
 
-ListPromptVersionsType = Literal["function",]
+ListPromptVersionsPromptsType = Literal["function",]
 
 
 class ListPromptVersionsFunctionTypedDict(TypedDict):
@@ -734,14 +735,14 @@ class ListPromptVersionsFunction(BaseModel):
 
 
 class ListPromptVersionsToolCallsTypedDict(TypedDict):
-    type: ListPromptVersionsType
+    type: ListPromptVersionsPromptsType
     function: ListPromptVersionsFunctionTypedDict
     id: NotRequired[str]
     index: NotRequired[float]
 
 
 class ListPromptVersionsToolCalls(BaseModel):
-    type: ListPromptVersionsType
+    type: ListPromptVersionsPromptsType
 
     function: ListPromptVersionsFunction
 
@@ -1253,6 +1254,154 @@ class ListPromptVersionsGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class ListPromptVersionsFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class ListPromptVersionsFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class ListPromptVersionsRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class ListPromptVersionsRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListPromptVersionsType = Literal["exact_match",]
+
+
+class ListPromptVersionsCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: ListPromptVersionsType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class ListPromptVersionsCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: ListPromptVersionsType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListPromptVersionsLoadBalancerType = Literal["weight_based",]
+
+
+class ListPromptVersionsLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class ListPromptVersionsLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ListPromptVersionsLoadBalancer1TypedDict(TypedDict):
+    type: ListPromptVersionsLoadBalancerType
+    models: List[ListPromptVersionsLoadBalancerModelsTypedDict]
+
+
+class ListPromptVersionsLoadBalancer1(BaseModel):
+    type: ListPromptVersionsLoadBalancerType
+
+    models: List[ListPromptVersionsLoadBalancerModels]
+
+
+ListPromptVersionsLoadBalancerTypedDict = ListPromptVersionsLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+ListPromptVersionsLoadBalancer = ListPromptVersionsLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class ListPromptVersionsTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class ListPromptVersionsTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 ListPromptVersionsMessagesPromptsResponse200Role = Literal["tool",]
 r"""The role of the messages author, in this case tool."""
 
@@ -1831,6 +1980,8 @@ ListPromptVersionsPromptsMessages = Annotated[
 class ListPromptVersionsPromptFieldTypedDict(TypedDict):
     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
 
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[ListPromptVersionsAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -1885,6 +2036,16 @@ class ListPromptVersionsPromptFieldTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[ListPromptVersionsGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[ListPromptVersionsFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[ListPromptVersionsRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[ListPromptVersionsCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[ListPromptVersionsLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[ListPromptVersionsTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
     messages: NotRequired[List[ListPromptVersionsPromptsMessagesTypedDict]]
     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
     model: NotRequired[Nullable[str]]
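
Not part of the diff: a minimal sketch of how the new per-request routing options above (fallbacks, retry, cache, load_balancer, timeout) could be populated. It assumes the generated classes are re-exported from orq_ai_sdk.models, as other generated models are; the model IDs are placeholders.

from orq_ai_sdk.models import (
    ListPromptVersionsCache,
    ListPromptVersionsFallbacks,
    ListPromptVersionsLoadBalancer1,
    ListPromptVersionsLoadBalancerModels,
    ListPromptVersionsRetry,
    ListPromptVersionsTimeout,
)

# Retry twice on rate limits and transient server errors.
retry = ListPromptVersionsRetry(count=2, on_codes=[429, 500, 503])

# Exact-match response cache kept for 10 minutes (ttl is in seconds).
cache = ListPromptVersionsCache(type="exact_match", ttl=600)

# Fallback model tried if the primary model fails (placeholder ID).
fallbacks = [ListPromptVersionsFallbacks(model="openai/gpt-4o-mini")]

# Weight-based load balancing across two placeholder models.
load_balancer = ListPromptVersionsLoadBalancer1(
    type="weight_based",
    models=[
        ListPromptVersionsLoadBalancerModels(model="openai/gpt-4o", weight=0.7),
        ListPromptVersionsLoadBalancerModels(model="openai/gpt-4o-mini", weight=0.3),
    ],
)

# Abort the upstream call after 30 seconds (call_timeout is in milliseconds).
timeout = ListPromptVersionsTimeout(call_timeout=30000)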
@@ -1895,6 +2056,9 @@ class ListPromptVersionsPromptFieldTypedDict(TypedDict):
 class ListPromptVersionsPromptField(BaseModel):
     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
 
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[ListPromptVersionsAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
@@ -1971,6 +2135,21 @@ class ListPromptVersionsPromptField(BaseModel):
     guardrails: Optional[List[ListPromptVersionsGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[ListPromptVersionsFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[ListPromptVersionsRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[ListPromptVersionsCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[ListPromptVersionsLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[ListPromptVersionsTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     messages: Optional[List[ListPromptVersionsPromptsMessages]] = None
     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
 
@@ -1983,6 +2162,7 @@ class ListPromptVersionsPromptField(BaseModel):
     def serialize_model(self, handler):
         optional_fields = set(
            [
+                "name",
                "audio",
                "frequency_penalty",
                "max_tokens",
@@ -2005,6 +2185,11 @@ class ListPromptVersionsPromptField(BaseModel):
                "parallel_tool_calls",
                "modalities",
                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
                "messages",
                "model",
                "version",
@@ -40,7 +40,7 @@ class PartReasoningPart(BaseModel):
     r"""The reasoning or thought process behind the response. Used for chain-of-thought or extended thinking."""
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "reasoning_01kfds3avvptfe7wsz639qyhmq"
+        "reasoning_01kfzbgfh56wjv31gw3712s28r"
     )
     r"""Unique identifier for the part. Format: reasoning_{ulid} (e.g., reasoning_01hxyz...)"""
 
@@ -0,0 +1,408 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from orq_ai_sdk.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from orq_ai_sdk.utils import get_discriminator
+from pydantic import Discriminator, Tag, model_serializer
+from typing import List, Literal, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+PostV2RouterOcrDocumentType = Literal["image_url",]
+
+
+class ImageURL2TypedDict(TypedDict):
+    r"""URL of the image to process"""
+
+    url: str
+    detail: NotRequired[str]
+
+
+class ImageURL2(BaseModel):
+    r"""URL of the image to process"""
+
+    url: str
+
+    detail: Optional[str] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["detail"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+DocumentImageURLTypedDict = TypeAliasType(
+    "DocumentImageURLTypedDict", Union[ImageURL2TypedDict, str]
+)
+
+
+DocumentImageURL = TypeAliasType("DocumentImageURL", Union[ImageURL2, str])
+
+
+class Document2TypedDict(TypedDict):
+    type: PostV2RouterOcrDocumentType
+    image_url: DocumentImageURLTypedDict
+
+
+class Document2(BaseModel):
+    type: PostV2RouterOcrDocumentType
+
+    image_url: DocumentImageURL
+
+
+DocumentType = Literal["document_url",]
+
+
+class Document1TypedDict(TypedDict):
+    type: DocumentType
+    document_url: str
+    r"""URL of the document to process"""
+    document_name: NotRequired[str]
+    r"""The name of the document"""
+
+
+class Document1(BaseModel):
+    type: DocumentType
+
+    document_url: str
+    r"""URL of the document to process"""
+
+    document_name: Optional[str] = None
+    r"""The name of the document"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["document_name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+DocumentTypedDict = TypeAliasType(
+    "DocumentTypedDict", Union[Document2TypedDict, Document1TypedDict]
+)
+r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+
+
+Document = Annotated[
+    Union[
+        Annotated[Document1, Tag("document_url")],
+        Annotated[Document2, Tag("image_url")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+
+
+class OcrSettingsTypedDict(TypedDict):
+    r"""Optional settings for the OCR run"""
+
+    include_image_base64: NotRequired[Nullable[bool]]
+    r"""Whether to include image Base64 in the response. Null for default."""
+    max_images_to_include: NotRequired[int]
+    r"""Maximum number of images to extract. Null for no limit."""
+    image_min_size: NotRequired[int]
+    r"""Minimum height and width of image to extract. Null for no minimum."""
+
+
+class OcrSettings(BaseModel):
+    r"""Optional settings for the OCR run"""
+
+    include_image_base64: OptionalNullable[bool] = UNSET
+    r"""Whether to include image Base64 in the response. Null for default."""
+
+    max_images_to_include: Optional[int] = None
+    r"""Maximum number of images to extract. Null for no limit."""
+
+    image_min_size: Optional[int] = None
+    r"""Minimum height and width of image to extract. Null for no minimum."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            ["include_image_base64", "max_images_to_include", "image_min_size"]
+        )
+        nullable_fields = set(["include_image_base64"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class PostV2RouterOcrRequestBodyTypedDict(TypedDict):
+    r"""input"""
+
+    model: str
+    r"""ID of the model to use for OCR."""
+    document: DocumentTypedDict
+    r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+    pages: NotRequired[Nullable[List[int]]]
+    r"""Specific pages to process. Can be a single number, range, or list. Starts from 0. Null for all pages."""
+    ocr_settings: NotRequired[OcrSettingsTypedDict]
+    r"""Optional settings for the OCR run"""
+
+
+class PostV2RouterOcrRequestBody(BaseModel):
+    r"""input"""
+
+    model: str
+    r"""ID of the model to use for OCR."""
+
+    document: Document
+    r"""Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk."""
+
+    pages: OptionalNullable[List[int]] = UNSET
+    r"""Specific pages to process. Can be a single number, range, or list. Starts from 0. Null for all pages."""
+
+    ocr_settings: Optional[OcrSettings] = None
+    r"""Optional settings for the OCR run"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["pages", "ocr_settings"])
+        nullable_fields = set(["pages"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class PostV2RouterOcrImagesTypedDict(TypedDict):
+    id: str
+    r"""The id of the image"""
+    image_base64: NotRequired[Nullable[str]]
+    r"""The base64 encoded image"""
+
+
+class PostV2RouterOcrImages(BaseModel):
+    id: str
+    r"""The id of the image"""
+
+    image_base64: OptionalNullable[str] = UNSET
+    r"""The base64 encoded image"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["image_base64"])
+        nullable_fields = set(["image_base64"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class DimensionsTypedDict(TypedDict):
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+    dpi: int
+    r"""Dots per inch of the page-image"""
+    height: int
+    r"""Height of the image in pixels"""
+    width: int
+    r"""Width of the image in pixels"""
+
+
+class Dimensions(BaseModel):
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+    dpi: int
+    r"""Dots per inch of the page-image"""
+
+    height: int
+    r"""Height of the image in pixels"""
+
+    width: int
+    r"""Width of the image in pixels"""
+
+
+class PagesTypedDict(TypedDict):
+    index: float
+    r"""The page index in a pdf document starting from 0"""
+    markdown: str
+    r"""The markdown string response of the page"""
+    images: List[PostV2RouterOcrImagesTypedDict]
+    dimensions: NotRequired[Nullable[DimensionsTypedDict]]
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+
+class Pages(BaseModel):
+    index: float
+    r"""The page index in a pdf document starting from 0"""
+
+    markdown: str
+    r"""The markdown string response of the page"""
+
+    images: List[PostV2RouterOcrImages]
+
+    dimensions: OptionalNullable[Dimensions] = UNSET
+    r"""The dimensions of the PDF Page's screenshot image"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["dimensions"])
+        nullable_fields = set(["dimensions"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+PostV2RouterOcrUsageType = Literal["tokens",]
+
+
+class Usage2TypedDict(TypedDict):
+    r"""The usage information for the OCR run counted as tokens processed"""
+
+    type: PostV2RouterOcrUsageType
+    tokens_processed: int
+    r"""The number of tokens processed"""
+
+
+class Usage2(BaseModel):
+    r"""The usage information for the OCR run counted as tokens processed"""
+
+    type: PostV2RouterOcrUsageType
+
+    tokens_processed: int
+    r"""The number of tokens processed"""
+
+
+UsageType = Literal["pages",]
+
+
+class Usage1TypedDict(TypedDict):
+    r"""The usage information for the OCR run counted as pages processed"""
+
+    type: UsageType
+    pages_processed: int
+    r"""The number of pages processed"""
+
+
+class Usage1(BaseModel):
+    r"""The usage information for the OCR run counted as pages processed"""
+
+    type: UsageType
+
+    pages_processed: int
+    r"""The number of pages processed"""
+
+
+PostV2RouterOcrUsageTypedDict = TypeAliasType(
+    "PostV2RouterOcrUsageTypedDict", Union[Usage1TypedDict, Usage2TypedDict]
+)
+
+
+PostV2RouterOcrUsage = Annotated[
+    Union[Annotated[Usage1, Tag("pages")], Annotated[Usage2, Tag("tokens")]],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+
+
+class PostV2RouterOcrResponseBodyTypedDict(TypedDict):
+    r"""Represents an OCR response from the API."""
+
+    model: str
+    r"""ID of the model used for OCR."""
+    pages: List[PagesTypedDict]
+    usage: PostV2RouterOcrUsageTypedDict
+
+
+class PostV2RouterOcrResponseBody(BaseModel):
+    r"""Represents an OCR response from the API."""
+
+    model: str
+    r"""ID of the model used for OCR."""
+
+    pages: List[Pages]
+
+    usage: PostV2RouterOcrUsage
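
Not part of the diff: a minimal, hypothetical request built from the new OCR models, with a placeholder model ID and URL. The import path follows the new file's location; the discriminated Document union accepts either a Document1 (document_url) or Document2 (image_url) value.

from orq_ai_sdk.models.post_v2_router_ocrop import (
    Document1,
    OcrSettings,
    PostV2RouterOcrRequestBody,
)

body = PostV2RouterOcrRequestBody(
    model="mistral/mistral-ocr-latest",  # placeholder model ID
    document=Document1(
        type="document_url",
        document_url="https://example.com/report.pdf",  # placeholder URL
        document_name="report.pdf",
    ),
    pages=[0, 1],  # first two pages; omit for all pages
    ocr_settings=OcrSettings(include_image_base64=False),
)

# model_dump applies the generated serialize_model wrapper, so unset
# optional fields are dropped from the resulting payload.
payload = body.model_dump()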