orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/models/createtranslationop.py
@@ -0,0 +1,540 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .publiccontact import PublicContact, PublicContactTypedDict
+from .publicidentity import PublicIdentity, PublicIdentityTypedDict
+from dataclasses import dataclass, field
+import httpx
+import io
+from orq_ai_sdk.models import OrqError
+from orq_ai_sdk.types import BaseModel, Nullable, UNSET_SENTINEL
+from orq_ai_sdk.utils import FieldMetadata, MultipartFormMetadata
+import pydantic
+from pydantic import model_serializer
+from typing import IO, List, Literal, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+CreateTranslationResponseFormat = Literal[
+    "json",
+    "text",
+    "srt",
+    "verbose_json",
+    "vtt",
+]
+r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""
+
+
+CreateTranslationTimestampsGranularity = Literal[
+    "none",
+    "word",
+    "character",
+]
+r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""
+
+
+class CreateTranslationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class CreateTranslationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class CreateTranslationRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class CreateTranslationRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+CreateTranslationLoadBalancerType = Literal["weight_based",]
+
+
+class CreateTranslationLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class CreateTranslationLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateTranslationLoadBalancer1TypedDict(TypedDict):
+    type: CreateTranslationLoadBalancerType
+    models: List[CreateTranslationLoadBalancerModelsTypedDict]
+
+
+class CreateTranslationLoadBalancer1(BaseModel):
+    type: CreateTranslationLoadBalancerType
+
+    models: List[CreateTranslationLoadBalancerModels]
+
+
+CreateTranslationLoadBalancerTypedDict = CreateTranslationLoadBalancer1TypedDict
+r"""Array of models with weights for load balancing requests"""
+
+
+CreateTranslationLoadBalancer = CreateTranslationLoadBalancer1
+r"""Array of models with weights for load balancing requests"""
+
+
+class CreateTranslationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class CreateTranslationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class CreateTranslationOrqTypedDict(TypedDict):
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+    fallbacks: NotRequired[List[CreateTranslationFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[CreateTranslationRetryTypedDict]
+    r"""Retry configuration for the request"""
+    identity: NotRequired[PublicIdentityTypedDict]
+    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
+    contact: NotRequired[PublicContactTypedDict]
+    r"""@deprecated Use identity instead. Information about the contact making the request."""
+    load_balancer: NotRequired[CreateTranslationLoadBalancerTypedDict]
+    r"""Array of models with weights for load balancing requests"""
+    timeout: NotRequired[CreateTranslationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+
+class CreateTranslationOrq(BaseModel):
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
+    fallbacks: Optional[List[CreateTranslationFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[CreateTranslationRetry] = None
+    r"""Retry configuration for the request"""
+
+    identity: Optional[PublicIdentity] = None
+    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
+
+    contact: Annotated[
+        Optional[PublicContact],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
+    r"""@deprecated Use identity instead. Information about the contact making the request."""
+
+    load_balancer: Optional[CreateTranslationLoadBalancer] = None
+    r"""Array of models with weights for load balancing requests"""
+
+    timeout: Optional[CreateTranslationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "name",
+                "fallbacks",
+                "retry",
+                "identity",
+                "contact",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateTranslationFileTypedDict(TypedDict):
+    file_name: str
+    content: Union[bytes, IO[bytes], io.BufferedReader]
+    content_type: NotRequired[str]
+
+
+class CreateTranslationFile(BaseModel):
+    file_name: Annotated[
+        str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True)
+    ]
+
+    content: Annotated[
+        Union[bytes, IO[bytes], io.BufferedReader],
+        pydantic.Field(alias=""),
+        FieldMetadata(multipart=MultipartFormMetadata(content=True)),
+    ]
+
+    content_type: Annotated[
+        Optional[str],
+        pydantic.Field(alias="Content-Type"),
+        FieldMetadata(multipart=True),
+    ] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["contentType"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateTranslationRequestBodyTypedDict(TypedDict):
+    r"""Translates audio into English."""
+
+    model: str
+    r"""ID of the model to use"""
+    prompt: NotRequired[str]
+    r"""An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language."""
+    enable_logging: NotRequired[bool]
+    r"""When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers."""
+    diarize: NotRequired[bool]
+    r"""Whether to annotate which speaker is currently talking in the uploaded file."""
+    response_format: NotRequired[CreateTranslationResponseFormat]
+    r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""
+    tag_audio_events: NotRequired[bool]
+    r"""Whether to tag audio events like (laughter), (footsteps), etc. in the transcription."""
+    num_speakers: NotRequired[float]
+    r"""The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32."""
+    timestamps_granularity: NotRequired[CreateTranslationTimestampsGranularity]
+    r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""
+    temperature: NotRequired[float]
+    r"""The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit."""
+    orq: NotRequired[CreateTranslationOrqTypedDict]
+    file: NotRequired[CreateTranslationFileTypedDict]
+    r"""The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm."""
+
+
+class CreateTranslationRequestBody(BaseModel):
+    r"""Translates audio into English."""
+
+    model: Annotated[str, FieldMetadata(multipart=True)]
+    r"""ID of the model to use"""
+
+    prompt: Annotated[Optional[str], FieldMetadata(multipart=True)] = None
+    r"""An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language."""
+
+    enable_logging: Annotated[Optional[bool], FieldMetadata(multipart=True)] = True
+    r"""When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers."""
+
+    diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False
+    r"""Whether to annotate which speaker is currently talking in the uploaded file."""
+
+    response_format: Annotated[
+        Optional[CreateTranslationResponseFormat], FieldMetadata(multipart=True)
+    ] = None
+    r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""
+
+    tag_audio_events: Annotated[Optional[bool], FieldMetadata(multipart=True)] = True
+    r"""Whether to tag audio events like (laughter), (footsteps), etc. in the transcription."""
+
+    num_speakers: Annotated[Optional[float], FieldMetadata(multipart=True)] = None
+    r"""The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32."""
+
+    timestamps_granularity: Annotated[
+        Optional[CreateTranslationTimestampsGranularity], FieldMetadata(multipart=True)
+    ] = "word"
+    r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""
+
+    temperature: Annotated[Optional[float], FieldMetadata(multipart=True)] = None
+    r"""The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit."""
+
+    orq: Annotated[
+        Optional[CreateTranslationOrq],
+        FieldMetadata(multipart=MultipartFormMetadata(json=True)),
+    ] = None
+
+    file: Annotated[
+        Optional[CreateTranslationFile],
+        FieldMetadata(multipart=MultipartFormMetadata(file=True)),
+    ] = None
+    r"""The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "prompt",
+                "enable_logging",
+                "diarize",
+                "response_format",
+                "tag_audio_events",
+                "num_speakers",
+                "timestamps_granularity",
+                "temperature",
+                "orq",
+                "file",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateTranslationErrorTypedDict(TypedDict):
+    message: str
+    type: str
+    param: Nullable[str]
+    code: str
+
+
+class CreateTranslationError(BaseModel):
+    message: str
+
+    type: str
+
+    param: Nullable[str]
+
+    code: str
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                m[k] = val
+
+        return m
+
+
+class CreateTranslationRouterAudioTranslationsResponseBodyData(BaseModel):
+    error: CreateTranslationError
+
+
+@dataclass(unsafe_hash=True)
+class CreateTranslationRouterAudioTranslationsResponseBody(OrqError):
+    r"""Returns validation error"""
+
+    data: CreateTranslationRouterAudioTranslationsResponseBodyData = field(hash=False)
+
+    def __init__(
+        self,
+        data: CreateTranslationRouterAudioTranslationsResponseBodyData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        fallback = body or raw_response.text
+        message = str(data.error.message) or fallback
+        super().__init__(message, raw_response, body)
+        object.__setattr__(self, "data", data)
+
+
+class ResponseBodyWordsTypedDict(TypedDict):
+    word: NotRequired[str]
+    start: NotRequired[float]
+    end: NotRequired[float]
+
+
+class ResponseBodyWords(BaseModel):
+    word: Optional[str] = None
+
+    start: Optional[float] = None
+
+    end: Optional[float] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["word", "start", "end"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ResponseBodySegmentsTypedDict(TypedDict):
+    id: float
+    seek: float
+    start: float
+    end: float
+    text: str
+    tokens: List[float]
+    temperature: float
+    avg_logprob: float
+    compression_ratio: float
+    no_speech_prob: float
+
+
+class ResponseBodySegments(BaseModel):
+    id: float
+
+    seek: float
+
+    start: float
+
+    end: float
+
+    text: str
+
+    tokens: List[float]
+
+    temperature: float
+
+    avg_logprob: float
+
+    compression_ratio: float
+
+    no_speech_prob: float
+
+
+class CreateTranslationResponseBody2TypedDict(TypedDict):
+    text: str
+    task: NotRequired[str]
+    language: NotRequired[str]
+    duration: NotRequired[float]
+    words: NotRequired[List[ResponseBodyWordsTypedDict]]
+    segments: NotRequired[List[ResponseBodySegmentsTypedDict]]
+
+
+class CreateTranslationResponseBody2(BaseModel):
+    text: str
+
+    task: Optional[str] = None
+
+    language: Optional[str] = None
+
+    duration: Optional[float] = None
+
+    words: Optional[List[ResponseBodyWords]] = None
+
+    segments: Optional[List[ResponseBodySegments]] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["task", "language", "duration", "words", "segments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateTranslationResponseBody1TypedDict(TypedDict):
+    text: str
+
+
+class CreateTranslationResponseBody1(BaseModel):
+    text: str
+
+
+CreateTranslationResponseBodyTypedDict = TypeAliasType(
+    "CreateTranslationResponseBodyTypedDict",
+    Union[
+        CreateTranslationResponseBody1TypedDict,
+        CreateTranslationResponseBody2TypedDict,
+        str,
+    ],
+)
+r"""Returns the translated text"""
+
+
+CreateTranslationResponseBody = TypeAliasType(
+    "CreateTranslationResponseBody",
+    Union[CreateTranslationResponseBody1, CreateTranslationResponseBody2, str],
+)
+r"""Returns the translated text"""
orq_ai_sdk/models/datapart.py
@@ -1,7 +1,8 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from orq_ai_sdk.types import BaseModel
+from orq_ai_sdk.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
 from typing import Any, Dict, Literal, Optional
 from typing_extensions import NotRequired, TypedDict
 
@@ -25,3 +26,19 @@ class DataPart(BaseModel):
     data: Dict[str, Any]
 
     metadata: Optional[Dict[str, Any]] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["metadata"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
orq_ai_sdk/models/deletechunksop.py
@@ -1,8 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from orq_ai_sdk.types import BaseModel
+from orq_ai_sdk.types import BaseModel, UNSET_SENTINEL
 from orq_ai_sdk.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from pydantic import model_serializer
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -41,6 +42,22 @@ class DeleteChunksRequest(BaseModel):
         FieldMetadata(request=RequestMetadata(media_type="application/json")),
     ] = None
 
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["RequestBody"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+        return m
+
 
 class DeleteChunksResponseBodyTypedDict(TypedDict):
     r"""Chunks deletion result"""
@@ -59,3 +76,19 @@ class DeleteChunksResponseBody(BaseModel):
 
     failed_ids: Optional[List[str]] = None
     r"""Array of chunk IDs that failed to delete"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["failed_ids"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
orq_ai_sdk/models/deletecontactop.py → orq_ai_sdk/models/deleteidentityop.py
@@ -10,32 +10,32 @@ from typing import Optional
 from typing_extensions import Annotated, TypedDict
 
 
-class DeleteContactRequestTypedDict(TypedDict):
+class DeleteIdentityRequestTypedDict(TypedDict):
     id: str
-    r"""Contact ID or External ID"""
+    r"""Identity ID or External ID"""
 
 
-class DeleteContactRequest(BaseModel):
+class DeleteIdentityRequest(BaseModel):
     id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
-    r"""Contact ID or External ID"""
+    r"""Identity ID or External ID"""
 
 
-class DeleteContactResponseBodyData(BaseModel):
+class DeleteIdentityResponseBodyData(BaseModel):
     error: str
     r"""Error message"""
 
 
 @dataclass(unsafe_hash=True)
-class DeleteContactResponseBody(OrqError):
-    r"""Contact not found"""
+class DeleteIdentityResponseBody(OrqError):
+    r"""Identity not found"""
 
-    data: DeleteContactResponseBodyData = field(hash=False)
+    data: DeleteIdentityResponseBodyData = field(hash=False)
 
     def __init__(
         self,
-        data: DeleteContactResponseBodyData,
+        data: DeleteIdentityResponseBodyData,
         raw_response: httpx.Response,
         body: Optional[str] = None,
     ):
orq_ai_sdk/models/deletepromptop.py
@@ -1,8 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from dataclasses import dataclass, field
+import httpx
+from orq_ai_sdk.models import OrqError
 from orq_ai_sdk.types import BaseModel
 from orq_ai_sdk.utils import FieldMetadata, PathParamMetadata
+from typing import Optional
 from typing_extensions import Annotated, TypedDict
 
 
@@ -16,3 +20,25 @@ class DeletePromptRequest(BaseModel):
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
     r"""Unique identifier of the prompt"""
+
+
+class DeletePromptResponseBodyData(BaseModel):
+    message: str
+
+
+@dataclass(unsafe_hash=True)
+class DeletePromptResponseBody(OrqError):
+    r"""Prompt not found."""
+
+    data: DeletePromptResponseBodyData = field(hash=False)
+
+    def __init__(
+        self,
+        data: DeletePromptResponseBodyData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        fallback = body or raw_response.text
+        message = str(data.message) or fallback
+        super().__init__(message, raw_response, body)
+        object.__setattr__(self, "data", data)
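Note (not part of the diff): a minimal sketch of the new DeletePromptResponseBody error type defined above. Per its constructor, the error message comes from data.message and falls back to the response body or text. The httpx.Response below is fabricated locally for illustration; in real use the SDK raises this error from an actual HTTP response.

# Illustrative sketch only: exercises the error class added in this hunk.
import httpx

from orq_ai_sdk.models.deletepromptop import (
    DeletePromptResponseBody,
    DeletePromptResponseBodyData,
)

raw = httpx.Response(404, text='{"message": "Prompt not found."}')  # fabricated response
err = DeletePromptResponseBody(
    data=DeletePromptResponseBodyData(message="Prompt not found."),
    raw_response=raw,
)

print(err)               # error message is taken from data.message, raw body as fallback
print(err.data.message)  # "Prompt not found." — structured payload kept on the exception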