orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/sdk.py CHANGED
@@ -26,6 +26,7 @@ if TYPE_CHECKING:
26
26
  from orq_ai_sdk.evaluators import Evaluators
27
27
  from orq_ai_sdk.feedback import Feedback
28
28
  from orq_ai_sdk.files import Files
29
+ from orq_ai_sdk.identities import Identities
29
30
  from orq_ai_sdk.internal import Internal
30
31
  from orq_ai_sdk.knowledge import Knowledge
31
32
  from orq_ai_sdk.memorystores import MemoryStores
@@ -45,6 +46,7 @@ class Orq(BaseSDK):
45
46
  feedback: "Feedback"
46
47
  evals: "Evals"
47
48
  evaluators: "Evaluators"
49
+ identities: "Identities"
48
50
  deployments: "Deployments"
49
51
  agents: "Agents"
50
52
  conversations: "Conversations"
@@ -64,6 +66,7 @@ class Orq(BaseSDK):
64
66
  "feedback": ("orq_ai_sdk.feedback", "Feedback"),
65
67
  "evals": ("orq_ai_sdk.evals", "Evals"),
66
68
  "evaluators": ("orq_ai_sdk.evaluators", "Evaluators"),
69
+ "identities": ("orq_ai_sdk.identities", "Identities"),
67
70
  "deployments": ("orq_ai_sdk.deployments", "Deployments"),
68
71
  "agents": ("orq_ai_sdk.agents", "Agents"),
69
72
  "conversations": ("orq_ai_sdk.conversations", "Conversations"),
orq_ai_sdk/speech.py ADDED
@@ -0,0 +1,251 @@
1
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
+
3
+ from .basesdk import BaseSDK
4
+ from orq_ai_sdk import models, utils
5
+ from orq_ai_sdk._hooks import HookContext
6
+ from orq_ai_sdk.models import createspeechop as models_createspeechop
7
+ from orq_ai_sdk.types import OptionalNullable, UNSET
8
+ from orq_ai_sdk.utils import get_security_from_env
9
+ from typing import Mapping, Optional, Union
10
+
11
+
12
class Speech(BaseSDK):
    r"""Generated client for the text-to-speech router endpoint
    (``POST /v2/router/audio/speech``).

    Code generated by Speakeasy — method bodies follow the standard generated
    request/response pattern and should not be hand-edited beyond documentation.
    """

    def create(
        self,
        *,
        input_: str,
        model: str,
        voice: str,
        response_format: Optional[
            models_createspeechop.CreateSpeechResponseFormat
        ] = "mp3",
        speed: Optional[float] = 1,
        orq: Optional[
            Union[
                models_createspeechop.CreateSpeechOrq,
                models_createspeechop.CreateSpeechOrqTypedDict,
            ]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> None:
        r"""Create speech

        Generates audio from the input text.

        :param input: The text to generate audio for. The maximum length is 4096 characters
        :param model: ID of the model to use
        :param voice: The voice to use.

        Available voices for OpenAI

        `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`

        Available voices for ElevenLabs

        `aria`, `roger`, `sarah`, `laura`, `charlie`, `george`, `callum`, `river`, `liam`, `charlotte`, `alice`, `matilda`, `will`, `jessica`, `eric`, `chris`, `brian`, `daniel`, `lily`, `bill`
        :param response_format: The format to generate audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. If a format is provided but not supported by the provider, the response will be in the default format.
        :param speed: The speed of the generated audio.
        :param orq:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Effective timeout: per-call override, then SDK config, then 10 minutes.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateSpeechRequestBody(
            input=input_,
            model=model,
            voice=voice,
            response_format=response_format,
            speed=speed,
            orq=utils.get_pydantic_model(orq, Optional[models.CreateSpeechOrq]),
        )

        req = self._build_request(
            method="POST",
            path="/v2/router/audio/speech",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="*/*",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.CreateSpeechRequestBody
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # UNSET (not None) means "no per-call override": fall back to SDK-level retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createSpeech",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        # On 200 this generated method returns None; the response body is not
        # deserialized here.
        if utils.match_response(http_res, "200", "*"):
            return
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        raise models.APIError("Unexpected response received", http_res)

    async def create_async(
        self,
        *,
        input_: str,
        model: str,
        voice: str,
        response_format: Optional[
            models_createspeechop.CreateSpeechResponseFormat
        ] = "mp3",
        speed: Optional[float] = 1,
        orq: Optional[
            Union[
                models_createspeechop.CreateSpeechOrq,
                models_createspeechop.CreateSpeechOrqTypedDict,
            ]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> None:
        r"""Create speech

        Generates audio from the input text.

        :param input: The text to generate audio for. The maximum length is 4096 characters
        :param model: ID of the model to use
        :param voice: The voice to use.

        Available voices for OpenAI

        `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`

        Available voices for ElevenLabs

        `aria`, `roger`, `sarah`, `laura`, `charlie`, `george`, `callum`, `river`, `liam`, `charlotte`, `alice`, `matilda`, `will`, `jessica`, `eric`, `chris`, `brian`, `daniel`, `lily`, `bill`
        :param response_format: The format to generate audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. If a format is provided but not supported by the provider, the response will be in the default format.
        :param speed: The speed of the generated audio.
        :param orq:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Effective timeout: per-call override, then SDK config, then 10 minutes.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateSpeechRequestBody(
            input=input_,
            model=model,
            voice=voice,
            response_format=response_format,
            speed=speed,
            orq=utils.get_pydantic_model(orq, Optional[models.CreateSpeechOrq]),
        )

        req = self._build_request_async(
            method="POST",
            path="/v2/router/audio/speech",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="*/*",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.CreateSpeechRequestBody
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # UNSET (not None) means "no per-call override": fall back to SDK-level retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createSpeech",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        # On 200 this generated method returns None; the response body is not
        # deserialized here.
        if utils.match_response(http_res, "200", "*"):
            return
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        raise models.APIError("Unexpected response received", http_res)
orq_ai_sdk/transcriptions.py ADDED
@@ -0,0 +1,326 @@
1
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
+
3
+ from .basesdk import BaseSDK
4
+ from orq_ai_sdk import models, utils
5
+ from orq_ai_sdk._hooks import HookContext
6
+ from orq_ai_sdk.models import createtranscriptionop as models_createtranscriptionop
7
+ from orq_ai_sdk.types import OptionalNullable, UNSET
8
+ from orq_ai_sdk.utils import get_security_from_env
9
+ from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
10
+ from typing import Any, List, Mapping, Optional, Union
11
+
12
+
13
class Transcriptions(BaseSDK):
    r"""Generated client for the speech-to-text router endpoint
    (``POST /v2/router/audio/transcriptions``).

    Code generated by Speakeasy — method bodies follow the standard generated
    request/response pattern and should not be hand-edited beyond documentation.
    """

    def create(
        self,
        *,
        model: str,
        prompt: Optional[str] = None,
        enable_logging: Optional[bool] = True,
        diarize: Optional[bool] = False,
        response_format: Optional[
            models_createtranscriptionop.CreateTranscriptionResponseFormat
        ] = None,
        tag_audio_events: Optional[bool] = True,
        num_speakers: Optional[float] = None,
        timestamps_granularity: Optional[
            models_createtranscriptionop.TimestampsGranularity
        ] = "word",
        temperature: Optional[float] = None,
        language: Optional[str] = None,
        timestamp_granularities: Optional[
            List[models_createtranscriptionop.TimestampGranularities]
        ] = None,
        orq: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionOrq,
                models_createtranscriptionop.CreateTranscriptionOrqTypedDict,
            ]
        ] = None,
        file: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionFile,
                models_createtranscriptionop.CreateTranscriptionFileTypedDict,
            ]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.CreateTranscriptionResponseBody:
        r"""Create transcription

        :param model: ID of the model to use
        :param prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
        :param enable_logging: When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers.
        :param diarize: Whether to annotate which speaker is currently talking in the uploaded file.
        :param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
        :param tag_audio_events: Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
        :param num_speakers: The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32.
        :param timestamps_granularity: The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word.
        :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
        :param language: The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
        :param timestamp_granularities: The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
        :param orq:
        :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Effective timeout: per-call override, then SDK config, then 10 minutes.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateTranscriptionRequestBody(
            model=model,
            prompt=prompt,
            enable_logging=enable_logging,
            diarize=diarize,
            response_format=response_format,
            tag_audio_events=tag_audio_events,
            num_speakers=num_speakers,
            timestamps_granularity=timestamps_granularity,
            temperature=temperature,
            language=language,
            timestamp_granularities=timestamp_granularities,
            orq=utils.get_pydantic_model(orq, Optional[models.CreateTranscriptionOrq]),
            file=utils.get_pydantic_model(
                file, Optional[models.CreateTranscriptionFile]
            ),
        )

        # Body is serialized as multipart form data (the audio file is uploaded
        # as a form part).
        req = self._build_request(
            method="POST",
            path="/v2/router/audio/transcriptions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request,
                False,
                False,
                "multipart",
                models.CreateTranscriptionRequestBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # UNSET (not None) means "no per-call override": fall back to SDK-level retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createTranscription",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.CreateTranscriptionResponseBody, http_res
            )
        # 422 payloads are deserialized into a typed error model and re-raised.
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.CreateTranscriptionRouterAudioTranscriptionsResponseBodyData,
                http_res,
            )
            raise models.CreateTranscriptionRouterAudioTranscriptionsResponseBody(
                response_data, http_res
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        raise models.APIError("Unexpected response received", http_res)

    async def create_async(
        self,
        *,
        model: str,
        prompt: Optional[str] = None,
        enable_logging: Optional[bool] = True,
        diarize: Optional[bool] = False,
        response_format: Optional[
            models_createtranscriptionop.CreateTranscriptionResponseFormat
        ] = None,
        tag_audio_events: Optional[bool] = True,
        num_speakers: Optional[float] = None,
        timestamps_granularity: Optional[
            models_createtranscriptionop.TimestampsGranularity
        ] = "word",
        temperature: Optional[float] = None,
        language: Optional[str] = None,
        timestamp_granularities: Optional[
            List[models_createtranscriptionop.TimestampGranularities]
        ] = None,
        orq: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionOrq,
                models_createtranscriptionop.CreateTranscriptionOrqTypedDict,
            ]
        ] = None,
        file: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionFile,
                models_createtranscriptionop.CreateTranscriptionFileTypedDict,
            ]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.CreateTranscriptionResponseBody:
        r"""Create transcription

        :param model: ID of the model to use
        :param prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
        :param enable_logging: When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers.
        :param diarize: Whether to annotate which speaker is currently talking in the uploaded file.
        :param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
        :param tag_audio_events: Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
        :param num_speakers: The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32.
        :param timestamps_granularity: The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word.
        :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
        :param language: The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
        :param timestamp_granularities: The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
        :param orq:
        :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Effective timeout: per-call override, then SDK config, then 10 minutes.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateTranscriptionRequestBody(
            model=model,
            prompt=prompt,
            enable_logging=enable_logging,
            diarize=diarize,
            response_format=response_format,
            tag_audio_events=tag_audio_events,
            num_speakers=num_speakers,
            timestamps_granularity=timestamps_granularity,
            temperature=temperature,
            language=language,
            timestamp_granularities=timestamp_granularities,
            orq=utils.get_pydantic_model(orq, Optional[models.CreateTranscriptionOrq]),
            file=utils.get_pydantic_model(
                file, Optional[models.CreateTranscriptionFile]
            ),
        )

        # Body is serialized as multipart form data (the audio file is uploaded
        # as a form part).
        req = self._build_request_async(
            method="POST",
            path="/v2/router/audio/transcriptions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request,
                False,
                False,
                "multipart",
                models.CreateTranscriptionRequestBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # UNSET (not None) means "no per-call override": fall back to SDK-level retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createTranscription",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.CreateTranscriptionResponseBody, http_res
            )
        # 422 payloads are deserialized into a typed error model and re-raised.
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.CreateTranscriptionRouterAudioTranscriptionsResponseBodyData,
                http_res,
            )
            raise models.CreateTranscriptionRouterAudioTranscriptionsResponseBody(
                response_data, http_res
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        raise models.APIError("Unexpected response received", http_res)