orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5746 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1945 -383
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1381 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +407 -128
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2759 -1251
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1696 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1679 -230
- orq_ai_sdk/models/getpromptversionop.py +1676 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +805 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1690 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1462 -196
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1439 -194
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1968 -397
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2854 -1448
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +364 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
- orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
|
@@ -0,0 +1,466 @@
|
|
|
1
|
+
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
from .publiccontact import PublicContact, PublicContactTypedDict
|
|
5
|
+
from .publicidentity import PublicIdentity, PublicIdentityTypedDict
|
|
6
|
+
from orq_ai_sdk.types import BaseModel, UNSET_SENTINEL
|
|
7
|
+
import pydantic
|
|
8
|
+
from pydantic import model_serializer
|
|
9
|
+
from typing import List, Literal, Optional
|
|
10
|
+
from typing_extensions import Annotated, NotRequired, TypedDict
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Closed set of audio container/codec formats accepted by the endpoint.
CreateSpeechResponseFormat = Literal[
    "mp3",
    "opus",
    "aac",
    "flac",
    "wav",
    "pcm",
]
r"""The format to return the audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. When the provided format is not supported by the provider, the response will be in the provider's default format."""
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class CreateSpeechFallbacksTypedDict(TypedDict):
    r"""Dict form of a single fallback-model entry."""

    model: str
    r"""Fallback model identifier"""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class CreateSpeechFallbacks(BaseModel):
    r"""A single fallback-model entry (model form of CreateSpeechFallbacksTypedDict)."""

    model: str
    r"""Fallback model identifier"""
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class CreateSpeechRetryTypedDict(TypedDict):
    r"""Retry configuration for the request"""

    # Both fields may be omitted entirely (NotRequired).
    count: NotRequired[float]
    r"""Number of retry attempts (1-5)"""
    on_codes: NotRequired[List[float]]
    r"""HTTP status codes that trigger retry logic"""
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class CreateSpeechRetry(BaseModel):
    r"""Retry configuration for the request"""

    count: Optional[float] = 3
    r"""Number of retry attempts (1-5)"""

    on_codes: Optional[List[float]] = None
    r"""HTTP status codes that trigger retry logic"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Serialize via pydantic, then drop unset sentinels and explicit
        # ``None`` values of optional fields from the output mapping.
        optional_fields = {"count", "on_codes"}
        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL and (
                value is not None or key not in optional_fields
            ):
                out[key] = value

        return out
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
# Discriminator for the load-balancer variant; only weight-based balancing exists here.
CreateSpeechLoadBalancerType = Literal["weight_based",]
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class CreateSpeechLoadBalancerModelsTypedDict(TypedDict):
    r"""Dict form of a weighted load-balancing model entry."""

    model: str
    r"""Model identifier for load balancing"""
    weight: NotRequired[float]
    r"""Weight assigned to this model for load balancing"""
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class CreateSpeechLoadBalancerModels(BaseModel):
    r"""A weighted load-balancing model entry."""

    model: str
    r"""Model identifier for load balancing"""

    weight: Optional[float] = 0.5
    r"""Weight assigned to this model for load balancing"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Serialize via pydantic, then drop unset sentinels and explicit
        # ``None`` values of optional fields from the output mapping.
        optional_fields = {"weight"}
        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL and (
                value is not None or key not in optional_fields
            ):
                out[key] = value

        return out
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
class CreateSpeechLoadBalancer1TypedDict(TypedDict):
    r"""Weight-based load-balancer configuration (dict form)."""

    type: CreateSpeechLoadBalancerType
    # Entries participating in load balancing, each with an optional weight.
    models: List[CreateSpeechLoadBalancerModelsTypedDict]
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class CreateSpeechLoadBalancer1(BaseModel):
    r"""Weight-based load-balancer configuration."""

    type: CreateSpeechLoadBalancerType

    models: List[CreateSpeechLoadBalancerModels]
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# The load-balancer "union" currently has a single variant, so these aliases
# point straight at the weight-based form.
CreateSpeechLoadBalancerTypedDict = CreateSpeechLoadBalancer1TypedDict
r"""Load balancer configuration for the request."""


CreateSpeechLoadBalancer = CreateSpeechLoadBalancer1
r"""Load balancer configuration for the request."""
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
class CreateSpeechTimeoutTypedDict(TypedDict):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    # Required — no NotRequired wrapper, so callers must always supply it.
    call_timeout: float
    r"""Timeout value in milliseconds"""
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
class CreateSpeechTimeout(BaseModel):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    # Required field: no default is declared.
    call_timeout: float
    r"""Timeout value in milliseconds"""
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
class CreateSpeechRouterAudioSpeechRetryTypedDict(TypedDict):
    r"""Retry configuration for the request"""

    # Both fields may be omitted entirely (NotRequired).
    count: NotRequired[float]
    r"""Number of retry attempts (1-5)"""
    on_codes: NotRequired[List[float]]
    r"""HTTP status codes that trigger retry logic"""
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
class CreateSpeechRouterAudioSpeechRetry(BaseModel):
    r"""Retry configuration for the request"""

    count: Optional[float] = 3
    r"""Number of retry attempts (1-5)"""

    on_codes: Optional[List[float]] = None
    r"""HTTP status codes that trigger retry logic"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Serialize via pydantic, then drop unset sentinels and explicit
        # ``None`` values of optional fields from the output mapping.
        optional_fields = {"count", "on_codes"}
        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL and (
                value is not None or key not in optional_fields
            ):
                out[key] = value

        return out
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
class CreateSpeechRouterAudioSpeechFallbacksTypedDict(TypedDict):
    r"""Dict form of a single fallback-model entry."""

    model: str
    r"""Fallback model identifier"""
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
class CreateSpeechRouterAudioSpeechFallbacks(BaseModel):
    r"""A single fallback-model entry."""

    model: str
    r"""Fallback model identifier"""
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
class CreateSpeechThreadTypedDict(TypedDict):
    r"""Thread information to group related requests"""

    id: str
    r"""Unique thread identifier to group related invocations."""
    tags: NotRequired[List[str]]
    r"""Optional tags to differentiate or categorize threads"""
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
class CreateSpeechThread(BaseModel):
    r"""Thread information to group related requests"""

    id: str
    r"""Unique thread identifier to group related invocations."""

    tags: Optional[List[str]] = None
    r"""Optional tags to differentiate or categorize threads"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Serialize via pydantic, then drop unset sentinels and an explicit
        # ``None`` for the optional ``tags`` field from the output mapping.
        optional_fields = {"tags"}
        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL and (
                value is not None or key not in optional_fields
            ):
                out[key] = value

        return out
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
# Discriminator for the router-level load-balancer variant; only weight-based exists.
CreateSpeechLoadBalancerRouterAudioSpeechType = Literal["weight_based",]
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
class CreateSpeechLoadBalancerRouterAudioSpeechModelsTypedDict(TypedDict):
    r"""Dict form of a weighted load-balancing model entry."""

    model: str
    r"""Model identifier for load balancing"""
    weight: NotRequired[float]
    r"""Weight assigned to this model for load balancing"""
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
class CreateSpeechLoadBalancerRouterAudioSpeechModels(BaseModel):
    r"""A weighted load-balancing model entry."""

    model: str
    r"""Model identifier for load balancing"""

    weight: Optional[float] = 0.5
    r"""Weight assigned to this model for load balancing"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Serialize via pydantic, then drop unset sentinels and an explicit
        # ``None`` for the optional ``weight`` field from the output mapping.
        optional_fields = {"weight"}
        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL and (
                value is not None or key not in optional_fields
            ):
                out[key] = value

        return out
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
class CreateSpeechLoadBalancerRouterAudioSpeech1TypedDict(TypedDict):
    r"""Weight-based load-balancer configuration (dict form)."""

    type: CreateSpeechLoadBalancerRouterAudioSpeechType
    models: List[CreateSpeechLoadBalancerRouterAudioSpeechModelsTypedDict]
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
class CreateSpeechLoadBalancerRouterAudioSpeech1(BaseModel):
    r"""Weight-based load-balancer configuration."""

    type: CreateSpeechLoadBalancerRouterAudioSpeechType

    models: List[CreateSpeechLoadBalancerRouterAudioSpeechModels]
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
# Single-variant "union": both aliases resolve to the weight-based form.
CreateSpeechRouterAudioSpeechLoadBalancerTypedDict = (
    CreateSpeechLoadBalancerRouterAudioSpeech1TypedDict
)
r"""Array of models with weights for load balancing requests"""


CreateSpeechRouterAudioSpeechLoadBalancer = CreateSpeechLoadBalancerRouterAudioSpeech1
r"""Array of models with weights for load balancing requests"""
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
class CreateSpeechRouterAudioSpeechTimeoutTypedDict(TypedDict):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    # Required — no NotRequired wrapper, so callers must always supply it.
    call_timeout: float
    r"""Timeout value in milliseconds"""
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
class CreateSpeechRouterAudioSpeechTimeout(BaseModel):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    # Required field: no default is declared.
    call_timeout: float
    r"""Timeout value in milliseconds"""
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
class CreateSpeechOrqTypedDict(TypedDict):
    r"""Orq-specific request options (dict form); every field is optional."""

    retry: NotRequired[CreateSpeechRouterAudioSpeechRetryTypedDict]
    r"""Retry configuration for the request"""
    fallbacks: NotRequired[List[CreateSpeechRouterAudioSpeechFallbacksTypedDict]]
    r"""Array of fallback models to use if primary model fails"""
    name: NotRequired[str]
    r"""The name to display on the trace. If not specified, the default system name will be used."""
    identity: NotRequired[PublicIdentityTypedDict]
    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
    contact: NotRequired[PublicContactTypedDict]
    r"""@deprecated Use identity instead. Information about the contact making the request."""
    thread: NotRequired[CreateSpeechThreadTypedDict]
    r"""Thread information to group related requests"""
    load_balancer: NotRequired[CreateSpeechRouterAudioSpeechLoadBalancerTypedDict]
    r"""Array of models with weights for load balancing requests"""
    timeout: NotRequired[CreateSpeechRouterAudioSpeechTimeoutTypedDict]
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
class CreateSpeechOrq(BaseModel):
    r"""Orq-specific request options; every field is optional."""

    retry: Optional[CreateSpeechRouterAudioSpeechRetry] = None
    r"""Retry configuration for the request"""

    fallbacks: Optional[List[CreateSpeechRouterAudioSpeechFallbacks]] = None
    r"""Array of fallback models to use if primary model fails"""

    name: Optional[str] = None
    r"""The name to display on the trace. If not specified, the default system name will be used."""

    identity: Optional[PublicIdentity] = None
    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""

    contact: Annotated[
        Optional[PublicContact],
        pydantic.Field(
            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
        ),
    ] = None
    r"""@deprecated Use identity instead. Information about the contact making the request."""

    thread: Optional[CreateSpeechThread] = None
    r"""Thread information to group related requests"""

    load_balancer: Optional[CreateSpeechRouterAudioSpeechLoadBalancer] = None
    r"""Array of models with weights for load balancing requests"""

    timeout: Optional[CreateSpeechRouterAudioSpeechTimeout] = None
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # All fields here are optional; strip unset sentinels and explicit
        # ``None`` values so only caller-provided options remain.
        optional_fields = {
            "retry",
            "fallbacks",
            "name",
            "identity",
            "contact",
            "thread",
            "load_balancer",
            "timeout",
        }
        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL and (
                value is not None or key not in optional_fields
            ):
                out[key] = value

        return out
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
class CreateSpeechRequestBodyTypedDict(TypedDict):
    r"""Dict form of the create-speech request body; `input`, `model` and `voice` are required."""

    input: str
    r"""The text to generate audio for. The maximum length is 4096 characters"""
    model: str
    r"""ID of the model to use"""
    voice: str
    r"""The voice to use.

    Available voices for OpenAI

    `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`

    Available voices for ElevenLabs

    `aria`, `roger`, `sarah`, `laura`, `charlie`, `george`, `callum`, `river`, `liam`, `charlotte`, `alice`, `matilda`, `will`, `jessica`, `eric`, `chris`, `brian`, `daniel`, `lily`, `bill`
    """
    response_format: NotRequired[CreateSpeechResponseFormat]
    r"""The format to return the audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. When the provided format is not supported by the provider, the response will be in the provider's default format."""
    speed: NotRequired[float]
    r"""The speed of the generated audio."""
    name: NotRequired[str]
    r"""The name to display on the trace. If not specified, the default system name will be used."""
    fallbacks: NotRequired[List[CreateSpeechFallbacksTypedDict]]
    r"""Array of fallback models to use if primary model fails"""
    retry: NotRequired[CreateSpeechRetryTypedDict]
    r"""Retry configuration for the request"""
    load_balancer: NotRequired[CreateSpeechLoadBalancerTypedDict]
    r"""Load balancer configuration for the request."""
    timeout: NotRequired[CreateSpeechTimeoutTypedDict]
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
    orq: NotRequired[CreateSpeechOrqTypedDict]
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
class CreateSpeechRequestBody(BaseModel):
    r"""Create-speech request body; `input`, `model` and `voice` are required."""

    input: str
    r"""The text to generate audio for. The maximum length is 4096 characters"""

    model: str
    r"""ID of the model to use"""

    voice: str
    r"""The voice to use.

    Available voices for OpenAI

    `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`

    Available voices for ElevenLabs

    `aria`, `roger`, `sarah`, `laura`, `charlie`, `george`, `callum`, `river`, `liam`, `charlotte`, `alice`, `matilda`, `will`, `jessica`, `eric`, `chris`, `brian`, `daniel`, `lily`, `bill`
    """

    response_format: Optional[CreateSpeechResponseFormat] = "mp3"
    r"""The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. If a format is provided but not supported by the provider, the response will be in the default format. When the provided format is not supported by the provider, the response will be in the default format."""

    speed: Optional[float] = 1
    r"""The speed of the generated audio."""

    name: Optional[str] = None
    r"""The name to display on the trace. If not specified, the default system name will be used."""

    fallbacks: Optional[List[CreateSpeechFallbacks]] = None
    r"""Array of fallback models to use if primary model fails"""

    retry: Optional[CreateSpeechRetry] = None
    r"""Retry configuration for the request"""

    load_balancer: Optional[CreateSpeechLoadBalancer] = None
    r"""Load balancer configuration for the request."""

    timeout: Optional[CreateSpeechTimeout] = None
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    orq: Optional[CreateSpeechOrq] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # ``input``, ``model`` and ``voice`` are always emitted; the optional
        # fields are dropped when unset or explicitly ``None``.
        optional_fields = {
            "response_format",
            "speed",
            "name",
            "fallbacks",
            "retry",
            "load_balancer",
            "timeout",
            "orq",
        }
        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL and (
                value is not None or key not in optional_fields
            ):
                out[key] = value

        return out
|