orq-ai-sdk 4.2.0rc48__py3-none-any.whl → 4.2.12__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -886
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +3839 -424
- orq_ai_sdk/models/conversationresponse.py +1 -1
- orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
- orq_ai_sdk/models/createagentrequestop.py +768 -12
- orq_ai_sdk/models/createagentresponse.py +68 -2
- orq_ai_sdk/models/createchatcompletionop.py +538 -313
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +5 -10
- orq_ai_sdk/models/createconversationop.py +1 -1
- orq_ai_sdk/models/createconversationresponseop.py +2 -2
- orq_ai_sdk/models/createdatasetitemop.py +4 -4
- orq_ai_sdk/models/createdatasetop.py +1 -1
- orq_ai_sdk/models/createdatasourceop.py +1 -1
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +14 -14
- orq_ai_sdk/models/createidentityop.py +1 -1
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +228 -82
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/creatememoryop.py +4 -2
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +375 -6
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +6 -6
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
- orq_ai_sdk/models/deploymentsop.py +1 -0
- orq_ai_sdk/models/deploymentstreamop.py +7 -0
- orq_ai_sdk/models/filegetop.py +1 -1
- orq_ai_sdk/models/filelistop.py +1 -1
- orq_ai_sdk/models/fileuploadop.py +1 -1
- orq_ai_sdk/models/generateconversationnameop.py +1 -1
- orq_ai_sdk/models/getallmemoriesop.py +4 -2
- orq_ai_sdk/models/getallpromptsop.py +188 -3
- orq_ai_sdk/models/getalltoolsop.py +6 -6
- orq_ai_sdk/models/getevalsop.py +17 -17
- orq_ai_sdk/models/getonepromptop.py +188 -3
- orq_ai_sdk/models/getpromptversionop.py +188 -3
- orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
- orq_ai_sdk/models/listagentsop.py +372 -0
- orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
- orq_ai_sdk/models/listdatasetsop.py +1 -1
- orq_ai_sdk/models/listdatasourcesop.py +1 -1
- orq_ai_sdk/models/listidentitiesop.py +1 -1
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +188 -3
- orq_ai_sdk/models/partdoneevent.py +1 -1
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +9 -3
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +1 -1
- orq_ai_sdk/models/responsedoneevent.py +14 -11
- orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
- orq_ai_sdk/models/retrievedatapointop.py +4 -4
- orq_ai_sdk/models/retrievedatasetop.py +1 -1
- orq_ai_sdk/models/retrievedatasourceop.py +1 -1
- orq_ai_sdk/models/retrieveidentityop.py +1 -1
- orq_ai_sdk/models/retrievememoryop.py +4 -2
- orq_ai_sdk/models/retrievetoolop.py +6 -6
- orq_ai_sdk/models/runagentop.py +379 -9
- orq_ai_sdk/models/streamrunagentop.py +385 -9
- orq_ai_sdk/models/updateagentop.py +770 -12
- orq_ai_sdk/models/updateconversationop.py +1 -1
- orq_ai_sdk/models/updatedatapointop.py +4 -4
- orq_ai_sdk/models/updatedatasetop.py +1 -1
- orq_ai_sdk/models/updatedatasourceop.py +1 -1
- orq_ai_sdk/models/updateevalop.py +14 -14
- orq_ai_sdk/models/updateidentityop.py +1 -1
- orq_ai_sdk/models/updatememoryop.py +4 -2
- orq_ai_sdk/models/updatepromptop.py +375 -6
- orq_ai_sdk/models/updatetoolop.py +7 -7
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/variations.py +364 -0
- orq_ai_sdk-4.2.12.dist-info/METADATA +888 -0
- {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/RECORD +98 -75
- {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/WHEEL +1 -1
- orq_ai_sdk/models/deletecontactop.py +0 -44
- orq_ai_sdk/models/listcontactsop.py +0 -265
- orq_ai_sdk/models/retrievecontactop.py +0 -142
- orq_ai_sdk/models/updatecontactop.py +0 -233
- orq_ai_sdk-4.2.0rc48.dist-info/METADATA +0 -788
- {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/top_level.txt +0 -0
orq_ai_sdk/orq_responses.py (new file)
@@ -0,0 +1,398 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from enum import Enum
from orq_ai_sdk import models, utils
from orq_ai_sdk._hooks import HookContext
from orq_ai_sdk.models import createresponseop as models_createresponseop
from orq_ai_sdk.types import OptionalNullable, UNSET
from orq_ai_sdk.utils import eventstreaming, get_security_from_env
from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
from typing import Dict, List, Mapping, Optional, Union


class CreateAcceptEnum(str, Enum):
    APPLICATION_JSON = "application/json"
    TEXT_EVENT_STREAM = "text/event-stream"


class OrqResponses(BaseSDK):
    def create(
        self,
        *,
        model: str,
        input_: Union[
            models_createresponseop.CreateResponseInput,
            models_createresponseop.CreateResponseInputTypedDict,
        ],
        metadata: Optional[Dict[str, str]] = None,
        temperature: OptionalNullable[float] = UNSET,
        top_p: OptionalNullable[float] = UNSET,
        previous_response_id: OptionalNullable[str] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        reasoning: OptionalNullable[
            Union[
                models_createresponseop.Reasoning,
                models_createresponseop.ReasoningTypedDict,
            ]
        ] = UNSET,
        max_output_tokens: OptionalNullable[int] = UNSET,
        text: OptionalNullable[
            Union[
                models_createresponseop.CreateResponseText,
                models_createresponseop.CreateResponseTextTypedDict,
            ]
        ] = UNSET,
        include: OptionalNullable[List[models_createresponseop.Include]] = UNSET,
        parallel_tool_calls: OptionalNullable[bool] = UNSET,
        store: OptionalNullable[bool] = True,
        tools: Optional[
            Union[
                List[models_createresponseop.CreateResponseTools],
                List[models_createresponseop.CreateResponseToolsTypedDict],
            ]
        ] = None,
        tool_choice: Optional[
            Union[
                models_createresponseop.CreateResponseToolChoice,
                models_createresponseop.CreateResponseToolChoiceTypedDict,
            ]
        ] = None,
        stream: Optional[bool] = False,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        accept_header_override: Optional[CreateAcceptEnum] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.CreateResponseResponse:
        r"""Create response

        Creates a model response for the given input.

        :param model: ID of the model to use. You can use the List models API to see all of your available models.
        :param input: The actual user input(s) for the model. Can be a simple string, or an array of structured input items (messages, tool outputs) representing a conversation history or complex input.
        :param metadata: Developer-defined key-value pairs that will be included in response objects
        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
        :param previous_response_id: The ID of a previous response to continue the conversation from. The model will have access to the previous response context.
        :param instructions: Developer-provided instructions that the model should follow. Overwrites the default system message.
        :param reasoning: Configuration for reasoning models
        :param max_output_tokens: The maximum number of tokens that can be generated in the response
        :param text:
        :param include: Specifies which (potentially large) fields to include in the response. By default, the results of Code Interpreter and file searches are excluded. Available options:
        - code_interpreter_call.outputs: Include the outputs of Code Interpreter tool calls
        - computer_call_output.output.image_url: Include the image URLs from computer use tool calls
        - file_search_call.results: Include the results of file search tool calls
        - message.input_image.image_url: Include URLs of input images
        - message.output_text.logprobs: Include log probabilities for output text (when logprobs is enabled)
        - reasoning.encrypted_content: Include encrypted reasoning content for reasoning models
        :param parallel_tool_calls: Whether to enable parallel function calling during tool use.
        :param store: Whether to store this response for use in distillations or evals.
        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
        :param tool_choice: How the model should select which tool (or tools) to use when generating a response. Can be a string (`none`, `auto`, `required`) or an object to force a specific tool.
        :param stream:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param accept_header_override: Override the default accept header for this method
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateResponseRequestBody(
            model=model,
            metadata=metadata,
            temperature=temperature,
            top_p=top_p,
            previous_response_id=previous_response_id,
            instructions=instructions,
            reasoning=utils.get_pydantic_model(
                reasoning, OptionalNullable[models.Reasoning]
            ),
            max_output_tokens=max_output_tokens,
            text=utils.get_pydantic_model(
                text, OptionalNullable[models.CreateResponseText]
            ),
            input=utils.get_pydantic_model(input_, models.CreateResponseInput),
            include=include,
            parallel_tool_calls=parallel_tool_calls,
            store=store,
            tools=utils.get_pydantic_model(
                tools, Optional[List[models.CreateResponseTools]]
            ),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.CreateResponseToolChoice]
            ),
            stream=stream,
        )

        req = self._build_request(
            method="POST",
            path="/v2/router/responses",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value=accept_header_override.value
            if accept_header_override is not None
            else "application/json;q=1, text/event-stream;q=0",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.CreateResponseRequestBody
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createResponse",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            http_res_text = utils.stream_to_text(http_res)
            return unmarshal_json_response(
                models.CreateResponseResponseBody, http_res, http_res_text
            )
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(
                    raw, models.CreateResponseRouterResponsesResponseBody
                ),
                sentinel="[DONE]",
                client_ref=self,
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        http_res_text = utils.stream_to_text(http_res)
        raise models.APIError("Unexpected response received", http_res, http_res_text)

    async def create_async(
        self,
        *,
        model: str,
        input_: Union[
            models_createresponseop.CreateResponseInput,
            models_createresponseop.CreateResponseInputTypedDict,
        ],
        metadata: Optional[Dict[str, str]] = None,
        temperature: OptionalNullable[float] = UNSET,
        top_p: OptionalNullable[float] = UNSET,
        previous_response_id: OptionalNullable[str] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        reasoning: OptionalNullable[
            Union[
                models_createresponseop.Reasoning,
                models_createresponseop.ReasoningTypedDict,
            ]
        ] = UNSET,
        max_output_tokens: OptionalNullable[int] = UNSET,
        text: OptionalNullable[
            Union[
                models_createresponseop.CreateResponseText,
                models_createresponseop.CreateResponseTextTypedDict,
            ]
        ] = UNSET,
        include: OptionalNullable[List[models_createresponseop.Include]] = UNSET,
        parallel_tool_calls: OptionalNullable[bool] = UNSET,
        store: OptionalNullable[bool] = True,
        tools: Optional[
            Union[
                List[models_createresponseop.CreateResponseTools],
                List[models_createresponseop.CreateResponseToolsTypedDict],
            ]
        ] = None,
        tool_choice: Optional[
            Union[
                models_createresponseop.CreateResponseToolChoice,
                models_createresponseop.CreateResponseToolChoiceTypedDict,
            ]
        ] = None,
        stream: Optional[bool] = False,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        accept_header_override: Optional[CreateAcceptEnum] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.CreateResponseResponse:
        r"""Create response

        Creates a model response for the given input.

        :param model: ID of the model to use. You can use the List models API to see all of your available models.
        :param input: The actual user input(s) for the model. Can be a simple string, or an array of structured input items (messages, tool outputs) representing a conversation history or complex input.
        :param metadata: Developer-defined key-value pairs that will be included in response objects
        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
        :param previous_response_id: The ID of a previous response to continue the conversation from. The model will have access to the previous response context.
        :param instructions: Developer-provided instructions that the model should follow. Overwrites the default system message.
        :param reasoning: Configuration for reasoning models
        :param max_output_tokens: The maximum number of tokens that can be generated in the response
        :param text:
        :param include: Specifies which (potentially large) fields to include in the response. By default, the results of Code Interpreter and file searches are excluded. Available options:
        - code_interpreter_call.outputs: Include the outputs of Code Interpreter tool calls
        - computer_call_output.output.image_url: Include the image URLs from computer use tool calls
        - file_search_call.results: Include the results of file search tool calls
        - message.input_image.image_url: Include URLs of input images
        - message.output_text.logprobs: Include log probabilities for output text (when logprobs is enabled)
        - reasoning.encrypted_content: Include encrypted reasoning content for reasoning models
        :param parallel_tool_calls: Whether to enable parallel function calling during tool use.
        :param store: Whether to store this response for use in distillations or evals.
        :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
        :param tool_choice: How the model should select which tool (or tools) to use when generating a response. Can be a string (`none`, `auto`, `required`) or an object to force a specific tool.
        :param stream:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param accept_header_override: Override the default accept header for this method
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateResponseRequestBody(
            model=model,
            metadata=metadata,
            temperature=temperature,
            top_p=top_p,
            previous_response_id=previous_response_id,
            instructions=instructions,
            reasoning=utils.get_pydantic_model(
                reasoning, OptionalNullable[models.Reasoning]
            ),
            max_output_tokens=max_output_tokens,
            text=utils.get_pydantic_model(
                text, OptionalNullable[models.CreateResponseText]
            ),
            input=utils.get_pydantic_model(input_, models.CreateResponseInput),
            include=include,
            parallel_tool_calls=parallel_tool_calls,
            store=store,
            tools=utils.get_pydantic_model(
                tools, Optional[List[models.CreateResponseTools]]
            ),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.CreateResponseToolChoice]
            ),
            stream=stream,
        )

        req = self._build_request_async(
            method="POST",
            path="/v2/router/responses",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value=accept_header_override.value
            if accept_header_override is not None
            else "application/json;q=1, text/event-stream;q=0",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.CreateResponseRequestBody
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createResponse",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            http_res_text = await utils.stream_to_text_async(http_res)
            return unmarshal_json_response(
                models.CreateResponseResponseBody, http_res, http_res_text
            )
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStreamAsync(
                http_res,
                lambda raw: utils.unmarshal_json(
                    raw, models.CreateResponseRouterResponsesResponseBody
                ),
                sentinel="[DONE]",
                client_ref=self,
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.APIError("Unexpected response received", http_res, http_res_text)
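
As the generated source above shows, the new `createResponse` operation posts to `/v2/router/responses` and returns either a parsed JSON body or, for `text/event-stream` responses, an event stream terminated by a `[DONE]` sentinel. The following is a minimal usage sketch, not taken from this diff: it assumes the package's standard `Orq(api_key=...)` entry point and that this class is exposed as `client.responses`; the attribute name and model ID are illustrative assumptions.

    # Hypothetical usage sketch; client attribute name and model ID are assumed.
    from orq_ai_sdk import Orq

    with Orq(api_key="<ORQ_API_KEY>") as client:
        # stream defaults to False: the call returns the parsed JSON response body.
        res = client.responses.create(
            model="<model-id>",
            input_="Summarize the Responses API in one sentence.",
        )
        print(res)

        # With stream=True the call returns an event stream that yields
        # unmarshalled events until the "[DONE]" sentinel is received.
        events = client.responses.create(
            model="<model-id>",
            input_="Summarize the Responses API in one sentence.",
            stream=True,
        )
        for event in events:
            print(event)

Note that `input_` carries a trailing underscore to avoid shadowing Python's built-in `input`; per the docstring it accepts either a plain string or a list of structured input items.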