mistralai 1.7.1__py3-none-any.whl → 1.8.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +2 -2
- mistralai/beta.py +22 -0
- mistralai/conversations.py +2660 -0
- mistralai/embeddings.py +12 -0
- mistralai/extra/__init__.py +10 -2
- mistralai/extra/exceptions.py +14 -0
- mistralai/extra/mcp/__init__.py +0 -0
- mistralai/extra/mcp/auth.py +166 -0
- mistralai/extra/mcp/base.py +155 -0
- mistralai/extra/mcp/sse.py +165 -0
- mistralai/extra/mcp/stdio.py +22 -0
- mistralai/extra/run/__init__.py +0 -0
- mistralai/extra/run/context.py +295 -0
- mistralai/extra/run/result.py +212 -0
- mistralai/extra/run/tools.py +225 -0
- mistralai/extra/run/utils.py +36 -0
- mistralai/extra/tests/test_struct_chat.py +1 -1
- mistralai/mistral_agents.py +1160 -0
- mistralai/models/__init__.py +472 -1
- mistralai/models/agent.py +129 -0
- mistralai/models/agentconversation.py +71 -0
- mistralai/models/agentcreationrequest.py +109 -0
- mistralai/models/agenthandoffdoneevent.py +33 -0
- mistralai/models/agenthandoffentry.py +75 -0
- mistralai/models/agenthandoffstartedevent.py +33 -0
- mistralai/models/agents_api_v1_agents_getop.py +16 -0
- mistralai/models/agents_api_v1_agents_listop.py +24 -0
- mistralai/models/agents_api_v1_agents_update_versionop.py +21 -0
- mistralai/models/agents_api_v1_agents_updateop.py +23 -0
- mistralai/models/agents_api_v1_conversations_append_streamop.py +28 -0
- mistralai/models/agents_api_v1_conversations_appendop.py +28 -0
- mistralai/models/agents_api_v1_conversations_getop.py +33 -0
- mistralai/models/agents_api_v1_conversations_historyop.py +16 -0
- mistralai/models/agents_api_v1_conversations_listop.py +37 -0
- mistralai/models/agents_api_v1_conversations_messagesop.py +16 -0
- mistralai/models/agents_api_v1_conversations_restart_streamop.py +26 -0
- mistralai/models/agents_api_v1_conversations_restartop.py +26 -0
- mistralai/models/agentupdaterequest.py +111 -0
- mistralai/models/builtinconnectors.py +13 -0
- mistralai/models/chatcompletionresponse.py +6 -6
- mistralai/models/codeinterpretertool.py +17 -0
- mistralai/models/completionargs.py +100 -0
- mistralai/models/completionargsstop.py +13 -0
- mistralai/models/completionjobout.py +3 -3
- mistralai/models/conversationappendrequest.py +35 -0
- mistralai/models/conversationappendstreamrequest.py +37 -0
- mistralai/models/conversationevents.py +72 -0
- mistralai/models/conversationhistory.py +58 -0
- mistralai/models/conversationinputs.py +14 -0
- mistralai/models/conversationmessages.py +28 -0
- mistralai/models/conversationrequest.py +133 -0
- mistralai/models/conversationresponse.py +51 -0
- mistralai/models/conversationrestartrequest.py +42 -0
- mistralai/models/conversationrestartstreamrequest.py +44 -0
- mistralai/models/conversationstreamrequest.py +135 -0
- mistralai/models/conversationusageinfo.py +63 -0
- mistralai/models/documentlibrarytool.py +22 -0
- mistralai/models/embeddingdtype.py +7 -0
- mistralai/models/embeddingrequest.py +43 -3
- mistralai/models/fimcompletionresponse.py +6 -6
- mistralai/models/functioncallentry.py +76 -0
- mistralai/models/functioncallentryarguments.py +15 -0
- mistralai/models/functioncallevent.py +36 -0
- mistralai/models/functionresultentry.py +69 -0
- mistralai/models/functiontool.py +21 -0
- mistralai/models/imagegenerationtool.py +17 -0
- mistralai/models/inputentries.py +18 -0
- mistralai/models/messageentries.py +18 -0
- mistralai/models/messageinputcontentchunks.py +26 -0
- mistralai/models/messageinputentry.py +89 -0
- mistralai/models/messageoutputcontentchunks.py +30 -0
- mistralai/models/messageoutputentry.py +100 -0
- mistralai/models/messageoutputevent.py +93 -0
- mistralai/models/modelconversation.py +127 -0
- mistralai/models/outputcontentchunks.py +30 -0
- mistralai/models/responsedoneevent.py +25 -0
- mistralai/models/responseerrorevent.py +27 -0
- mistralai/models/responsestartedevent.py +24 -0
- mistralai/models/ssetypes.py +18 -0
- mistralai/models/toolexecutiondoneevent.py +34 -0
- mistralai/models/toolexecutionentry.py +70 -0
- mistralai/models/toolexecutionstartedevent.py +31 -0
- mistralai/models/toolfilechunk.py +61 -0
- mistralai/models/toolreferencechunk.py +61 -0
- mistralai/models/websearchpremiumtool.py +17 -0
- mistralai/models/websearchtool.py +17 -0
- mistralai/sdk.py +3 -0
- {mistralai-1.7.1.dist-info → mistralai-1.8.1.dist-info}/METADATA +42 -7
- {mistralai-1.7.1.dist-info → mistralai-1.8.1.dist-info}/RECORD +91 -14
- {mistralai-1.7.1.dist-info → mistralai-1.8.1.dist-info}/LICENSE +0 -0
- {mistralai-1.7.1.dist-info → mistralai-1.8.1.dist-info}/WHEEL +0 -0
mistralai/conversations.py
@@ -0,0 +1,2660 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from mistralai import models, utils
from mistralai._hooks import HookContext
from mistralai.types import OptionalNullable, UNSET
from mistralai.utils import eventstreaming, get_security_from_env
from typing import Any, List, Mapping, Optional, Union

# region imports
import typing
from typing import AsyncGenerator
import logging
from collections import defaultdict

from mistralai.models import (
    ResponseStartedEvent,
    ConversationEventsData,
    InputEntries,
)
from mistralai.extra.run.result import (
    RunResult,
    RunResultEvents,
    FunctionResultEvent,
    reconstitue_entries,
)
from mistralai.extra.run.utils import run_requirements

logger = logging.getLogger(__name__)

if typing.TYPE_CHECKING:
    from mistralai.extra.run.context import RunContext

# endregion imports


class Conversations(BaseSDK):
    r"""(beta) Converstations API"""

    # region sdk-class-body
    # Custom run code allowing client side execution of code

    @run_requirements
    async def run_async(
        self,
        run_ctx: "RunContext",
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        instructions: OptionalNullable[str] = UNSET,
        tools: OptionalNullable[
            Union[List[models.Tools], List[models.ToolsTypedDict]]
        ] = UNSET,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> RunResult:
        """Run a conversation with the given inputs and context.

        The execution of a run will only stop when no required local execution can be done."""
        from mistralai.beta import Beta
        from mistralai.extra.run.context import _validate_run
        from mistralai.extra.run.tools import get_function_calls

        req, run_result, input_entries = await _validate_run(
            beta_client=Beta(self.sdk_configuration),
            run_ctx=run_ctx,
            inputs=inputs,
            instructions=instructions,
            tools=tools,
            completion_args=completion_args,
        )

        while True:
            if run_ctx.conversation_id is None:
                res = await self.start_async(
                    inputs=input_entries,
                    http_headers=http_headers,
                    name=name,
                    description=description,
                    retries=retries,
                    server_url=server_url,
                    timeout_ms=timeout_ms,
                    **req,
                )
                run_result.conversation_id = res.conversation_id
                run_ctx.conversation_id = res.conversation_id
                logger.info(
                    f"Started Run with conversation with id {res.conversation_id}"
                )
            else:
                res = await self.append_async(
                    conversation_id=run_ctx.conversation_id,
                    inputs=input_entries,
                    retries=retries,
                    server_url=server_url,
                    timeout_ms=timeout_ms,
                )
            run_ctx.request_count += 1
            run_result.output_entries.extend(res.outputs)
            fcalls = get_function_calls(res.outputs)
            if not fcalls:
                logger.debug("No more function calls to execute")
                break
            else:
                fresults = await run_ctx.execute_function_calls(fcalls)
                run_result.output_entries.extend(fresults)
                input_entries = typing.cast(list[InputEntries], fresults)
        return run_result
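
The `run_async` loop above keeps hitting the API, executing any requested function calls locally, and feeding the results back in until no function calls remain. A minimal usage sketch follows; it assumes the class is exposed as `client.beta.conversations` (suggested by `mistralai/beta.py` in this release), and the model name, API key, and the exact `RunContext` construction are illustrative assumptions to verify against `mistralai/extra/run/context.py`:

```python
import asyncio
from mistralai import Mistral
from mistralai.extra.run.context import RunContext

async def main() -> None:
    client = Mistral(api_key="MY_API_KEY")  # hypothetical key

    # Assumption: RunContext is an async context manager that carries the
    # model/agent choice and any locally registered tools for the run.
    async with RunContext(model="mistral-medium-latest") as run_ctx:
        result = await client.beta.conversations.run_async(
            run_ctx=run_ctx,
            inputs="Write a one-line haiku about version diffs.",
        )
        print(result.conversation_id)
        print(result.output_entries)

asyncio.run(main())
```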

    @run_requirements
    async def run_stream_async(
        self,
        run_ctx: "RunContext",
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        instructions: OptionalNullable[str] = UNSET,
        tools: OptionalNullable[
            Union[List[models.Tools], List[models.ToolsTypedDict]]
        ] = UNSET,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]:
        """Similar to `run_async` but returns a generator which streams events.

        The last streamed object is the RunResult object which summarises what happened in the run."""
        from mistralai.beta import Beta
        from mistralai.extra.run.context import _validate_run
        from mistralai.extra.run.tools import get_function_calls

        req, run_result, input_entries = await _validate_run(
            beta_client=Beta(self.sdk_configuration),
            run_ctx=run_ctx,
            inputs=inputs,
            instructions=instructions,
            tools=tools,
            completion_args=completion_args,
        )

        async def run_generator() -> (
            AsyncGenerator[Union[RunResultEvents, RunResult], None]
        ):
            current_entries = input_entries
            while True:
                received_event_tracker: defaultdict[
                    int, list[ConversationEventsData]
                ] = defaultdict(list)
                if run_ctx.conversation_id is None:
                    res = await self.start_stream_async(
                        inputs=current_entries,
                        http_headers=http_headers,
                        name=name,
                        description=description,
                        retries=retries,
                        server_url=server_url,
                        timeout_ms=timeout_ms,
                        **req,
                    )
                else:
                    res = await self.append_stream_async(
                        conversation_id=run_ctx.conversation_id,
                        inputs=current_entries,
                        retries=retries,
                        server_url=server_url,
                        timeout_ms=timeout_ms,
                    )
                async for event in res:
                    if (
                        isinstance(event.data, ResponseStartedEvent)
                        and run_ctx.conversation_id is None
                    ):
                        run_result.conversation_id = event.data.conversation_id
                        run_ctx.conversation_id = event.data.conversation_id
                        logger.info(
                            f"Started Run with conversation with id {run_ctx.conversation_id}"
                        )
                    if (
                        output_index := getattr(event.data, "output_index", None)
                    ) is not None:
                        received_event_tracker[output_index].append(event.data)
                    yield typing.cast(RunResultEvents, event)
                run_ctx.request_count += 1
                outputs = reconstitue_entries(received_event_tracker)
                run_result.output_entries.extend(outputs)
                fcalls = get_function_calls(outputs)
                if not fcalls:
                    logger.debug("No more function calls to execute")
                    break
                else:
                    fresults = await run_ctx.execute_function_calls(fcalls)
                    run_result.output_entries.extend(fresults)
                    for fresult in fresults:
                        yield RunResultEvents(
                            event="function.result",
                            data=FunctionResultEvent(
                                type="function.result",
                                result=fresult.result,
                                tool_call_id=fresult.tool_call_id,
                            ),
                        )
                    current_entries = typing.cast(list[InputEntries], fresults)
            yield run_result

        return run_generator()
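
`run_stream_async` is awaited once to obtain the generator, which then yields intermediate `RunResultEvents` (including synthetic `function.result` events for locally executed tools) and finishes with the `RunResult` summary. A hedged consumption sketch, reusing `client` and `run_ctx` from the previous example:

```python
from mistralai.extra.run.result import RunResult

events = await client.beta.conversations.run_stream_async(
    run_ctx=run_ctx,
    inputs="Summarise the conversation so far.",
)
async for item in events:
    if isinstance(item, RunResult):
        # Final object: overall summary of the run.
        print("done, conversation:", item.conversation_id)
    else:
        # Intermediate streamed event.
        print("event:", item.event)
```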

    # endregion sdk-class-body

    def start(
        self,
        *,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = False,
        store: OptionalNullable[bool] = UNSET,
        handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        tools: OptionalNullable[
            Union[List[models.Tools], List[models.ToolsTypedDict]]
        ] = UNSET,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        agent_id: OptionalNullable[str] = UNSET,
        model: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Create a conversation and append entries to it.

        Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation.

        :param inputs:
        :param stream:
        :param store:
        :param handoff_execution:
        :param instructions:
        :param tools:
        :param completion_args:
        :param name:
        :param description:
        :param agent_id:
        :param model:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ConversationRequest(
            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
            stream=stream,
            store=store,
            handoff_execution=handoff_execution,
            instructions=instructions,
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]),
            completion_args=utils.get_pydantic_model(
                completion_args, OptionalNullable[models.CompletionArgs]
            ),
            name=name,
            description=description,
            agent_id=agent_id,
            model=model,
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ConversationRequest
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_start",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationResponse)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
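
`start` POSTs to `/v1/conversations` and returns a `ConversationResponse`; the returned `conversation_id` is the handle used by every later call. A minimal sketch, assuming `client.beta.conversations` and an illustrative model name:

```python
from mistralai import Mistral

client = Mistral(api_key="MY_API_KEY")  # hypothetical key

conv = client.beta.conversations.start(
    model="mistral-medium-latest",  # or agent_id=..., depending on the use case
    inputs="Hello, start a conversation.",
)
print(conv.conversation_id, len(conv.outputs))
```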

    async def start_async(
        self,
        *,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = False,
        store: OptionalNullable[bool] = UNSET,
        handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        tools: OptionalNullable[
            Union[List[models.Tools], List[models.ToolsTypedDict]]
        ] = UNSET,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        agent_id: OptionalNullable[str] = UNSET,
        model: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Create a conversation and append entries to it.

        Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation.

        :param inputs:
        :param stream:
        :param store:
        :param handoff_execution:
        :param instructions:
        :param tools:
        :param completion_args:
        :param name:
        :param description:
        :param agent_id:
        :param model:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ConversationRequest(
            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
            stream=stream,
            store=store,
            handoff_execution=handoff_execution,
            instructions=instructions,
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]),
            completion_args=utils.get_pydantic_model(
                completion_args, OptionalNullable[models.CompletionArgs]
            ),
            name=name,
            description=description,
            agent_id=agent_id,
            model=model,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/conversations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ConversationRequest
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_start",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationResponse)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    def list(
        self,
        *,
        page: Optional[int] = 0,
        page_size: Optional[int] = 100,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> List[models.ResponseBody]:
        r"""List all created conversations.

        Retrieve a list of conversation entities sorted by creation time.

        :param page:
        :param page_size:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsListRequest(
            page=page,
            page_size=page_size,
        )

        req = self._build_request(
            method="GET",
            path="/v1/conversations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_list",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, List[models.ResponseBody])
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
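
`list` is a plain GET on `/v1/conversations` with `page`/`page_size` query parameters (defaulting to 0 and 100). A paging sketch under the same assumptions as above; the `id` field on the returned entities is an assumption to check against `models.ResponseBody`:

```python
# Walk all conversations, 50 per page (sketch).
page = 0
while True:
    batch = client.beta.conversations.list(page=page, page_size=50)
    if not batch:
        break
    for conversation in batch:
        print(conversation.id)
    page += 1
```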

    async def list_async(
        self,
        *,
        page: Optional[int] = 0,
        page_size: Optional[int] = 100,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> List[models.ResponseBody]:
        r"""List all created conversations.

        Retrieve a list of conversation entities sorted by creation time.

        :param page:
        :param page_size:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsListRequest(
            page=page,
            page_size=page_size,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/conversations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_list",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, List[models.ResponseBody])
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    def get(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet:
        r"""Retrieve a conversation information.

        Given a conversation_id retrieve a conversation entity with its attributes.

        :param conversation_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsGetRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/conversations/{conversation_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_get",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(
                http_res.text,
                models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet,
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
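
Every method above also accepts a per-call `retries` override; when a `utils.RetryConfig` is supplied, 429 and 5xx responses are retried with the configured backoff. A hedged sketch of overriding retries on `get` (the `RetryConfig`/`BackoffStrategy` constructor arguments follow the usual Speakeasy pattern and the conversation id is illustrative):

```python
from mistralai.utils import BackoffStrategy, RetryConfig

conv_info = client.beta.conversations.get(
    conversation_id="conv_0123456789",  # hypothetical id
    retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False),
)
```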

    async def get_async(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet:
        r"""Retrieve a conversation information.

        Given a conversation_id retrieve a conversation entity with its attributes.

        :param conversation_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsGetRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/conversations/{conversation_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_get",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(
                http_res.text,
                models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet,
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    def append(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = False,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationAppendRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Append new entries to an existing conversation.

        Run completion on the history of the conversation and the user entries. Return the new created entries.

        :param conversation_id: ID of the conversation to which we append entries.
        :param inputs:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsAppendRequest(
            conversation_id=conversation_id,
            conversation_append_request=models.ConversationAppendRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations/{conversation_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_append_request,
                False,
                False,
                "json",
                models.ConversationAppendRequest,
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_append",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationResponse)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
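
`append` POSTs new inputs to `/v1/conversations/{conversation_id}`, runs completion on the stored history plus those inputs, and returns only the newly created entries (`store` defaults to True, `handoff_execution` to "server"). A sketch continuing the conversation started earlier:

```python
# Continue the conversation created by start() above (sketch).
follow_up = client.beta.conversations.append(
    conversation_id=conv.conversation_id,
    inputs="Thanks - now translate that into French.",
)
for entry in follow_up.outputs:
    print(entry)
```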

    async def append_async(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = False,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationAppendRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Append new entries to an existing conversation.

        Run completion on the history of the conversation and the user entries. Return the new created entries.

        :param conversation_id: ID of the conversation to which we append entries.
        :param inputs:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsAppendRequest(
            conversation_id=conversation_id,
            conversation_append_request=models.ConversationAppendRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/conversations/{conversation_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_append_request,
                False,
                False,
                "json",
                models.ConversationAppendRequest,
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_append",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationResponse)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    def get_history(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationHistory:
        r"""Retrieve all entries in a conversation.

        Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call.

        :param conversation_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsHistoryRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/conversations/{conversation_id}/history",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_history",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationHistory)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
|
|
1258
|
+
|
|
1259
|
+
    async def get_history_async(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationHistory:
        r"""Retrieve all entries in a conversation.

        Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call.

        :param conversation_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsHistoryRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/conversations/{conversation_id}/history",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_history",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationHistory)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    def get_messages(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationMessages:
        r"""Retrieve all messages in a conversation.

        Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only.

        :param conversation_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsMessagesRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/conversations/{conversation_id}/messages",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_messages",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationMessages)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    async def get_messages_async(
        self,
        *,
        conversation_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationMessages:
        r"""Retrieve all messages in a conversation.

        Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only.

        :param conversation_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsMessagesRequest(
            conversation_id=conversation_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/conversations/{conversation_id}/messages",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_messages",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationMessages)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

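For orientation, a minimal usage sketch of the retrieval operations above. It assumes the generated class is exposed on the client as `client.beta.conversations` and that a conversation already exists; the accessor path, the API key environment variable and the example ID are assumptions, not something this diff pins down.

```python
import os

from mistralai import Mistral

# Sketch only: accessor path and conversation ID are assumptions.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Full history: entries can be messages, connector calls or function calls.
history = client.beta.conversations.get_history(conversation_id="conv_123")

# Same conversation, filtered down to messages only.
messages = client.beta.conversations.get_messages(conversation_id="conv_123")

print(history, messages)
```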
    def restart(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        from_entry_id: str,
        stream: Optional[bool] = False,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationRestartRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Restart a conversation starting from a given entry.

        Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

        :param conversation_id:
        :param inputs:
        :param from_entry_id:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsRestartRequest(
            conversation_id=conversation_id,
            conversation_restart_request=models.ConversationRestartRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                from_entry_id=from_entry_id,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations/{conversation_id}/restart",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_restart_request,
                False,
                False,
                "json",
                models.ConversationRestartRequest,
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_restart",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationResponse)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    async def restart_async(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        from_entry_id: str,
        stream: Optional[bool] = False,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationRestartRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ConversationResponse:
        r"""Restart a conversation starting from a given entry.

        Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

        :param conversation_id:
        :param inputs:
        :param from_entry_id:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsRestartRequest(
            conversation_id=conversation_id,
            conversation_restart_request=models.ConversationRestartRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                from_entry_id=from_entry_id,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/conversations/{conversation_id}/restart",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_restart_request,
                False,
                False,
                "json",
                models.ConversationRestartRequest,
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_restart",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.ConversationResponse)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

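A hedged sketch of how `restart` might be used: it forks an existing conversation at `from_entry_id`, runs completion on the new branch, and returns a `ConversationResponse` whose `conversation_id` (per the docstring above) identifies the new conversation. The accessor path, the IDs and the plain-string `inputs` form are assumptions for illustration.

```python
import os

from mistralai import Mistral

# Sketch only: accessor path, conversation ID and entry ID are assumptions.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

response = client.beta.conversations.restart(
    conversation_id="conv_123",
    from_entry_id="entry_456",   # fork point inside the existing conversation
    inputs="Try answering again, but more concisely.",
    store=True,                  # keep the new branch server-side
)
print(response.conversation_id)  # ID of the newly created conversation
```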
    def start_stream(
        self,
        *,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = True,
        store: OptionalNullable[bool] = UNSET,
        handoff_execution: OptionalNullable[
            models.ConversationStreamRequestHandoffExecution
        ] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        tools: OptionalNullable[
            Union[
                List[models.ConversationStreamRequestTools],
                List[models.ConversationStreamRequestToolsTypedDict],
            ]
        ] = UNSET,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        agent_id: OptionalNullable[str] = UNSET,
        model: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.ConversationEvents]:
        r"""Create a conversation and append entries to it.

        Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation.

        :param inputs:
        :param stream:
        :param store:
        :param handoff_execution:
        :param instructions:
        :param tools:
        :param completion_args:
        :param name:
        :param description:
        :param agent_id:
        :param model:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ConversationStreamRequest(
            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
            stream=stream,
            store=store,
            handoff_execution=handoff_execution,
            instructions=instructions,
            tools=utils.get_pydantic_model(
                tools, OptionalNullable[List[models.ConversationStreamRequestTools]]
            ),
            completion_args=utils.get_pydantic_model(
                completion_args, OptionalNullable[models.CompletionArgs]
            ),
            name=name,
            description=description,
            agent_id=agent_id,
            model=model,
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ConversationStreamRequest
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_start_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = utils.stream_to_text(http_res)
            response_data = utils.unmarshal_json(
                http_res_text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    async def start_stream_async(
        self,
        *,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = True,
        store: OptionalNullable[bool] = UNSET,
        handoff_execution: OptionalNullable[
            models.ConversationStreamRequestHandoffExecution
        ] = UNSET,
        instructions: OptionalNullable[str] = UNSET,
        tools: OptionalNullable[
            Union[
                List[models.ConversationStreamRequestTools],
                List[models.ConversationStreamRequestToolsTypedDict],
            ]
        ] = UNSET,
        completion_args: OptionalNullable[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = UNSET,
        name: OptionalNullable[str] = UNSET,
        description: OptionalNullable[str] = UNSET,
        agent_id: OptionalNullable[str] = UNSET,
        model: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]:
        r"""Create a conversation and append entries to it.

        Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation.

        :param inputs:
        :param stream:
        :param store:
        :param handoff_execution:
        :param instructions:
        :param tools:
        :param completion_args:
        :param name:
        :param description:
        :param agent_id:
        :param model:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ConversationStreamRequest(
            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
            stream=stream,
            store=store,
            handoff_execution=handoff_execution,
            instructions=instructions,
            tools=utils.get_pydantic_model(
                tools, OptionalNullable[List[models.ConversationStreamRequestTools]]
            ),
            completion_args=utils.get_pydantic_model(
                completion_args, OptionalNullable[models.CompletionArgs]
            ),
            name=name,
            description=description,
            agent_id=agent_id,
            model=model,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/conversations#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ConversationStreamRequest
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_start_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStreamAsync(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = await utils.stream_to_text_async(http_res)
            response_data = utils.unmarshal_json(
                http_res_text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

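Because `start_stream` returns an `eventstreaming.EventStream[models.ConversationEvents]`, the natural pattern is to iterate it, ideally as a context manager so the underlying SSE response gets closed. A minimal sketch, with the accessor path, model name and plain-string `inputs` as assumptions:

```python
import os

from mistralai import Mistral

# Sketch only: accessor path, model name and plain-string inputs are assumptions.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

with client.beta.conversations.start_stream(
    inputs="Summarize the latest run results.",
    model="mistral-medium-latest",
) as events:
    for event in events:  # each item unmarshals to models.ConversationEvents
        print(event)      # e.g. message deltas, tool events, usage info
```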
    def append_stream(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = True,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationAppendStreamRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.ConversationEvents]:
        r"""Append new entries to an existing conversation.

        Run completion on the history of the conversation and the user entries. Return the new created entries.

        :param conversation_id: ID of the conversation to which we append entries.
        :param inputs:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsAppendStreamRequest(
            conversation_id=conversation_id,
            conversation_append_stream_request=models.ConversationAppendStreamRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations/{conversation_id}#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_append_stream_request,
                False,
                False,
                "json",
                models.ConversationAppendStreamRequest,
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_append_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = utils.stream_to_text(http_res)
            response_data = utils.unmarshal_json(
                http_res_text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    async def append_stream_async(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        stream: Optional[bool] = True,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationAppendStreamRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]:
        r"""Append new entries to an existing conversation.

        Run completion on the history of the conversation and the user entries. Return the new created entries.

        :param conversation_id: ID of the conversation to which we append entries.
        :param inputs:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsAppendStreamRequest(
            conversation_id=conversation_id,
            conversation_append_stream_request=models.ConversationAppendStreamRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/conversations/{conversation_id}#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_append_stream_request,
                False,
                False,
                "json",
                models.ConversationAppendStreamRequest,
            ),
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_append_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStreamAsync(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = await utils.stream_to_text_async(http_res)
            response_data = utils.unmarshal_json(
                http_res_text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

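Appending with streaming follows the same shape as starting a stream, just targeted at an existing conversation. A hedged sketch; the accessor path and conversation ID are again assumptions:

```python
import os

from mistralai import Mistral

# Sketch only: accessor path and conversation ID are assumptions.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

with client.beta.conversations.append_stream(
    conversation_id="conv_123",
    inputs="And what changed compared to last week?",
) as events:
    for event in events:
        print(event)
```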
def restart_stream(
|
|
2389
|
+
self,
|
|
2390
|
+
*,
|
|
2391
|
+
conversation_id: str,
|
|
2392
|
+
inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
|
|
2393
|
+
from_entry_id: str,
|
|
2394
|
+
stream: Optional[bool] = True,
|
|
2395
|
+
store: Optional[bool] = True,
|
|
2396
|
+
handoff_execution: Optional[
|
|
2397
|
+
models.ConversationRestartStreamRequestHandoffExecution
|
|
2398
|
+
] = "server",
|
|
2399
|
+
completion_args: Optional[
|
|
2400
|
+
Union[models.CompletionArgs, models.CompletionArgsTypedDict]
|
|
2401
|
+
] = None,
|
|
2402
|
+
retries: OptionalNullable[utils.RetryConfig] = UNSET,
|
|
2403
|
+
server_url: Optional[str] = None,
|
|
2404
|
+
timeout_ms: Optional[int] = None,
|
|
2405
|
+
http_headers: Optional[Mapping[str, str]] = None,
|
|
2406
|
+
) -> eventstreaming.EventStream[models.ConversationEvents]:
|
|
2407
|
+
r"""Restart a conversation starting from a given entry.
|
|
2408
|
+
|
|
2409
|
+
Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.
|
|
2410
|
+
|
|
2411
|
+
:param conversation_id:
|
|
2412
|
+
:param inputs:
|
|
2413
|
+
:param from_entry_id:
|
|
2414
|
+
:param stream:
|
|
2415
|
+
:param store: Whether to store the results into our servers or not.
|
|
2416
|
+
:param handoff_execution:
|
|
2417
|
+
:param completion_args: White-listed arguments from the completion API
|
|
2418
|
+
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1ConversationsRestartStreamRequest(
+            conversation_id=conversation_id,
+            conversation_restart_stream_request=models.ConversationRestartStreamRequest(
+                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
+                stream=stream,
+                store=store,
+                handoff_execution=handoff_execution,
+                from_entry_id=from_entry_id,
+                completion_args=utils.get_pydantic_model(
+                    completion_args, Optional[models.CompletionArgs]
+                ),
+            ),
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/conversations/{conversation_id}/restart#stream",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="text/event-stream",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.conversation_restart_stream_request,
+                False,
+                False,
+                "json",
+                models.ConversationRestartStreamRequest,
+            ),
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                base_url=base_url or "",
+                operation_id="agents_api_v1_conversations_restart_stream",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStream(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
+            )
+        if utils.match_response(http_res, "422", "application/json"):
+            http_res_text = utils.stream_to_text(http_res)
+            response_data = utils.unmarshal_json(
+                http_res_text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+
+        content_type = http_res.headers.get("Content-Type")
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res_text,
+            http_res,
+        )
+
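As an aside, a minimal sketch of how the new synchronous restart_stream endpoint might be consumed through the public SDK surface. This is not code from the package: the Mistral entrypoint, the beta.conversations namespace, the placeholder ids, and the plain-string inputs value are assumptions inferred from the other files added in this release (beta.py, mistral_agents.py) and from the SDK's existing streaming conventions.

import os
from mistralai import Mistral

# Assumption: the Conversations client is exposed as client.beta.conversations,
# mirroring the beta.py module added in this release.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Restart an existing conversation from a known entry and stream the new events.
# conversation_id and from_entry_id are placeholders; inputs is assumed to accept
# a plain string as one of the models.ConversationInputs variants.
res = client.beta.conversations.restart_stream(
    conversation_id="conv_...",
    from_entry_id="entry_...",
    inputs="Answer the previous question again, but more concisely.",
)

# The method returns an eventstreaming.EventStream of models.ConversationEvents;
# it is used as a context manager and iterated to receive parsed SSE events.
with res as event_stream:
    for event in event_stream:
        print(event)

Each event is a models.ConversationEvents value unmarshalled from the text/event-stream body; a 422 surfaces as models.HTTPValidationError and other non-2xx responses as models.SDKError, per the handlers in the method above.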
+    async def restart_stream_async(
+        self,
+        *,
+        conversation_id: str,
+        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
+        from_entry_id: str,
+        stream: Optional[bool] = True,
+        store: Optional[bool] = True,
+        handoff_execution: Optional[
+            models.ConversationRestartStreamRequestHandoffExecution
+        ] = "server",
+        completion_args: Optional[
+            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
+        ] = None,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]:
+        r"""Restart a conversation starting from a given entry.
+
+        Given a conversation_id and an entry id, recreate the conversation from this point and run completion. A new conversation is returned with the new entries.
+
+        :param conversation_id:
+        :param inputs:
+        :param from_entry_id:
+        :param stream:
+        :param store: Whether to store the results on our servers or not.
+        :param handoff_execution:
+        :param completion_args: White-listed arguments from the completion API
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1ConversationsRestartStreamRequest(
+            conversation_id=conversation_id,
+            conversation_restart_stream_request=models.ConversationRestartStreamRequest(
+                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
+                stream=stream,
+                store=store,
+                handoff_execution=handoff_execution,
+                from_entry_id=from_entry_id,
+                completion_args=utils.get_pydantic_model(
+                    completion_args, Optional[models.CompletionArgs]
+                ),
+            ),
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v1/conversations/{conversation_id}/restart#stream",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="text/event-stream",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.conversation_restart_stream_request,
+                False,
+                False,
+                "json",
+                models.ConversationRestartStreamRequest,
+            ),
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                base_url=base_url or "",
+                operation_id="agents_api_v1_conversations_restart_stream",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStreamAsync(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
+            )
+        if utils.match_response(http_res, "422", "application/json"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            response_data = utils.unmarshal_json(
+                http_res_text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+
+        content_type = http_res.headers.get("Content-Type")
+        http_res_text = await utils.stream_to_text_async(http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res_text,
+            http_res,
+        )
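The restart_stream_async method shown above differs from the synchronous one only in how the request is awaited and the stream is iterated. A matching consumption sketch, with the same caveats as before (client construction, namespace path, and placeholder values outside this file are assumptions, not documented package behaviour):

import asyncio
import os
from mistralai import Mistral

async def main() -> None:
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    # restart_stream_async returns an eventstreaming.EventStreamAsync, consumed
    # with `async with` / `async for` instead of the synchronous protocol.
    res = await client.beta.conversations.restart_stream_async(
        conversation_id="conv_...",
        from_entry_id="entry_...",
        inputs="Continue from here, please.",
    )

    async with res as event_stream:
        async for event in event_stream:
            print(event)

asyncio.run(main())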