openai-agents 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents has been flagged as potentially problematic; consult the registry's advisory page for more details.
- agents/__init__.py +223 -0
- agents/_config.py +23 -0
- agents/_debug.py +17 -0
- agents/_run_impl.py +792 -0
- agents/_utils.py +61 -0
- agents/agent.py +159 -0
- agents/agent_output.py +144 -0
- agents/computer.py +107 -0
- agents/exceptions.py +63 -0
- agents/extensions/handoff_filters.py +67 -0
- agents/extensions/handoff_prompt.py +19 -0
- agents/function_schema.py +340 -0
- agents/guardrail.py +320 -0
- agents/handoffs.py +236 -0
- agents/items.py +246 -0
- agents/lifecycle.py +105 -0
- agents/logger.py +3 -0
- agents/model_settings.py +35 -0
- agents/models/__init__.py +0 -0
- agents/models/_openai_shared.py +34 -0
- agents/models/fake_id.py +5 -0
- agents/models/interface.py +107 -0
- agents/models/openai_chatcompletions.py +952 -0
- agents/models/openai_provider.py +65 -0
- agents/models/openai_responses.py +384 -0
- agents/result.py +220 -0
- agents/run.py +904 -0
- agents/run_context.py +26 -0
- agents/stream_events.py +58 -0
- agents/strict_schema.py +167 -0
- agents/tool.py +286 -0
- agents/tracing/__init__.py +97 -0
- agents/tracing/create.py +306 -0
- agents/tracing/logger.py +3 -0
- agents/tracing/processor_interface.py +69 -0
- agents/tracing/processors.py +261 -0
- agents/tracing/scope.py +45 -0
- agents/tracing/setup.py +211 -0
- agents/tracing/span_data.py +188 -0
- agents/tracing/spans.py +264 -0
- agents/tracing/traces.py +195 -0
- agents/tracing/util.py +17 -0
- agents/usage.py +22 -0
- agents/version.py +7 -0
- openai_agents-0.0.2.dist-info/METADATA +202 -0
- openai_agents-0.0.2.dist-info/RECORD +49 -0
- openai_agents-0.0.2.dist-info/licenses/LICENSE +21 -0
- openai-agents/example.py +0 -2
- openai_agents-0.0.1.dist-info/METADATA +0 -17
- openai_agents-0.0.1.dist-info/RECORD +0 -6
- openai_agents-0.0.1.dist-info/licenses/LICENSE +0 -20
- {openai-agents → agents/extensions}/__init__.py +0 -0
- {openai_agents-0.0.1.dist-info → openai_agents-0.0.2.dist-info}/WHEEL +0 -0
agents/run.py
ADDED
|
@@ -0,0 +1,904 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import copy
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import Any, cast
|
|
7
|
+
|
|
8
|
+
from openai.types.responses import ResponseCompletedEvent
|
|
9
|
+
|
|
10
|
+
from . import Model, _utils
|
|
11
|
+
from ._run_impl import (
|
|
12
|
+
NextStepFinalOutput,
|
|
13
|
+
NextStepHandoff,
|
|
14
|
+
NextStepRunAgain,
|
|
15
|
+
QueueCompleteSentinel,
|
|
16
|
+
RunImpl,
|
|
17
|
+
SingleStepResult,
|
|
18
|
+
TraceCtxManager,
|
|
19
|
+
get_model_tracing_impl,
|
|
20
|
+
)
|
|
21
|
+
from .agent import Agent
|
|
22
|
+
from .agent_output import AgentOutputSchema
|
|
23
|
+
from .exceptions import (
|
|
24
|
+
AgentsException,
|
|
25
|
+
InputGuardrailTripwireTriggered,
|
|
26
|
+
MaxTurnsExceeded,
|
|
27
|
+
ModelBehaviorError,
|
|
28
|
+
OutputGuardrailTripwireTriggered,
|
|
29
|
+
)
|
|
30
|
+
from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult
|
|
31
|
+
from .handoffs import Handoff, HandoffInputFilter, handoff
|
|
32
|
+
from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
|
|
33
|
+
from .lifecycle import RunHooks
|
|
34
|
+
from .logger import logger
|
|
35
|
+
from .model_settings import ModelSettings
|
|
36
|
+
from .models.interface import ModelProvider
|
|
37
|
+
from .models.openai_provider import OpenAIProvider
|
|
38
|
+
from .result import RunResult, RunResultStreaming
|
|
39
|
+
from .run_context import RunContextWrapper, TContext
|
|
40
|
+
from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
|
|
41
|
+
from .tracing import Span, SpanError, agent_span, get_current_trace, trace
|
|
42
|
+
from .tracing.span_data import AgentSpanData
|
|
43
|
+
from .usage import Usage
|
|
44
|
+
|
|
45
|
+
DEFAULT_MAX_TURNS = 10
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class RunConfig:
    """Configures settings for the entire agent run.

    An instance can be passed to the `Runner` entry points; any non-null values here
    override the corresponding per-agent settings for every agent in the run.
    """

    model: str | Model | None = None
    """The model to use for the entire agent run. If set, will override the model set on every
    agent. The model_provider passed in below must be able to resolve this model name.
    """

    model_provider: ModelProvider = field(default_factory=OpenAIProvider)
    """The model provider to use when looking up string model names. Defaults to OpenAI."""

    model_settings: ModelSettings | None = None
    """Configure global model settings. Any non-null values will override the agent-specific model
    settings.
    """

    handoff_input_filter: HandoffInputFilter | None = None
    """A global input filter to apply to all handoffs. If `Handoff.input_filter` is set, then that
    will take precedence. The input filter allows you to edit the inputs that are sent to the new
    agent. See the documentation in `Handoff.input_filter` for more details.
    """

    input_guardrails: list[InputGuardrail[Any]] | None = None
    """A list of input guardrails to run on the initial run input, in addition to any guardrails
    defined on the starting agent."""

    output_guardrails: list[OutputGuardrail[Any]] | None = None
    """A list of output guardrails to run on the final output of the run, in addition to any
    guardrails defined on the final agent."""

    tracing_disabled: bool = False
    """Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run.
    """

    trace_include_sensitive_data: bool = True
    """Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or
    LLM generations) in traces. If False, we'll still create spans for these events, but the
    sensitive data will not be included.
    """

    workflow_name: str = "Agent workflow"
    """The name of the run, used for tracing. Should be a logical name for the run, like
    "Code generation workflow" or "Customer support agent".
    """

    trace_id: str | None = None
    """A custom trace ID to use for tracing. If not provided, we will generate a new trace ID."""

    group_id: str | None = None
    """
    A grouping identifier to use for tracing, to link multiple traces from the same conversation
    or process. For example, you might use a chat thread ID.
    """

    trace_metadata: dict[str, Any] | None = None
    """
    An optional dictionary of additional metadata to include with the trace.
    """
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class Runner:
|
|
108
|
+
    @classmethod
    async def run(
        cls,
        starting_agent: Agent[TContext],
        input: str | list[TResponseInputItem],
        *,
        context: TContext | None = None,
        max_turns: int = DEFAULT_MAX_TURNS,
        hooks: RunHooks[TContext] | None = None,
        run_config: RunConfig | None = None,
    ) -> RunResult:
        """Run a workflow starting at the given agent. The agent will run in a loop until a final
        output is generated. The loop runs like so:
        1. The agent is invoked with the given input.
        2. If there is a final output (i.e. the agent produces something of type
            `agent.output_type`, the loop terminates.
        3. If there's a handoff, we run the loop again, with the new agent.
        4. Else, we run tool calls (if any), and re-run the loop.

        In two cases, the agent may raise an exception:
        1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
        2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.

        Note that only the first agent's input guardrails are run.

        Args:
            starting_agent: The starting agent to run.
            input: The initial input to the agent. You can pass a single string for a user message,
                or a list of input items.
            context: The context to run the agent with.
            max_turns: The maximum number of turns to run the agent for. A turn is defined as one
                AI invocation (including any tool calls that might occur).
            hooks: An object that receives callbacks on various lifecycle events.
            run_config: Global settings for the entire agent run.

        Returns:
            A run result containing all the inputs, guardrail results and the output of the last
            agent. Agents may perform handoffs, so we don't know the specific type of the output.
        """
        if hooks is None:
            hooks = RunHooks[Any]()
        if run_config is None:
            run_config = RunConfig()

        # The whole run happens inside one trace context (no-op if tracing is disabled
        # or a trace already exists -- see TraceCtxManager).
        with TraceCtxManager(
            workflow_name=run_config.workflow_name,
            trace_id=run_config.trace_id,
            group_id=run_config.group_id,
            metadata=run_config.trace_metadata,
            disabled=run_config.tracing_disabled,
        ):
            current_turn = 0
            # Deep-copy the caller's input so turn results can replace/extend it without
            # mutating the caller's objects.
            original_input: str | list[TResponseInputItem] = copy.deepcopy(input)
            generated_items: list[RunItem] = []
            model_responses: list[ModelResponse] = []

            context_wrapper: RunContextWrapper[TContext] = RunContextWrapper(
                context=context,  # type: ignore
            )

            input_guardrail_results: list[InputGuardrailResult] = []

            current_span: Span[AgentSpanData] | None = None
            current_agent = starting_agent
            should_run_agent_start_hooks = True

            try:
                while True:
                    # Start an agent span if we don't have one. This span is ended if the current
                    # agent changes, or if the agent loop ends.
                    if current_span is None:
                        handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)]
                        tool_names = [t.name for t in current_agent.tools]
                        if output_schema := cls._get_output_schema(current_agent):
                            output_type_name = output_schema.output_type_name()
                        else:
                            # No output schema means the agent produces plain text.
                            output_type_name = "str"

                        current_span = agent_span(
                            name=current_agent.name,
                            handoffs=handoff_names,
                            tools=tool_names,
                            output_type=output_type_name,
                        )
                        current_span.start(mark_as_current=True)

                    current_turn += 1
                    if current_turn > max_turns:
                        # Record the failure on the span before raising; the span itself is
                        # finished in the `finally` block below.
                        _utils.attach_error_to_span(
                            current_span,
                            SpanError(
                                message="Max turns exceeded",
                                data={"max_turns": max_turns},
                            ),
                        )
                        raise MaxTurnsExceeded(f"Max turns ({max_turns}) exceeded")

                    logger.debug(
                        f"Running agent {current_agent.name} (turn {current_turn})",
                    )

                    if current_turn == 1:
                        # First turn only: run the input guardrails (starting agent's plus any
                        # global ones) concurrently with the first model turn. A guardrail
                        # tripwire propagates out of `gather` as an exception.
                        input_guardrail_results, turn_result = await asyncio.gather(
                            cls._run_input_guardrails(
                                starting_agent,
                                starting_agent.input_guardrails
                                + (run_config.input_guardrails or []),
                                copy.deepcopy(input),
                                context_wrapper,
                            ),
                            cls._run_single_turn(
                                agent=current_agent,
                                original_input=original_input,
                                generated_items=generated_items,
                                hooks=hooks,
                                context_wrapper=context_wrapper,
                                run_config=run_config,
                                should_run_agent_start_hooks=should_run_agent_start_hooks,
                            ),
                        )
                    else:
                        turn_result = await cls._run_single_turn(
                            agent=current_agent,
                            original_input=original_input,
                            generated_items=generated_items,
                            hooks=hooks,
                            context_wrapper=context_wrapper,
                            run_config=run_config,
                            should_run_agent_start_hooks=should_run_agent_start_hooks,
                        )
                    should_run_agent_start_hooks = False

                    # Fold the turn's outputs back into the loop state.
                    model_responses.append(turn_result.model_response)
                    original_input = turn_result.original_input
                    generated_items = turn_result.generated_items

                    if isinstance(turn_result.next_step, NextStepFinalOutput):
                        # Output guardrails run on the final output only (final agent's plus
                        # any global ones).
                        output_guardrail_results = await cls._run_output_guardrails(
                            current_agent.output_guardrails + (run_config.output_guardrails or []),
                            current_agent,
                            turn_result.next_step.output,
                            context_wrapper,
                        )
                        return RunResult(
                            input=original_input,
                            new_items=generated_items,
                            raw_responses=model_responses,
                            final_output=turn_result.next_step.output,
                            _last_agent=current_agent,
                            input_guardrail_results=input_guardrail_results,
                            output_guardrail_results=output_guardrail_results,
                        )
                    elif isinstance(turn_result.next_step, NextStepHandoff):
                        # Swap to the new agent; end the current span so a fresh one is
                        # started at the top of the next loop iteration.
                        current_agent = cast(Agent[TContext], turn_result.next_step.new_agent)
                        current_span.finish(reset_current=True)
                        current_span = None
                        should_run_agent_start_hooks = True
                    elif isinstance(turn_result.next_step, NextStepRunAgain):
                        # Tool calls were executed; loop again with the same agent.
                        pass
                    else:
                        raise AgentsException(
                            f"Unknown next step type: {type(turn_result.next_step)}"
                        )
            finally:
                # Ensure the active span is closed on every exit path (return or raise).
                if current_span:
                    current_span.finish(reset_current=True)
|
|
274
|
+
|
|
275
|
+
@classmethod
|
|
276
|
+
def run_sync(
|
|
277
|
+
cls,
|
|
278
|
+
starting_agent: Agent[TContext],
|
|
279
|
+
input: str | list[TResponseInputItem],
|
|
280
|
+
*,
|
|
281
|
+
context: TContext | None = None,
|
|
282
|
+
max_turns: int = DEFAULT_MAX_TURNS,
|
|
283
|
+
hooks: RunHooks[TContext] | None = None,
|
|
284
|
+
run_config: RunConfig | None = None,
|
|
285
|
+
) -> RunResult:
|
|
286
|
+
"""Run a workflow synchronously, starting at the given agent. Note that this just wraps the
|
|
287
|
+
`run` method, so it will not work if there's already an event loop (e.g. inside an async
|
|
288
|
+
function, or in a Jupyter notebook or async context like FastAPI). For those cases, use
|
|
289
|
+
the `run` method instead.
|
|
290
|
+
|
|
291
|
+
The agent will run in a loop until a final output is generated. The loop runs like so:
|
|
292
|
+
1. The agent is invoked with the given input.
|
|
293
|
+
2. If there is a final output (i.e. the agent produces something of type
|
|
294
|
+
`agent.output_type`, the loop terminates.
|
|
295
|
+
3. If there's a handoff, we run the loop again, with the new agent.
|
|
296
|
+
4. Else, we run tool calls (if any), and re-run the loop.
|
|
297
|
+
|
|
298
|
+
In two cases, the agent may raise an exception:
|
|
299
|
+
1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
|
|
300
|
+
2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.
|
|
301
|
+
|
|
302
|
+
Note that only the first agent's input guardrails are run.
|
|
303
|
+
|
|
304
|
+
Args:
|
|
305
|
+
starting_agent: The starting agent to run.
|
|
306
|
+
input: The initial input to the agent. You can pass a single string for a user message,
|
|
307
|
+
or a list of input items.
|
|
308
|
+
context: The context to run the agent with.
|
|
309
|
+
max_turns: The maximum number of turns to run the agent for. A turn is defined as one
|
|
310
|
+
AI invocation (including any tool calls that might occur).
|
|
311
|
+
hooks: An object that receives callbacks on various lifecycle events.
|
|
312
|
+
run_config: Global settings for the entire agent run.
|
|
313
|
+
|
|
314
|
+
Returns:
|
|
315
|
+
A run result containing all the inputs, guardrail results and the output of the last
|
|
316
|
+
agent. Agents may perform handoffs, so we don't know the specific type of the output.
|
|
317
|
+
"""
|
|
318
|
+
return asyncio.get_event_loop().run_until_complete(
|
|
319
|
+
cls.run(
|
|
320
|
+
starting_agent,
|
|
321
|
+
input,
|
|
322
|
+
context=context,
|
|
323
|
+
max_turns=max_turns,
|
|
324
|
+
hooks=hooks,
|
|
325
|
+
run_config=run_config,
|
|
326
|
+
)
|
|
327
|
+
)
|
|
328
|
+
|
|
329
|
+
    @classmethod
    def run_streamed(
        cls,
        starting_agent: Agent[TContext],
        input: str | list[TResponseInputItem],
        context: TContext | None = None,
        max_turns: int = DEFAULT_MAX_TURNS,
        hooks: RunHooks[TContext] | None = None,
        run_config: RunConfig | None = None,
    ) -> RunResultStreaming:
        """Run a workflow starting at the given agent in streaming mode. The returned result object
        contains a method you can use to stream semantic events as they are generated.

        The agent will run in a loop until a final output is generated. The loop runs like so:
        1. The agent is invoked with the given input.
        2. If there is a final output (i.e. the agent produces something of type
            `agent.output_type`, the loop terminates.
        3. If there's a handoff, we run the loop again, with the new agent.
        4. Else, we run tool calls (if any), and re-run the loop.

        In two cases, the agent may raise an exception:
        1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
        2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.

        Note that only the first agent's input guardrails are run.

        Args:
            starting_agent: The starting agent to run.
            input: The initial input to the agent. You can pass a single string for a user message,
                or a list of input items.
            context: The context to run the agent with.
            max_turns: The maximum number of turns to run the agent for. A turn is defined as one
                AI invocation (including any tool calls that might occur).
            hooks: An object that receives callbacks on various lifecycle events.
            run_config: Global settings for the entire agent run.

        Returns:
            A result object that contains data about the run, as well as a method to stream events.
        """
        if hooks is None:
            hooks = RunHooks[Any]()
        if run_config is None:
            run_config = RunConfig()

        # If there's already a trace, we don't create a new one. In addition, we can't end the
        # trace here, because the actual work is done in `stream_events` and this method ends
        # before that.
        new_trace = (
            None
            if get_current_trace()
            else trace(
                workflow_name=run_config.workflow_name,
                trace_id=run_config.trace_id,
                group_id=run_config.group_id,
                metadata=run_config.trace_metadata,
                disabled=run_config.tracing_disabled,
            )
        )
        # Need to start the trace here, because the current trace contextvar is captured at
        # asyncio.create_task time
        if new_trace:
            new_trace.start(mark_as_current=True)

        output_schema = cls._get_output_schema(starting_agent)
        context_wrapper: RunContextWrapper[TContext] = RunContextWrapper(
            context=context  # type: ignore
        )

        # The result object is created up-front with empty/initial state; the background
        # task below mutates it as the run progresses. `_trace` (possibly None) is handed
        # over so the streaming result can finish the trace when the run completes.
        streamed_result = RunResultStreaming(
            input=copy.deepcopy(input),
            new_items=[],
            current_agent=starting_agent,
            raw_responses=[],
            final_output=None,
            is_complete=False,
            current_turn=0,
            max_turns=max_turns,
            input_guardrail_results=[],
            output_guardrail_results=[],
            _current_agent_output_schema=output_schema,
            _trace=new_trace,
        )

        # Kick off the actual agent loop in the background and return the streamed result object.
        streamed_result._run_impl_task = asyncio.create_task(
            cls._run_streamed_impl(
                starting_input=input,
                streamed_result=streamed_result,
                starting_agent=starting_agent,
                max_turns=max_turns,
                hooks=hooks,
                context_wrapper=context_wrapper,
                run_config=run_config,
            )
        )
        return streamed_result
|
|
425
|
+
|
|
426
|
+
    @classmethod
    async def _run_input_guardrails_with_queue(
        cls,
        agent: Agent[Any],
        guardrails: list[InputGuardrail[TContext]],
        input: str | list[TResponseInputItem],
        context: RunContextWrapper[TContext],
        streamed_result: RunResultStreaming,
        parent_span: Span[Any],
    ):
        """Run `guardrails` concurrently, pushing each result onto the streamed result's
        input-guardrail queue as it completes.

        A tripwire trigger is recorded as an error on `parent_span` (the result is still
        queued). If awaiting any guardrail raises, all remaining guardrail tasks are
        cancelled and the exception propagates. On success, the collected results are
        stored on `streamed_result.input_guardrail_results`.
        """
        queue = streamed_result._input_guardrail_queue

        # We'll run the guardrails and push them onto the queue as they complete
        guardrail_tasks = [
            asyncio.create_task(
                RunImpl.run_single_input_guardrail(agent, guardrail, input, context)
            )
            for guardrail in guardrails
        ]
        guardrail_results = []
        try:
            # as_completed yields results in completion order, not submission order.
            for done in asyncio.as_completed(guardrail_tasks):
                result = await done
                if result.output.tripwire_triggered:
                    _utils.attach_error_to_span(
                        parent_span,
                        SpanError(
                            message="Guardrail tripwire triggered",
                            data={
                                "guardrail": result.guardrail.get_name(),
                                "type": "input_guardrail",
                            },
                        ),
                    )
                queue.put_nowait(result)
                guardrail_results.append(result)
        except Exception:
            # Don't leave orphaned guardrail tasks running if one of them failed.
            for t in guardrail_tasks:
                t.cancel()
            raise

        streamed_result.input_guardrail_results = guardrail_results
|
|
468
|
+
|
|
469
|
+
    @classmethod
    async def _run_streamed_impl(
        cls,
        starting_input: str | list[TResponseInputItem],
        streamed_result: RunResultStreaming,
        starting_agent: Agent[TContext],
        max_turns: int,
        hooks: RunHooks[TContext],
        context_wrapper: RunContextWrapper[TContext],
        run_config: RunConfig,
    ):
        """Background task driving the streaming agent loop.

        Mutates `streamed_result` in place and pushes stream events (and finally a
        `QueueCompleteSentinel`) onto its event queue. Runs until a final output is
        produced, `max_turns` is exceeded, or an exception occurs.
        """
        current_span: Span[AgentSpanData] | None = None
        current_agent = starting_agent
        current_turn = 0
        should_run_agent_start_hooks = True

        # Let consumers know which agent the run starts with.
        streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))

        try:
            while True:
                if streamed_result.is_complete:
                    break

                # Start an agent span if we don't have one. This span is ended if the current
                # agent changes, or if the agent loop ends.
                if current_span is None:
                    handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)]
                    tool_names = [t.name for t in current_agent.tools]
                    if output_schema := cls._get_output_schema(current_agent):
                        output_type_name = output_schema.output_type_name()
                    else:
                        # No output schema means the agent produces plain text.
                        output_type_name = "str"

                    current_span = agent_span(
                        name=current_agent.name,
                        handoffs=handoff_names,
                        tools=tool_names,
                        output_type=output_type_name,
                    )
                    current_span.start(mark_as_current=True)

                current_turn += 1
                streamed_result.current_turn = current_turn

                if current_turn > max_turns:
                    # Unlike the non-streaming path, we don't raise here; the error is
                    # attached to the span and the stream is closed with a sentinel.
                    _utils.attach_error_to_span(
                        current_span,
                        SpanError(
                            message="Max turns exceeded",
                            data={"max_turns": max_turns},
                        ),
                    )
                    streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                    break

                if current_turn == 1:
                    # Run the input guardrails in the background and put the results on the queue
                    streamed_result._input_guardrails_task = asyncio.create_task(
                        cls._run_input_guardrails_with_queue(
                            starting_agent,
                            starting_agent.input_guardrails + (run_config.input_guardrails or []),
                            copy.deepcopy(ItemHelpers.input_to_new_input_list(starting_input)),
                            context_wrapper,
                            streamed_result,
                            current_span,
                        )
                    )
                try:
                    turn_result = await cls._run_single_turn_streamed(
                        streamed_result,
                        current_agent,
                        hooks,
                        context_wrapper,
                        run_config,
                        should_run_agent_start_hooks,
                    )
                    should_run_agent_start_hooks = False

                    # Fold the turn's outputs back into the shared result object.
                    streamed_result.raw_responses = streamed_result.raw_responses + [
                        turn_result.model_response
                    ]
                    streamed_result.input = turn_result.original_input
                    streamed_result.new_items = turn_result.generated_items

                    if isinstance(turn_result.next_step, NextStepHandoff):
                        # Swap agents; a fresh span is started on the next iteration and
                        # consumers are notified of the agent change.
                        current_agent = turn_result.next_step.new_agent
                        current_span.finish(reset_current=True)
                        current_span = None
                        should_run_agent_start_hooks = True
                        streamed_result._event_queue.put_nowait(
                            AgentUpdatedStreamEvent(new_agent=current_agent)
                        )
                    elif isinstance(turn_result.next_step, NextStepFinalOutput):
                        streamed_result._output_guardrails_task = asyncio.create_task(
                            cls._run_output_guardrails(
                                current_agent.output_guardrails
                                + (run_config.output_guardrails or []),
                                current_agent,
                                turn_result.next_step.output,
                                context_wrapper,
                            )
                        )

                        try:
                            output_guardrail_results = await streamed_result._output_guardrails_task
                        except Exception:
                            # Exceptions will be checked in the stream_events loop
                            output_guardrail_results = []

                        streamed_result.output_guardrail_results = output_guardrail_results
                        streamed_result.final_output = turn_result.next_step.output
                        streamed_result.is_complete = True
                        streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                    elif isinstance(turn_result.next_step, NextStepRunAgain):
                        # Tool calls were executed; loop again with the same agent.
                        pass
                except Exception as e:
                    if current_span:
                        _utils.attach_error_to_span(
                            current_span,
                            SpanError(
                                message="Error in agent run",
                                data={"error": str(e)},
                            ),
                        )
                    # Close the stream before re-raising so consumers are not left waiting.
                    streamed_result.is_complete = True
                    streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                    raise

            streamed_result.is_complete = True
        finally:
            # Ensure the active span is closed on every exit path.
            if current_span:
                current_span.finish(reset_current=True)
|
|
601
|
+
|
|
602
|
+
    @classmethod
    async def _run_single_turn_streamed(
        cls,
        streamed_result: RunResultStreaming,
        agent: Agent[TContext],
        hooks: RunHooks[TContext],
        context_wrapper: RunContextWrapper[TContext],
        run_config: RunConfig,
        should_run_agent_start_hooks: bool,
    ) -> SingleStepResult:
        """Run one turn of `agent` in streaming mode.

        Streams every raw model event onto `streamed_result`'s event queue, then
        processes the completed response like the non-streaming path and returns the
        resulting `SingleStepResult`.

        Raises:
            ModelBehaviorError: If the model stream ends without a
                `ResponseCompletedEvent`.
        """
        if should_run_agent_start_hooks:
            # Run-level and agent-level start hooks run concurrently.
            await asyncio.gather(
                hooks.on_agent_start(context_wrapper, agent),
                (
                    agent.hooks.on_start(context_wrapper, agent)
                    if agent.hooks
                    else _utils.noop_coroutine()
                ),
            )

        output_schema = cls._get_output_schema(agent)

        streamed_result.current_agent = agent
        streamed_result._current_agent_output_schema = output_schema

        system_prompt = await agent.get_system_prompt(context_wrapper)

        handoffs = cls._get_handoffs(agent)

        model = cls._get_model(agent, run_config)
        model_settings = agent.model_settings.resolve(run_config.model_settings)
        final_response: ModelResponse | None = None

        # The model input is the accumulated original input plus all items generated so far.
        input = ItemHelpers.input_to_new_input_list(streamed_result.input)
        input.extend([item.to_input_item() for item in streamed_result.new_items])

        # 1. Stream the output events
        async for event in model.stream_response(
            system_prompt,
            input,
            model_settings,
            agent.tools,
            output_schema,
            handoffs,
            get_model_tracing_impl(
                run_config.tracing_disabled, run_config.trace_include_sensitive_data
            ),
        ):
            if isinstance(event, ResponseCompletedEvent):
                # The completed event carries the full response; capture it (and its
                # usage, when reported) as this turn's model response.
                usage = (
                    Usage(
                        requests=1,
                        input_tokens=event.response.usage.input_tokens,
                        output_tokens=event.response.usage.output_tokens,
                        total_tokens=event.response.usage.total_tokens,
                    )
                    if event.response.usage
                    else Usage()
                )
                final_response = ModelResponse(
                    output=event.response.output,
                    usage=usage,
                    referenceable_id=event.response.id,
                )

            # Every raw event (including the completed one) is forwarded to consumers.
            streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))

        # 2. At this point, the streaming is complete for this turn of the agent loop.
        if not final_response:
            raise ModelBehaviorError("Model did not produce a final response!")

        # 3. Now, we can process the turn as we do in the non-streaming case
        single_step_result = await cls._get_single_step_result_from_response(
            agent=agent,
            original_input=streamed_result.input,
            pre_step_items=streamed_result.new_items,
            new_response=final_response,
            output_schema=output_schema,
            handoffs=handoffs,
            hooks=hooks,
            context_wrapper=context_wrapper,
            run_config=run_config,
        )

        RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue)
        return single_step_result
|
|
688
|
+
|
|
689
|
+
    @classmethod
    async def _run_single_turn(
        cls,
        *,
        agent: Agent[TContext],
        original_input: str | list[TResponseInputItem],
        generated_items: list[RunItem],
        hooks: RunHooks[TContext],
        context_wrapper: RunContextWrapper[TContext],
        run_config: RunConfig,
        should_run_agent_start_hooks: bool,
    ) -> SingleStepResult:
        """Run one (non-streaming) turn of `agent`: invoke the model once and process
        the response into a `SingleStepResult`."""
        # Ensure we run the hooks before anything else
        if should_run_agent_start_hooks:
            # Run-level and agent-level start hooks run concurrently.
            await asyncio.gather(
                hooks.on_agent_start(context_wrapper, agent),
                (
                    agent.hooks.on_start(context_wrapper, agent)
                    if agent.hooks
                    else _utils.noop_coroutine()
                ),
            )

        system_prompt = await agent.get_system_prompt(context_wrapper)

        output_schema = cls._get_output_schema(agent)
        handoffs = cls._get_handoffs(agent)
        # The model input is the accumulated original input plus all items generated so far.
        input = ItemHelpers.input_to_new_input_list(original_input)
        input.extend([generated_item.to_input_item() for generated_item in generated_items])

        new_response = await cls._get_new_response(
            agent,
            system_prompt,
            input,
            output_schema,
            handoffs,
            context_wrapper,
            run_config,
        )

        return await cls._get_single_step_result_from_response(
            agent=agent,
            original_input=original_input,
            pre_step_items=generated_items,
            new_response=new_response,
            output_schema=output_schema,
            handoffs=handoffs,
            hooks=hooks,
            context_wrapper=context_wrapper,
            run_config=run_config,
        )
|
|
740
|
+
|
|
741
|
+
@classmethod
async def _get_single_step_result_from_response(
    cls,
    *,
    agent: Agent[TContext],
    original_input: str | list[TResponseInputItem],
    pre_step_items: list[RunItem],
    new_response: ModelResponse,
    output_schema: AgentOutputSchema | None,
    handoffs: list[Handoff],
    hooks: RunHooks[TContext],
    context_wrapper: RunContextWrapper[TContext],
    run_config: RunConfig,
) -> SingleStepResult:
    """Convert a raw model response into a completed SingleStepResult.

    Parses the response (tool calls, handoff requests, final output) and then
    executes any resulting tools and side effects.
    """
    parsed = RunImpl.process_model_response(
        agent=agent,
        response=new_response,
        output_schema=output_schema,
        handoffs=handoffs,
    )
    return await RunImpl.execute_tools_and_side_effects(
        agent=agent,
        original_input=original_input,
        pre_step_items=pre_step_items,
        new_response=new_response,
        processed_response=parsed,
        output_schema=output_schema,
        hooks=hooks,
        context_wrapper=context_wrapper,
        run_config=run_config,
    )
|
|
772
|
+
|
|
773
|
+
@classmethod
async def _run_input_guardrails(
    cls,
    agent: Agent[Any],
    guardrails: list[InputGuardrail[TContext]],
    input: str | list[TResponseInputItem],
    context: RunContextWrapper[TContext],
) -> list[InputGuardrailResult]:
    """Run every input guardrail concurrently and collect the results.

    If any guardrail trips its tripwire, the remaining guardrail tasks are
    cancelled, the failure is attached to the current trace span, and
    InputGuardrailTripwireTriggered is raised.
    """
    if not guardrails:
        return []

    tasks = [
        asyncio.create_task(
            RunImpl.run_single_input_guardrail(agent, guardrail, input, context)
        )
        for guardrail in guardrails
    ]

    results: list[InputGuardrailResult] = []
    for next_done in asyncio.as_completed(tasks):
        result = await next_done
        if not result.output.tripwire_triggered:
            results.append(result)
            continue
        # A tripwire fired: stop the still-running guardrails and surface
        # the failure on the active trace span before raising.
        for task in tasks:
            task.cancel()
        _utils.attach_error_to_current_span(
            SpanError(
                message="Guardrail tripwire triggered",
                data={"guardrail": result.guardrail.get_name()},
            )
        )
        raise InputGuardrailTripwireTriggered(result)

    return results
|
|
810
|
+
|
|
811
|
+
@classmethod
async def _run_output_guardrails(
    cls,
    guardrails: list[OutputGuardrail[TContext]],
    agent: Agent[TContext],
    agent_output: Any,
    context: RunContextWrapper[TContext],
) -> list[OutputGuardrailResult]:
    """Run every output guardrail concurrently and collect the results.

    If any guardrail trips its tripwire, the remaining guardrail tasks are
    cancelled, the failure is attached to the current trace span, and
    OutputGuardrailTripwireTriggered is raised.
    """
    if not guardrails:
        return []

    tasks = [
        asyncio.create_task(
            RunImpl.run_single_output_guardrail(guardrail, agent, agent_output, context)
        )
        for guardrail in guardrails
    ]

    results: list[OutputGuardrailResult] = []
    for next_done in asyncio.as_completed(tasks):
        result = await next_done
        if not result.output.tripwire_triggered:
            results.append(result)
            continue
        # A tripwire fired: stop the still-running guardrails and surface
        # the failure on the active trace span before raising.
        for task in tasks:
            task.cancel()
        _utils.attach_error_to_current_span(
            SpanError(
                message="Guardrail tripwire triggered",
                data={"guardrail": result.guardrail.get_name()},
            )
        )
        raise OutputGuardrailTripwireTriggered(result)

    return results
|
|
848
|
+
|
|
849
|
+
@classmethod
async def _get_new_response(
    cls,
    agent: Agent[TContext],
    system_prompt: str | None,
    input: list[TResponseInputItem],
    output_schema: AgentOutputSchema | None,
    handoffs: list[Handoff],
    context_wrapper: RunContextWrapper[TContext],
    run_config: RunConfig,
) -> ModelResponse:
    """Make one non-streaming model call and record its token usage."""
    model = cls._get_model(agent, run_config)
    # Run-level model settings take precedence over the agent's own settings.
    resolved_settings = agent.model_settings.resolve(run_config.model_settings)

    response = await model.get_response(
        system_instructions=system_prompt,
        input=input,
        model_settings=resolved_settings,
        tools=agent.tools,
        output_schema=output_schema,
        handoffs=handoffs,
        tracing=get_model_tracing_impl(
            run_config.tracing_disabled, run_config.trace_include_sensitive_data
        ),
    )

    # Accumulate usage on the shared run context so totals are reportable.
    context_wrapper.usage.add(response.usage)

    return response
|
|
877
|
+
|
|
878
|
+
@classmethod
def _get_output_schema(cls, agent: Agent[Any]) -> AgentOutputSchema | None:
    """Build the structured-output schema for the agent, or None for plain text."""
    output_type = agent.output_type
    # No declared type, or plain `str`, means free-form text output: identity
    # checks (not equality) match the original semantics for type objects.
    if output_type is None or output_type is str:
        return None
    return AgentOutputSchema(output_type)
|
|
884
|
+
|
|
885
|
+
@classmethod
def _get_handoffs(cls, agent: Agent[Any]) -> list[Handoff]:
    """Normalize the agent's handoffs list to Handoff objects.

    Already-constructed Handoffs pass through unchanged; bare Agents are
    wrapped via handoff(). Anything else is silently dropped, matching the
    original behavior.
    """
    return [
        item if isinstance(item, Handoff) else handoff(item)
        for item in agent.handoffs
        if isinstance(item, (Handoff, Agent))
    ]
|
|
894
|
+
|
|
895
|
+
@classmethod
def _get_model(cls, agent: Agent[Any], run_config: RunConfig) -> Model:
    """Resolve the Model for this turn; run_config overrides the agent."""
    run_model = run_config.model
    if isinstance(run_model, Model):
        return run_model
    if isinstance(run_model, str):
        # A run-level model *name* is resolved through the run's provider.
        return run_config.model_provider.get_model(run_model)
    if isinstance(agent.model, Model):
        return agent.model
    # Fall back to resolving the agent's model name via the provider.
    return run_config.model_provider.get_model(agent.model)
|