openai-agents 0.0.16__py3-none-any.whl → 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of openai-agents might be problematic.
- agents/__init__.py +8 -0
- agents/_run_impl.py +11 -5
- agents/agent.py +33 -3
- agents/agent_output.py +1 -1
- agents/exceptions.py +38 -5
- agents/extensions/models/litellm_model.py +13 -2
- agents/extensions/visualization.py +35 -18
- agents/function_schema.py +7 -5
- agents/handoffs.py +3 -3
- agents/mcp/server.py +9 -9
- agents/mcp/util.py +1 -1
- agents/model_settings.py +15 -0
- agents/models/interface.py +6 -0
- agents/models/openai_chatcompletions.py +26 -6
- agents/models/openai_responses.py +10 -0
- agents/prompts.py +76 -0
- agents/repl.py +65 -0
- agents/result.py +43 -13
- agents/run.py +48 -8
- agents/stream_events.py +1 -0
- agents/tool.py +26 -5
- agents/tool_context.py +29 -0
- agents/tracing/processors.py +29 -3
- agents/util/_pretty_print.py +12 -0
- agents/voice/model.py +2 -0
- {openai_agents-0.0.16.dist-info → openai_agents-0.0.18.dist-info}/METADATA +6 -3
- {openai_agents-0.0.16.dist-info → openai_agents-0.0.18.dist-info}/RECORD +29 -26
- {openai_agents-0.0.16.dist-info → openai_agents-0.0.18.dist-info}/WHEEL +0 -0
- {openai_agents-0.0.16.dist-info → openai_agents-0.0.18.dist-info}/licenses/LICENSE +0 -0
agents/models/openai_chatcompletions.py
CHANGED

@@ -9,6 +9,7 @@ from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.responses import Response
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from .. import _debug
@@ -53,6 +54,7 @@ class OpenAIChatCompletionsModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
@@ -69,14 +71,25 @@ class OpenAIChatCompletionsModel(Model):
                 span_generation,
                 tracing,
                 stream=False,
+                prompt=prompt,
             )
 
+            first_choice = response.choices[0]
+            message = first_choice.message
+
             if _debug.DONT_LOG_MODEL_DATA:
                 logger.debug("Received model response")
             else:
-                logger.debug(
-                    f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
-                )
+                if message is not None:
+                    logger.debug(
+                        "LLM resp:\n%s\n",
+                        json.dumps(message.model_dump(), indent=2),
+                    )
+                else:
+                    logger.debug(
+                        "LLM resp had no message. finish_reason: %s",
+                        first_choice.finish_reason,
+                    )
 
             usage = (
                 Usage(
@@ -101,13 +114,15 @@ class OpenAIChatCompletionsModel(Model):
                 else Usage()
             )
             if tracing.include_data():
-                span_generation.span_data.output = [response.choices[0].message.model_dump()]
+                span_generation.span_data.output = (
+                    [message.model_dump()] if message is not None else []
+                )
             span_generation.span_data.usage = {
                 "input_tokens": usage.input_tokens,
                 "output_tokens": usage.output_tokens,
             }
 
-            items = Converter.message_to_output_items(response.choices[0].message)
+            items = Converter.message_to_output_items(message) if message is not None else []
 
             return ModelResponse(
                 output=items,
@@ -124,8 +139,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -145,6 +160,7 @@ class OpenAIChatCompletionsModel(Model):
                 span_generation,
                 tracing,
                 stream=True,
+                prompt=prompt,
             )
 
             final_response: Response | None = None
@@ -175,6 +191,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...
 
     @overload
@@ -189,6 +206,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion: ...
 
     async def _fetch_response(
@@ -202,6 +220,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: bool = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
         converted_messages = Converter.items_to_messages(input)
 
@@ -269,6 +288,7 @@ class OpenAIChatCompletionsModel(Model):
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
+            **(model_settings.extra_args or {}),
         )
 
         if isinstance(ret, ChatCompletion):
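A note on the `**(model_settings.extra_args or {})` spread above: it forwards arbitrary keyword arguments to the underlying `chat.completions.create` call. A minimal sketch of how a caller might use it, assuming the `extra_args` field that `agents/model_settings.py` gains in this release (the parameter values are illustrative):

from agents import Agent, ModelSettings

# extra_args is passed through to the model API call unchanged, so
# provider-specific parameters that ModelSettings does not model
# (e.g. "user" on Chat Completions) can still be supplied.
agent = Agent(
    name="Assistant",
    instructions="You are a helpful assistant.",
    model_settings=ModelSettings(
        temperature=0.3,
        extra_args={"user": "user_12345"},  # illustrative value
    ),
)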
agents/models/openai_responses.py
CHANGED

@@ -17,6 +17,7 @@ from openai.types.responses import (
     WebSearchToolParam,
     response_create_params,
 )
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 
 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -74,6 +75,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
             try:
@@ -86,6 +88,7 @@ class OpenAIResponsesModel(Model):
                     handoffs,
                     previous_response_id,
                     stream=False,
+                    prompt=prompt,
                 )
 
                 if _debug.DONT_LOG_MODEL_DATA:
@@ -141,6 +144,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -156,6 +160,7 @@ class OpenAIResponsesModel(Model):
                 handoffs,
                 previous_response_id,
                 stream=True,
+                prompt=prompt,
             )
 
             final_response: Response | None = None
@@ -192,6 +197,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...
 
     @overload
@@ -205,6 +211,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> Response: ...
 
     async def _fetch_response(
@@ -217,6 +224,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True] | Literal[False] = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
         list_input = ItemHelpers.input_to_new_input_list(input)
 
@@ -252,6 +260,7 @@ class OpenAIResponsesModel(Model):
             input=list_input,
             include=converted_tools.includes,
             tools=converted_tools.tools,
+            prompt=self._non_null_or_not_given(prompt),
             temperature=self._non_null_or_not_given(model_settings.temperature),
             top_p=self._non_null_or_not_given(model_settings.top_p),
             truncation=self._non_null_or_not_given(model_settings.truncation),
@@ -266,6 +275,7 @@ class OpenAIResponsesModel(Model):
             store=self._non_null_or_not_given(model_settings.store),
             reasoning=self._non_null_or_not_given(model_settings.reasoning),
             metadata=self._non_null_or_not_given(model_settings.metadata),
+            **(model_settings.extra_args or {}),
         )
 
     def _get_client(self) -> AsyncOpenAI:
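The `prompt` parameter threaded through `_fetch_response` above lets a run reference a prompt stored on the OpenAI platform instead of inline instructions; it applies only to the Responses API path. A hedged sketch, assuming the `prompt` field that `agents/agent.py` gains in this release, with a placeholder prompt ID:

from agents import Agent, Runner

# The dict mirrors ResponsePromptParam: a stored prompt ID plus optional
# version and variables. "pmpt_abc123" is a placeholder, not a real ID.
agent = Agent(
    name="Support agent",
    prompt={
        "id": "pmpt_abc123",
        "version": "1",
        "variables": {"customer_name": "Ada"},
    },
)


async def main() -> None:
    result = await Runner.run(agent, "My order is late.")
    print(result.final_output)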
agents/prompts.py
ADDED

@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import inspect
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Callable
+
+from openai.types.responses.response_prompt_param import (
+    ResponsePromptParam,
+    Variables as ResponsesPromptVariables,
+)
+from typing_extensions import NotRequired, TypedDict
+
+from agents.util._types import MaybeAwaitable
+
+from .exceptions import UserError
+from .run_context import RunContextWrapper
+
+if TYPE_CHECKING:
+    from .agent import Agent
+
+
+class Prompt(TypedDict):
+    """Prompt configuration to use for interacting with an OpenAI model."""
+
+    id: str
+    """The unique ID of the prompt."""
+
+    version: NotRequired[str]
+    """Optional version of the prompt."""
+
+    variables: NotRequired[dict[str, ResponsesPromptVariables]]
+    """Optional variables to substitute into the prompt."""
+
+
+@dataclass
+class GenerateDynamicPromptData:
+    """Inputs to a function that allows you to dynamically generate a prompt."""
+
+    context: RunContextWrapper[Any]
+    """The run context."""
+
+    agent: Agent[Any]
+    """The agent for which the prompt is being generated."""
+
+
+DynamicPromptFunction = Callable[[GenerateDynamicPromptData], MaybeAwaitable[Prompt]]
+"""A function that dynamically generates a prompt."""
+
+
+class PromptUtil:
+    @staticmethod
+    async def to_model_input(
+        prompt: Prompt | DynamicPromptFunction | None,
+        context: RunContextWrapper[Any],
+        agent: Agent[Any],
+    ) -> ResponsePromptParam | None:
+        if prompt is None:
+            return None
+
+        resolved_prompt: Prompt
+        if isinstance(prompt, dict):
+            resolved_prompt = prompt
+        else:
+            func_result = prompt(GenerateDynamicPromptData(context=context, agent=agent))
+            if inspect.isawaitable(func_result):
+                resolved_prompt = await func_result
+            else:
+                resolved_prompt = func_result
+            if not isinstance(resolved_prompt, dict):
+                raise UserError("Dynamic prompt function must return a Prompt")
+
+        return {
+            "id": resolved_prompt["id"],
+            "version": resolved_prompt.get("version"),
+            "variables": resolved_prompt.get("variables"),
+        }
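Since `PromptUtil.to_model_input` also accepts a callable, the prompt can be chosen per run from the context. A small sketch, assuming the run context is a plain dict; both prompt IDs are placeholders:

from typing import Any

from agents import Agent
from agents.prompts import GenerateDynamicPromptData, Prompt


async def pick_prompt(data: GenerateDynamicPromptData) -> Prompt:
    # data.context wraps the user-supplied run context; data.agent is the
    # agent being run. Both prompt IDs below are placeholders.
    ctx: dict[str, Any] = data.context.context or {}
    if ctx.get("beta_user"):
        return {"id": "pmpt_beta", "version": "2"}
    return {"id": "pmpt_stable"}


agent = Agent(name="Assistant", prompt=pick_prompt)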
agents/repl.py
ADDED

@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from typing import Any
+
+from openai.types.responses.response_text_delta_event import ResponseTextDeltaEvent
+
+from .agent import Agent
+from .items import ItemHelpers, TResponseInputItem
+from .result import RunResultBase
+from .run import Runner
+from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
+
+
+async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
+    """Run a simple REPL loop with the given agent.
+
+    This utility allows quick manual testing and debugging of an agent from the
+    command line. Conversation state is preserved across turns. Enter ``exit``
+    or ``quit`` to stop the loop.
+
+    Args:
+        agent: The starting agent to run.
+        stream: Whether to stream the agent output.
+    """
+
+    current_agent = agent
+    input_items: list[TResponseInputItem] = []
+    while True:
+        try:
+            user_input = input(" > ")
+        except (EOFError, KeyboardInterrupt):
+            print()
+            break
+        if user_input.strip().lower() in {"exit", "quit"}:
+            break
+        if not user_input:
+            continue
+
+        input_items.append({"role": "user", "content": user_input})
+
+        result: RunResultBase
+        if stream:
+            result = Runner.run_streamed(current_agent, input=input_items)
+            async for event in result.stream_events():
+                if isinstance(event, RawResponsesStreamEvent):
+                    if isinstance(event.data, ResponseTextDeltaEvent):
+                        print(event.data.delta, end="", flush=True)
+                elif isinstance(event, RunItemStreamEvent):
+                    if event.item.type == "tool_call_item":
+                        print("\n[tool called]", flush=True)
+                    elif event.item.type == "tool_call_output_item":
+                        print(f"\n[tool output: {event.item.output}]", flush=True)
+                    elif event.item.type == "message_output_item":
+                        message = ItemHelpers.text_message_output(event.item)
+                        print(message, end="", flush=True)
+                elif isinstance(event, AgentUpdatedStreamEvent):
+                    print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
+            print()
+        else:
+            result = await Runner.run(current_agent, input_items)
+            if result.final_output is not None:
+                print(result.final_output)
+
+        current_agent = result.last_agent
+        input_items = result.to_input_list()
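`run_demo_loop` appears to be exported at the package level in this release (see the `agents/__init__.py +8 -0` entry above), so wiring it up takes a few lines:

import asyncio

from agents import Agent, run_demo_loop


async def main() -> None:
    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
    # Streams output as it arrives; type "exit" or "quit" (or Ctrl-D) to stop.
    await run_demo_loop(agent)


if __name__ == "__main__":
    asyncio.run(main())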
agents/result.py
CHANGED

@@ -11,14 +11,22 @@ from typing_extensions import TypeVar
 from ._run_impl import QueueCompleteSentinel
 from .agent import Agent
 from .agent_output import AgentOutputSchemaBase
-from .exceptions import InputGuardrailTripwireTriggered, MaxTurnsExceeded
+from .exceptions import (
+    AgentsException,
+    InputGuardrailTripwireTriggered,
+    MaxTurnsExceeded,
+    RunErrorDetails,
+)
 from .guardrail import InputGuardrailResult, OutputGuardrailResult
 from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
 from .logger import logger
 from .run_context import RunContextWrapper
 from .stream_events import StreamEvent
 from .tracing import Trace
-from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
+from .util._pretty_print import (
+    pretty_print_result,
+    pretty_print_run_result_streaming,
+)
 
 if TYPE_CHECKING:
     from ._run_impl import QueueCompleteSentinel
@@ -206,31 +214,53 @@ class RunResultStreaming(RunResultBase):
         if self._stored_exception:
             raise self._stored_exception
 
+    def _create_error_details(self) -> RunErrorDetails:
+        """Return a `RunErrorDetails` object considering the current attributes of the class."""
+        return RunErrorDetails(
+            input=self.input,
+            new_items=self.new_items,
+            raw_responses=self.raw_responses,
+            last_agent=self.current_agent,
+            context_wrapper=self.context_wrapper,
+            input_guardrail_results=self.input_guardrail_results,
+            output_guardrail_results=self.output_guardrail_results,
+        )
+
     def _check_errors(self):
         if self.current_turn > self.max_turns:
-            self._stored_exception = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded")
+            max_turns_exc = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded")
+            max_turns_exc.run_data = self._create_error_details()
+            self._stored_exception = max_turns_exc
 
         # Fetch all the completed guardrail results from the queue and raise if needed
         while not self._input_guardrail_queue.empty():
             guardrail_result = self._input_guardrail_queue.get_nowait()
             if guardrail_result.output.tripwire_triggered:
-                self._stored_exception = InputGuardrailTripwireTriggered(guardrail_result)
+                tripwire_exc = InputGuardrailTripwireTriggered(guardrail_result)
+                tripwire_exc.run_data = self._create_error_details()
+                self._stored_exception = tripwire_exc
 
         # Check the tasks for any exceptions
         if self._run_impl_task and self._run_impl_task.done():
-            exc = self._run_impl_task.exception()
-            if exc and isinstance(exc, Exception):
-                self._stored_exception = exc
+            run_impl_exc = self._run_impl_task.exception()
+            if run_impl_exc and isinstance(run_impl_exc, Exception):
+                if isinstance(run_impl_exc, AgentsException) and run_impl_exc.run_data is None:
+                    run_impl_exc.run_data = self._create_error_details()
+                self._stored_exception = run_impl_exc
 
         if self._input_guardrails_task and self._input_guardrails_task.done():
-            exc = self._input_guardrails_task.exception()
-            if exc and isinstance(exc, Exception):
-                self._stored_exception = exc
+            in_guard_exc = self._input_guardrails_task.exception()
+            if in_guard_exc and isinstance(in_guard_exc, Exception):
+                if isinstance(in_guard_exc, AgentsException) and in_guard_exc.run_data is None:
+                    in_guard_exc.run_data = self._create_error_details()
+                self._stored_exception = in_guard_exc
 
         if self._output_guardrails_task and self._output_guardrails_task.done():
-            exc = self._output_guardrails_task.exception()
-            if exc and isinstance(exc, Exception):
-                self._stored_exception = exc
+            out_guard_exc = self._output_guardrails_task.exception()
+            if out_guard_exc and isinstance(out_guard_exc, Exception):
+                if isinstance(out_guard_exc, AgentsException) and out_guard_exc.run_data is None:
+                    out_guard_exc.run_data = self._create_error_details()
+                self._stored_exception = out_guard_exc
 
     def _cleanup_tasks(self):
         if self._run_impl_task and not self._run_impl_task.done():
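Because `_check_errors` now attaches `RunErrorDetails` before storing an exception, a consumer of a streamed run can inspect the partial run state after a failure. A hedged sketch:

from agents import Agent, MaxTurnsExceeded, Runner


async def run_with_diagnostics(agent: Agent) -> None:
    result = Runner.run_streamed(agent, input="Plan a trip", max_turns=2)
    try:
        async for _event in result.stream_events():
            pass
    except MaxTurnsExceeded as exc:
        # run_data is the RunErrorDetails built by _create_error_details().
        if exc.run_data is not None:
            print("last agent:", exc.run_data.last_agent.name)
            print("items produced:", len(exc.run_data.new_items))
        raise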
agents/run.py
CHANGED

@@ -6,6 +6,9 @@ from dataclasses import dataclass, field
 from typing import Any, cast
 
 from openai.types.responses import ResponseCompletedEvent
+from openai.types.responses.response_prompt_param import (
+    ResponsePromptParam,
+)
 
 from ._run_impl import (
     AgentToolUseTracker,
@@ -26,6 +29,7 @@ from .exceptions import (
     MaxTurnsExceeded,
     ModelBehaviorError,
     OutputGuardrailTripwireTriggered,
+    RunErrorDetails,
 )
 from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult
 from .handoffs import Handoff, HandoffInputFilter, handoff
@@ -180,6 +184,8 @@ class Runner:
 
         try:
             while True:
+                all_tools = await cls._get_all_tools(current_agent, context_wrapper)
+
                 # Start an agent span if we don't have one. This span is ended if the current
                 # agent changes, or if the agent loop ends.
                 if current_span is None:
@@ -195,8 +201,6 @@ class Runner:
                         output_type=output_type_name,
                     )
                     current_span.start(mark_as_current=True)
-
-                    all_tools = await cls._get_all_tools(current_agent)
                     current_span.span_data.tools = [t.name for t in all_tools]
 
                 current_turn += 1
@@ -283,6 +287,17 @@ class Runner:
                     raise AgentsException(
                         f"Unknown next step type: {type(turn_result.next_step)}"
                     )
+        except AgentsException as exc:
+            exc.run_data = RunErrorDetails(
+                input=original_input,
+                new_items=generated_items,
+                raw_responses=model_responses,
+                last_agent=current_agent,
+                context_wrapper=context_wrapper,
+                input_guardrail_results=input_guardrail_results,
+                output_guardrail_results=[],
+            )
+            raise
         finally:
             if current_span:
                 current_span.finish(reset_current=True)
@@ -513,6 +528,8 @@ class Runner:
                 if streamed_result.is_complete:
                     break
 
+                all_tools = await cls._get_all_tools(current_agent, context_wrapper)
+
                 # Start an agent span if we don't have one. This span is ended if the current
                 # agent changes, or if the agent loop ends.
                 if current_span is None:
@@ -528,8 +545,6 @@ class Runner:
                         output_type=output_type_name,
                     )
                     current_span.start(mark_as_current=True)
-
-                    all_tools = await cls._get_all_tools(current_agent)
                     tool_names = [t.name for t in all_tools]
                     current_span.span_data.tools = tool_names
                 current_turn += 1
@@ -609,6 +624,19 @@ class Runner:
                         streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                     elif isinstance(turn_result.next_step, NextStepRunAgain):
                         pass
+        except AgentsException as exc:
+            streamed_result.is_complete = True
+            streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+            exc.run_data = RunErrorDetails(
+                input=streamed_result.input,
+                new_items=streamed_result.new_items,
+                raw_responses=streamed_result.raw_responses,
+                last_agent=current_agent,
+                context_wrapper=context_wrapper,
+                input_guardrail_results=streamed_result.input_guardrail_results,
+                output_guardrail_results=streamed_result.output_guardrail_results,
+            )
+            raise
         except Exception as e:
             if current_span:
                 _error_tracing.attach_error_to_span(
@@ -657,7 +685,10 @@ class Runner:
         streamed_result.current_agent = agent
         streamed_result._current_agent_output_schema = output_schema
 
-        system_prompt = await agent.get_system_prompt(context_wrapper)
+        system_prompt, prompt_config = await asyncio.gather(
+            agent.get_system_prompt(context_wrapper),
+            agent.get_prompt(context_wrapper),
+        )
 
         handoffs = cls._get_handoffs(agent)
         model = cls._get_model(agent, run_config)
@@ -681,6 +712,7 @@ class Runner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
            previous_response_id=previous_response_id,
+            prompt=prompt_config,
        ):
            if isinstance(event, ResponseCompletedEvent):
                usage = (
@@ -752,7 +784,10 @@ class Runner:
            ),
        )
 
-        system_prompt = await agent.get_system_prompt(context_wrapper)
+        system_prompt, prompt_config = await asyncio.gather(
+            agent.get_system_prompt(context_wrapper),
+            agent.get_prompt(context_wrapper),
+        )
 
         output_schema = cls._get_output_schema(agent)
         handoffs = cls._get_handoffs(agent)
@@ -770,6 +805,7 @@ class Runner:
             run_config,
             tool_use_tracker,
             previous_response_id,
+            prompt_config,
         )
 
         return await cls._get_single_step_result_from_response(
@@ -913,6 +949,7 @@ class Runner:
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
@@ -929,6 +966,7 @@ class Runner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            prompt=prompt_config,
         )
 
         context_wrapper.usage.add(new_response.usage)
@@ -955,8 +993,10 @@ class Runner:
         return handoffs
 
     @classmethod
-    async def _get_all_tools(cls, agent: Agent[Any]) -> list[Tool]:
-        return await agent.get_all_tools()
+    async def _get_all_tools(
+        cls, agent: Agent[Any], context_wrapper: RunContextWrapper[Any]
+    ) -> list[Tool]:
+        return await agent.get_all_tools(context_wrapper)
 
     @classmethod
     def _get_model(cls, agent: Agent[Any], run_config: RunConfig) -> Model:
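The matching `except AgentsException` blocks in `Runner.run` and the streaming path mean the same diagnostics are available on non-streamed runs. A sketch:

from agents import Agent, AgentsException, Runner


async def safe_run(agent: Agent, user_input: str) -> str:
    try:
        result = await Runner.run(agent, user_input)
        return str(result.final_output)
    except AgentsException as exc:
        # run_data is populated by the new except block before re-raising.
        if exc.run_data is not None:
            print(f"run failed after {len(exc.run_data.raw_responses)} model responses")
        raise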
agents/stream_events.py
CHANGED