openai-agents 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.

This release of openai-agents was flagged as potentially problematic.

agents/items.py CHANGED
@@ -18,7 +18,22 @@ from openai.types.responses import (
     ResponseOutputText,
     ResponseStreamEvent,
 )
-from openai.types.responses.response_input_item_param import ComputerCallOutput, FunctionCallOutput
+from openai.types.responses.response_code_interpreter_tool_call import (
+    ResponseCodeInterpreterToolCall,
+)
+from openai.types.responses.response_input_item_param import (
+    ComputerCallOutput,
+    FunctionCallOutput,
+    LocalShellCallOutput,
+    McpApprovalResponse,
+)
+from openai.types.responses.response_output_item import (
+    ImageGenerationCall,
+    LocalShellCall,
+    McpApprovalRequest,
+    McpCall,
+    McpListTools,
+)
 from openai.types.responses.response_reasoning_item import ResponseReasoningItem
 from pydantic import BaseModel
 from typing_extensions import TypeAlias
@@ -108,6 +123,10 @@ ToolCallItemTypes: TypeAlias = Union[
     ResponseComputerToolCall,
     ResponseFileSearchToolCall,
     ResponseFunctionWebSearch,
+    McpCall,
+    ResponseCodeInterpreterToolCall,
+    ImageGenerationCall,
+    LocalShellCall,
 ]
 """A type that represents a tool call item."""

@@ -123,10 +142,12 @@ class ToolCallItem(RunItemBase[ToolCallItemTypes]):


 @dataclass
-class ToolCallOutputItem(RunItemBase[Union[FunctionCallOutput, ComputerCallOutput]]):
+class ToolCallOutputItem(
+    RunItemBase[Union[FunctionCallOutput, ComputerCallOutput, LocalShellCallOutput]]
+):
     """Represents the output of a tool call."""

-    raw_item: FunctionCallOutput | ComputerCallOutput
+    raw_item: FunctionCallOutput | ComputerCallOutput | LocalShellCallOutput
     """The raw item from the model."""

     output: Any
@@ -147,6 +168,36 @@ class ReasoningItem(RunItemBase[ResponseReasoningItem]):
     type: Literal["reasoning_item"] = "reasoning_item"


+@dataclass
+class MCPListToolsItem(RunItemBase[McpListTools]):
+    """Represents a call to an MCP server to list tools."""
+
+    raw_item: McpListTools
+    """The raw MCP list tools call."""
+
+    type: Literal["mcp_list_tools_item"] = "mcp_list_tools_item"
+
+
+@dataclass
+class MCPApprovalRequestItem(RunItemBase[McpApprovalRequest]):
+    """Represents a request for MCP approval."""
+
+    raw_item: McpApprovalRequest
+    """The raw MCP approval request."""
+
+    type: Literal["mcp_approval_request_item"] = "mcp_approval_request_item"
+
+
+@dataclass
+class MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):
+    """Represents a response to an MCP approval request."""
+
+    raw_item: McpApprovalResponse
+    """The raw MCP approval response."""
+
+    type: Literal["mcp_approval_response_item"] = "mcp_approval_response_item"
+
+
 RunItem: TypeAlias = Union[
     MessageOutputItem,
     HandoffCallItem,
@@ -154,6 +205,9 @@ RunItem: TypeAlias = Union[
     ToolCallItem,
     ToolCallOutputItem,
     ReasoningItem,
+    MCPListToolsItem,
+    MCPApprovalRequestItem,
+    MCPApprovalResponseItem,
 ]
 """An item generated by an agent."""
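
The new MCP item types surface MCP activity in result.new_items alongside the other run items. For orientation, here is a minimal sketch (not part of the diff) of how they could be consumed; the agent and the prompt are placeholders:

    import asyncio

    from agents import Agent, Runner
    from agents.items import MCPApprovalRequestItem, MCPListToolsItem

    async def main() -> None:
        agent = Agent(name="assistant", instructions="Answer briefly.")  # placeholder agent
        result = await Runner.run(agent, "What tools can you use?")
        for item in result.new_items:
            # The new dataclasses make MCP activity visible as typed run items.
            if isinstance(item, MCPListToolsItem):
                print("MCP list_tools call:", item.raw_item)
            elif isinstance(item, MCPApprovalRequestItem):
                print("MCP approval requested:", item.raw_item)

    asyncio.run(main())
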
agents/mcp/server.py CHANGED
@@ -12,7 +12,7 @@ from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_cli
 from mcp.client.sse import sse_client
 from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client
 from mcp.shared.message import SessionMessage
-from mcp.types import CallToolResult
+from mcp.types import CallToolResult, InitializeResult
 from typing_extensions import NotRequired, TypedDict

 from ..exceptions import UserError
@@ -73,6 +73,7 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         self.exit_stack: AsyncExitStack = AsyncExitStack()
         self._cleanup_lock: asyncio.Lock = asyncio.Lock()
         self.cache_tools_list = cache_tools_list
+        self.server_initialize_result: InitializeResult | None = None

         self.client_session_timeout_seconds = client_session_timeout_seconds

@@ -87,7 +88,7 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         tuple[
             MemoryObjectReceiveStream[SessionMessage | Exception],
             MemoryObjectSendStream[SessionMessage],
-            GetSessionIdCallback | None
+            GetSessionIdCallback | None,
         ]
     ]:
         """Create the streams for the server."""
@@ -122,7 +123,8 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
                     else None,
                 )
             )
-            await session.initialize()
+            server_result = await session.initialize()
+            self.server_initialize_result = server_result
             self.session = session
         except Exception as e:
             logger.error(f"Error initializing MCP server: {e}")
@@ -241,7 +243,7 @@ class MCPServerStdio(_MCPServerWithClientSession):
         tuple[
             MemoryObjectReceiveStream[SessionMessage | Exception],
             MemoryObjectSendStream[SessionMessage],
-            GetSessionIdCallback | None
+            GetSessionIdCallback | None,
         ]
     ]:
         """Create the streams for the server."""
@@ -312,7 +314,7 @@ class MCPServerSse(_MCPServerWithClientSession):
         tuple[
             MemoryObjectReceiveStream[SessionMessage | Exception],
             MemoryObjectSendStream[SessionMessage],
-            GetSessionIdCallback | None
+            GetSessionIdCallback | None,
         ]
     ]:
         """Create the streams for the server."""
@@ -392,7 +394,7 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
         tuple[
             MemoryObjectReceiveStream[SessionMessage | Exception],
             MemoryObjectSendStream[SessionMessage],
-            GetSessionIdCallback | None
+            GetSessionIdCallback | None,
         ]
     ]:
         """Create the streams for the server."""
@@ -401,7 +403,7 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
             headers=self.params.get("headers", None),
             timeout=self.params.get("timeout", timedelta(seconds=30)),
             sse_read_timeout=self.params.get("sse_read_timeout", timedelta(seconds=60 * 5)),
-            terminate_on_close=self.params.get("terminate_on_close", True)
+            terminate_on_close=self.params.get("terminate_on_close", True),
         )

     @property
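
Since the result of session.initialize() is now retained, callers can inspect server metadata after connect(). A minimal sketch, assuming a stdio MCP server whose command is a placeholder:

    import asyncio

    from agents.mcp import MCPServerStdio

    async def main() -> None:
        server = MCPServerStdio(params={"command": "my-mcp-server"})  # placeholder command
        await server.connect()
        try:
            # New in this release: the InitializeResult from session.initialize()
            # is kept on the server object.
            init = server.server_initialize_result
            if init is not None:
                print(init.serverInfo.name, init.serverInfo.version)
        finally:
            await server.cleanup()

    asyncio.run(main())
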
agents/mcp/util.py CHANGED
@@ -116,7 +116,7 @@ class MCPUtil:
         if len(result.content) == 1:
             tool_output = result.content[0].model_dump_json()
         elif len(result.content) > 1:
-            tool_output = json.dumps([item.model_dump() for item in result.content])
+            tool_output = json.dumps([item.model_dump(mode="json") for item in result.content])
         else:
             logger.error(f"Errored MCP tool result: {result}")
             tool_output = "Error running tool."
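
The mode="json" switch matters because model_dump() can leave non-JSON-native values (datetimes, bytes) in the dict, which json.dumps then rejects; mode="json" coerces fields to JSON-safe types first. A standalone illustration using a plain Pydantic model rather than the MCP content types themselves:

    import json
    from datetime import datetime

    from pydantic import BaseModel

    class Item(BaseModel):
        created_at: datetime

    item = Item(created_at=datetime(2024, 1, 1))
    try:
        json.dumps(item.model_dump())  # datetime object survives the dump...
    except TypeError as exc:
        print("plain dump fails:", exc)  # ...so json.dumps raises TypeError
    print(json.dumps(item.model_dump(mode="json")))  # ISO string: serializes fine
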
agents/models/chatcmpl_stream_handler.py CHANGED
@@ -38,6 +38,16 @@ class StreamingState:
     function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)


+class SequenceNumber:
+    def __init__(self):
+        self._sequence_number = 0
+
+    def get_and_increment(self) -> int:
+        num = self._sequence_number
+        self._sequence_number += 1
+        return num
+
+
 class ChatCmplStreamHandler:
     @classmethod
     async def handle_stream(
@@ -47,13 +57,14 @@ class ChatCmplStreamHandler:
     ) -> AsyncIterator[TResponseStreamEvent]:
         usage: CompletionUsage | None = None
         state = StreamingState()
-
+        sequence_number = SequenceNumber()
         async for chunk in stream:
             if not state.started:
                 state.started = True
                 yield ResponseCreatedEvent(
                     response=response,
                     type="response.created",
+                    sequence_number=sequence_number.get_and_increment(),
                 )

             # This is always set by the OpenAI API, but not by others e.g. LiteLLM
@@ -89,6 +100,7 @@
                         item=assistant_item,
                         output_index=0,
                         type="response.output_item.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                     yield ResponseContentPartAddedEvent(
                         content_index=state.text_content_index_and_output[0],
@@ -100,6 +112,7 @@
                             annotations=[],
                         ),
                         type="response.content_part.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                 # Emit the delta for this segment of content
                 yield ResponseTextDeltaEvent(
@@ -108,6 +121,7 @@
                     item_id=FAKE_RESPONSES_ID,
                     output_index=0,
                     type="response.output_text.delta",
+                    sequence_number=sequence_number.get_and_increment(),
                 )
                 # Accumulate the text into the response part
                 state.text_content_index_and_output[1].text += delta.content
@@ -134,6 +148,7 @@
                         item=assistant_item,
                         output_index=0,
                         type="response.output_item.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                     yield ResponseContentPartAddedEvent(
                         content_index=state.refusal_content_index_and_output[0],
@@ -145,6 +160,7 @@
                             annotations=[],
                         ),
                         type="response.content_part.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                 # Emit the delta for this segment of refusal
                 yield ResponseRefusalDeltaEvent(
@@ -153,6 +169,7 @@
                     item_id=FAKE_RESPONSES_ID,
                     output_index=0,
                     type="response.refusal.delta",
+                    sequence_number=sequence_number.get_and_increment(),
                 )
                 # Accumulate the refusal string in the output part
                 state.refusal_content_index_and_output[1].refusal += delta.refusal
@@ -190,6 +207,7 @@
                 output_index=0,
                 part=state.text_content_index_and_output[1],
                 type="response.content_part.done",
+                sequence_number=sequence_number.get_and_increment(),
             )

         if state.refusal_content_index_and_output:
@@ -201,6 +219,7 @@
                 output_index=0,
                 part=state.refusal_content_index_and_output[1],
                 type="response.content_part.done",
+                sequence_number=sequence_number.get_and_increment(),
             )

         # Actually send events for the function calls
@@ -216,6 +235,7 @@
                 ),
                 output_index=function_call_starting_index,
                 type="response.output_item.added",
+                sequence_number=sequence_number.get_and_increment(),
             )
             # Then, yield the args
             yield ResponseFunctionCallArgumentsDeltaEvent(
@@ -223,6 +243,7 @@
                 item_id=FAKE_RESPONSES_ID,
                 output_index=function_call_starting_index,
                 type="response.function_call_arguments.delta",
+                sequence_number=sequence_number.get_and_increment(),
             )
             # Finally, the ResponseOutputItemDone
             yield ResponseOutputItemDoneEvent(
@@ -235,6 +256,7 @@
                 ),
                 output_index=function_call_starting_index,
                 type="response.output_item.done",
+                sequence_number=sequence_number.get_and_increment(),
             )

         # Finally, send the Response completed event
@@ -258,6 +280,7 @@
                 item=assistant_msg,
                 output_index=0,
                 type="response.output_item.done",
+                sequence_number=sequence_number.get_and_increment(),
             )

         for function_call in state.function_calls.values():
@@ -289,4 +312,5 @@
         yield ResponseCompletedEvent(
             response=final_response,
             type="response.completed",
+            sequence_number=sequence_number.get_and_increment(),
         )
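
SequenceNumber is just a monotonically increasing counter, so every event yielded by handle_stream now carries a unique, ordered sequence_number, as the Responses stream event types expect. Its behavior in isolation:

    class SequenceNumber:
        def __init__(self):
            self._sequence_number = 0

        def get_and_increment(self) -> int:
            num = self._sequence_number
            self._sequence_number += 1
            return num

    seq = SequenceNumber()
    kinds = ["response.created", "response.output_text.delta", "response.completed"]
    # Each yielded event gets the next number in emission order: 0, 1, 2, ...
    print([(kind, seq.get_and_increment()) for kind in kinds])
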
agents/models/openai_chatcompletions.py CHANGED
@@ -9,6 +9,7 @@ from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.responses import Response
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -70,12 +71,22 @@ class OpenAIChatCompletionsModel(Model):
             stream=False,
         )

+        first_choice = response.choices[0]
+        message = first_choice.message
+
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Received model response")
         else:
-            logger.debug(
-                f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
-            )
+            if message is not None:
+                logger.debug(
+                    "LLM resp:\n%s\n",
+                    json.dumps(message.model_dump(), indent=2),
+                )
+            else:
+                logger.debug(
+                    "LLM resp had no message. finish_reason: %s",
+                    first_choice.finish_reason,
+                )

         usage = (
             Usage(
@@ -83,18 +94,32 @@
                 input_tokens=response.usage.prompt_tokens,
                 output_tokens=response.usage.completion_tokens,
                 total_tokens=response.usage.total_tokens,
+                input_tokens_details=InputTokensDetails(
+                    cached_tokens=getattr(
+                        response.usage.prompt_tokens_details, "cached_tokens", 0
+                    )
+                    or 0,
+                ),
+                output_tokens_details=OutputTokensDetails(
+                    reasoning_tokens=getattr(
+                        response.usage.completion_tokens_details, "reasoning_tokens", 0
+                    )
+                    or 0,
+                ),
             )
             if response.usage
             else Usage()
         )
         if tracing.include_data():
-            span_generation.span_data.output = [response.choices[0].message.model_dump()]
+            span_generation.span_data.output = (
+                [message.model_dump()] if message is not None else []
+            )
             span_generation.span_data.usage = {
                 "input_tokens": usage.input_tokens,
                 "output_tokens": usage.output_tokens,
             }

-        items = Converter.message_to_output_items(response.choices[0].message)
+        items = Converter.message_to_output_items(message) if message is not None else []

         return ModelResponse(
             output=items,
@@ -252,7 +277,7 @@ class OpenAIChatCompletionsModel(Model):
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            extra_headers={ **HEADERS, **(model_settings.extra_headers or {}) },
+            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
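
The getattr(..., "cached_tokens", 0) or 0 pattern guards two failure modes at once: the details object can be missing or None on non-OpenAI backends (the getattr default covers that), and when present its field can itself be None (the or 0 normalizes that). A standalone check of the pattern:

    from types import SimpleNamespace

    def cached_tokens(usage) -> int:
        # Returns 0 whether prompt_tokens_details is None or its
        # cached_tokens field is None; otherwise the real count.
        return getattr(usage.prompt_tokens_details, "cached_tokens", 0) or 0

    print(cached_tokens(SimpleNamespace(prompt_tokens_details=None)))  # 0
    details = SimpleNamespace(cached_tokens=None)
    print(cached_tokens(SimpleNamespace(prompt_tokens_details=details)))  # 0
    details = SimpleNamespace(cached_tokens=7)
    print(cached_tokens(SimpleNamespace(prompt_tokens_details=details)))  # 7
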
agents/models/openai_responses.py CHANGED
@@ -10,6 +10,7 @@ from openai.types import ChatModel
 from openai.types.responses import (
     Response,
     ResponseCompletedEvent,
+    ResponseIncludable,
     ResponseStreamEvent,
     ResponseTextConfigParam,
     ToolParam,
@@ -23,7 +24,17 @@ from ..exceptions import UserError
 from ..handoffs import Handoff
 from ..items import ItemHelpers, ModelResponse, TResponseInputItem
 from ..logger import logger
-from ..tool import ComputerTool, FileSearchTool, FunctionTool, Tool, WebSearchTool
+from ..tool import (
+    CodeInterpreterTool,
+    ComputerTool,
+    FileSearchTool,
+    FunctionTool,
+    HostedMCPTool,
+    ImageGenerationTool,
+    LocalShellTool,
+    Tool,
+    WebSearchTool,
+)
 from ..tracing import SpanError, response_span
 from ..usage import Usage
 from ..version import __version__
@@ -36,13 +47,6 @@ if TYPE_CHECKING:
 _USER_AGENT = f"Agents/Python {__version__}"
 _HEADERS = {"User-Agent": _USER_AGENT}

-# From the Responses API
-IncludeLiteral = Literal[
-    "file_search_call.results",
-    "message.input_image.image_url",
-    "computer_call_output.output.image_url",
-]
-

 class OpenAIResponsesModel(Model):
     """
@@ -98,6 +102,8 @@ class OpenAIResponsesModel(Model):
                 input_tokens=response.usage.input_tokens,
                 output_tokens=response.usage.output_tokens,
                 total_tokens=response.usage.total_tokens,
+                input_tokens_details=response.usage.input_tokens_details,
+                output_tokens_details=response.usage.output_tokens_details,
             )
             if response.usage
             else Usage()
@@ -271,7 +277,7 @@
 @dataclass
 class ConvertedTools:
     tools: list[ToolParam]
-    includes: list[IncludeLiteral]
+    includes: list[ResponseIncludable]


 class Converter:
@@ -299,6 +305,18 @@
             return {
                 "type": "computer_use_preview",
             }
+        elif tool_choice == "image_generation":
+            return {
+                "type": "image_generation",
+            }
+        elif tool_choice == "code_interpreter":
+            return {
+                "type": "code_interpreter",
+            }
+        elif tool_choice == "mcp":
+            return {
+                "type": "mcp",
+            }
         else:
             return {
                 "type": "function",
@@ -328,7 +346,7 @@
         handoffs: list[Handoff[Any]],
     ) -> ConvertedTools:
         converted_tools: list[ToolParam] = []
-        includes: list[IncludeLiteral] = []
+        includes: list[ResponseIncludable] = []

         computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)]
         if len(computer_tools) > 1:
@@ -346,7 +364,7 @@
         return ConvertedTools(tools=converted_tools, includes=includes)

     @classmethod
-    def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]:
+    def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, ResponseIncludable | None]:
         """Returns converted tool and includes"""

         if isinstance(tool, FunctionTool):
@@ -357,7 +375,7 @@
                 "type": "function",
                 "description": tool.description,
             }
-            includes: IncludeLiteral | None = None
+            includes: ResponseIncludable | None = None
         elif isinstance(tool, WebSearchTool):
             ws: WebSearchToolParam = {
                 "type": "web_search_preview",
@@ -387,7 +405,20 @@
                 "display_height": tool.computer.dimensions[1],
             }
             includes = None
-
+        elif isinstance(tool, HostedMCPTool):
+            converted_tool = tool.tool_config
+            includes = None
+        elif isinstance(tool, ImageGenerationTool):
+            converted_tool = tool.tool_config
+            includes = None
+        elif isinstance(tool, CodeInterpreterTool):
+            converted_tool = tool.tool_config
+            includes = None
+        elif isinstance(tool, LocalShellTool):
+            converted_tool = {
+                "type": "local_shell",
+            }
+            includes = None
         else:
             raise UserError(f"Unknown tool type: {type(tool)}, tool")
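
The hosted tools (HostedMCPTool, ImageGenerationTool, CodeInterpreterTool) are passed to the Responses API verbatim via their tool_config dicts. A sketch of wiring one up; the server label and URL are placeholders:

    from agents import Agent
    from agents.tool import HostedMCPTool

    agent = Agent(
        name="assistant",
        tools=[
            HostedMCPTool(
                tool_config={
                    "type": "mcp",
                    "server_label": "docs",  # placeholder label
                    "server_url": "https://example.com/mcp",  # placeholder URL
                    "require_approval": "never",
                }
            )
        ],
    )
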
agents/result.py CHANGED
@@ -11,14 +11,22 @@ from typing_extensions import TypeVar
 from ._run_impl import QueueCompleteSentinel
 from .agent import Agent
 from .agent_output import AgentOutputSchemaBase
-from .exceptions import InputGuardrailTripwireTriggered, MaxTurnsExceeded
+from .exceptions import (
+    AgentsException,
+    InputGuardrailTripwireTriggered,
+    MaxTurnsExceeded,
+    RunErrorDetails,
+)
 from .guardrail import InputGuardrailResult, OutputGuardrailResult
 from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
 from .logger import logger
 from .run_context import RunContextWrapper
 from .stream_events import StreamEvent
 from .tracing import Trace
-from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
+from .util._pretty_print import (
+    pretty_print_result,
+    pretty_print_run_result_streaming,
+)

 if TYPE_CHECKING:
     from ._run_impl import QueueCompleteSentinel
@@ -206,31 +214,53 @@ class RunResultStreaming(RunResultBase):
         if self._stored_exception:
             raise self._stored_exception

+    def _create_error_details(self) -> RunErrorDetails:
+        """Return a `RunErrorDetails` object considering the current attributes of the class."""
+        return RunErrorDetails(
+            input=self.input,
+            new_items=self.new_items,
+            raw_responses=self.raw_responses,
+            last_agent=self.current_agent,
+            context_wrapper=self.context_wrapper,
+            input_guardrail_results=self.input_guardrail_results,
+            output_guardrail_results=self.output_guardrail_results,
+        )
+
     def _check_errors(self):
         if self.current_turn > self.max_turns:
-            self._stored_exception = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded")
+            max_turns_exc = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded")
+            max_turns_exc.run_data = self._create_error_details()
+            self._stored_exception = max_turns_exc

         # Fetch all the completed guardrail results from the queue and raise if needed
         while not self._input_guardrail_queue.empty():
             guardrail_result = self._input_guardrail_queue.get_nowait()
             if guardrail_result.output.tripwire_triggered:
-                self._stored_exception = InputGuardrailTripwireTriggered(guardrail_result)
+                tripwire_exc = InputGuardrailTripwireTriggered(guardrail_result)
+                tripwire_exc.run_data = self._create_error_details()
+                self._stored_exception = tripwire_exc

         # Check the tasks for any exceptions
         if self._run_impl_task and self._run_impl_task.done():
-            exc = self._run_impl_task.exception()
-            if exc and isinstance(exc, Exception):
-                self._stored_exception = exc
+            run_impl_exc = self._run_impl_task.exception()
+            if run_impl_exc and isinstance(run_impl_exc, Exception):
+                if isinstance(run_impl_exc, AgentsException) and run_impl_exc.run_data is None:
+                    run_impl_exc.run_data = self._create_error_details()
+                self._stored_exception = run_impl_exc

         if self._input_guardrails_task and self._input_guardrails_task.done():
-            exc = self._input_guardrails_task.exception()
-            if exc and isinstance(exc, Exception):
-                self._stored_exception = exc
+            in_guard_exc = self._input_guardrails_task.exception()
+            if in_guard_exc and isinstance(in_guard_exc, Exception):
+                if isinstance(in_guard_exc, AgentsException) and in_guard_exc.run_data is None:
+                    in_guard_exc.run_data = self._create_error_details()
+                self._stored_exception = in_guard_exc

         if self._output_guardrails_task and self._output_guardrails_task.done():
-            exc = self._output_guardrails_task.exception()
-            if exc and isinstance(exc, Exception):
-                self._stored_exception = exc
+            out_guard_exc = self._output_guardrails_task.exception()
+            if out_guard_exc and isinstance(out_guard_exc, Exception):
+                if isinstance(out_guard_exc, AgentsException) and out_guard_exc.run_data is None:
+                    out_guard_exc.run_data = self._create_error_details()
+                self._stored_exception = out_guard_exc

     def _cleanup_tasks(self):
         if self._run_impl_task and not self._run_impl_task.done():
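
With these changes, exceptions raised out of a streamed run carry partial run state. A sketch of catching MaxTurnsExceeded and reading the attached RunErrorDetails; the agent and the prompt are placeholders:

    import asyncio

    from agents import Agent, MaxTurnsExceeded, Runner

    async def main() -> None:
        agent = Agent(name="assistant", instructions="Answer briefly.")  # placeholder
        try:
            result = Runner.run_streamed(agent, "hello", max_turns=1)
            async for _event in result.stream_events():
                pass
        except MaxTurnsExceeded as exc:
            # run_data now holds the input, items, raw responses, and last agent
            # at the point of failure.
            if exc.run_data is not None:
                print("items before failure:", len(exc.run_data.new_items))

    asyncio.run(main())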