openai-agents 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

agents/__init__.py CHANGED
@@ -54,10 +54,19 @@ from .stream_events import (
     StreamEvent,
 )
 from .tool import (
+    CodeInterpreterTool,
     ComputerTool,
     FileSearchTool,
     FunctionTool,
     FunctionToolResult,
+    HostedMCPTool,
+    ImageGenerationTool,
+    LocalShellCommandRequest,
+    LocalShellExecutor,
+    LocalShellTool,
+    MCPToolApprovalFunction,
+    MCPToolApprovalFunctionResult,
+    MCPToolApprovalRequest,
     Tool,
     WebSearchTool,
     default_tool_error_function,
@@ -206,8 +215,17 @@ __all__ = [
     "FunctionToolResult",
     "ComputerTool",
     "FileSearchTool",
+    "CodeInterpreterTool",
+    "ImageGenerationTool",
+    "LocalShellCommandRequest",
+    "LocalShellExecutor",
+    "LocalShellTool",
     "Tool",
     "WebSearchTool",
+    "HostedMCPTool",
+    "MCPToolApprovalFunction",
+    "MCPToolApprovalRequest",
+    "MCPToolApprovalFunctionResult",
     "function_tool",
     "Usage",
     "add_trace_processor",
agents/_run_impl.py CHANGED
@@ -14,6 +14,9 @@ from openai.types.responses import (
     ResponseFunctionWebSearch,
     ResponseOutputMessage,
 )
+from openai.types.responses.response_code_interpreter_tool_call import (
+    ResponseCodeInterpreterToolCall,
+)
 from openai.types.responses.response_computer_tool_call import (
     ActionClick,
     ActionDoubleClick,
@@ -25,7 +28,13 @@ from openai.types.responses.response_computer_tool_call import (
     ActionType,
     ActionWait,
 )
-from openai.types.responses.response_input_param import ComputerCallOutput
+from openai.types.responses.response_input_param import ComputerCallOutput, McpApprovalResponse
+from openai.types.responses.response_output_item import (
+    ImageGenerationCall,
+    LocalShellCall,
+    McpApprovalRequest,
+    McpListTools,
+)
 from openai.types.responses.response_reasoning_item import ResponseReasoningItem
 
 from .agent import Agent, ToolsToFinalOutputResult
@@ -38,6 +47,9 @@ from .items import (
     HandoffCallItem,
     HandoffOutputItem,
     ItemHelpers,
+    MCPApprovalRequestItem,
+    MCPApprovalResponseItem,
+    MCPListToolsItem,
     MessageOutputItem,
     ModelResponse,
     ReasoningItem,
@@ -52,7 +64,16 @@ from .model_settings import ModelSettings
 from .models.interface import ModelTracing
 from .run_context import RunContextWrapper, TContext
 from .stream_events import RunItemStreamEvent, StreamEvent
-from .tool import ComputerTool, FunctionTool, FunctionToolResult, Tool
+from .tool import (
+    ComputerTool,
+    FunctionTool,
+    FunctionToolResult,
+    HostedMCPTool,
+    LocalShellCommandRequest,
+    LocalShellTool,
+    MCPToolApprovalRequest,
+    Tool,
+)
 from .tracing import (
     SpanError,
     Trace,
@@ -112,15 +133,29 @@ class ToolRunComputerAction:
     computer_tool: ComputerTool
 
 
+@dataclass
+class ToolRunMCPApprovalRequest:
+    request_item: McpApprovalRequest
+    mcp_tool: HostedMCPTool
+
+
+@dataclass
+class ToolRunLocalShellCall:
+    tool_call: LocalShellCall
+    local_shell_tool: LocalShellTool
+
+
 @dataclass
 class ProcessedResponse:
     new_items: list[RunItem]
     handoffs: list[ToolRunHandoff]
     functions: list[ToolRunFunction]
     computer_actions: list[ToolRunComputerAction]
+    local_shell_calls: list[ToolRunLocalShellCall]
     tools_used: list[str]  # Names of all tools used, including hosted tools
+    mcp_approval_requests: list[ToolRunMCPApprovalRequest]  # Only requests with callbacks
 
-    def has_tools_to_run(self) -> bool:
+    def has_tools_or_approvals_to_run(self) -> bool:
         # Handoffs, functions and computer actions need local processing
         # Hosted tools have already run, so there's nothing to do.
         return any(
@@ -128,6 +163,8 @@ class ProcessedResponse:
                 self.handoffs,
                 self.functions,
                 self.computer_actions,
+                self.local_shell_calls,
+                self.mcp_approval_requests,
             ]
         )
 
@@ -226,7 +263,16 @@ class RunImpl:
         new_step_items.extend([result.run_item for result in function_results])
         new_step_items.extend(computer_results)
 
-        # Second, check if there are any handoffs
+        # Next, run the MCP approval requests
+        if processed_response.mcp_approval_requests:
+            approval_results = await cls.execute_mcp_approval_requests(
+                agent=agent,
+                approval_requests=processed_response.mcp_approval_requests,
+                context_wrapper=context_wrapper,
+            )
+            new_step_items.extend(approval_results)
+
+        # Next, check if there are any handoffs
         if run_handoffs := processed_response.handoffs:
             return await cls.execute_handoffs(
                 agent=agent,
@@ -240,7 +286,7 @@ class RunImpl:
                 run_config=run_config,
             )
 
-        # Third, we'll check if the tool use should result in a final output
+        # Next, we'll check if the tool use should result in a final output
         check_tool_use = await cls._check_for_final_output_from_tools(
             agent=agent,
             tool_results=function_results,
@@ -295,7 +341,7 @@ class RunImpl:
             )
         elif (
             not output_schema or output_schema.is_plain_text()
-        ) and not processed_response.has_tools_to_run():
+        ) and not processed_response.has_tools_or_approvals_to_run():
             return await cls.execute_final_output(
                 agent=agent,
                 original_input=original_input,
@@ -343,10 +389,20 @@ class RunImpl:
         run_handoffs = []
         functions = []
         computer_actions = []
+        local_shell_calls = []
+        mcp_approval_requests = []
         tools_used: list[str] = []
         handoff_map = {handoff.tool_name: handoff for handoff in handoffs}
         function_map = {tool.name: tool for tool in all_tools if isinstance(tool, FunctionTool)}
         computer_tool = next((tool for tool in all_tools if isinstance(tool, ComputerTool)), None)
+        local_shell_tool = next(
+            (tool for tool in all_tools if isinstance(tool, LocalShellTool)), None
+        )
+        hosted_mcp_server_map = {
+            tool.tool_config["server_label"]: tool
+            for tool in all_tools
+            if isinstance(tool, HostedMCPTool)
+        }
 
         for output in response.output:
             if isinstance(output, ResponseOutputMessage):
@@ -375,6 +431,54 @@ class RunImpl:
                 computer_actions.append(
                     ToolRunComputerAction(tool_call=output, computer_tool=computer_tool)
                 )
+            elif isinstance(output, McpApprovalRequest):
+                items.append(MCPApprovalRequestItem(raw_item=output, agent=agent))
+                if output.server_label not in hosted_mcp_server_map:
+                    _error_tracing.attach_error_to_current_span(
+                        SpanError(
+                            message="MCP server label not found",
+                            data={"server_label": output.server_label},
+                        )
+                    )
+                    raise ModelBehaviorError(f"MCP server label {output.server_label} not found")
+                else:
+                    server = hosted_mcp_server_map[output.server_label]
+                    if server.on_approval_request:
+                        mcp_approval_requests.append(
+                            ToolRunMCPApprovalRequest(
+                                request_item=output,
+                                mcp_tool=server,
+                            )
+                        )
+                    else:
+                        logger.warning(
+                            f"MCP server {output.server_label} has no on_approval_request hook"
+                        )
+            elif isinstance(output, McpListTools):
+                items.append(MCPListToolsItem(raw_item=output, agent=agent))
+            elif isinstance(output, ImageGenerationCall):
+                items.append(ToolCallItem(raw_item=output, agent=agent))
+                tools_used.append("image_generation")
+            elif isinstance(output, ResponseCodeInterpreterToolCall):
+                items.append(ToolCallItem(raw_item=output, agent=agent))
+                tools_used.append("code_interpreter")
+            elif isinstance(output, LocalShellCall):
+                items.append(ToolCallItem(raw_item=output, agent=agent))
+                tools_used.append("local_shell")
+                if not local_shell_tool:
+                    _error_tracing.attach_error_to_current_span(
+                        SpanError(
+                            message="Local shell tool not found",
+                            data={},
+                        )
+                    )
+                    raise ModelBehaviorError(
+                        "Model produced local shell call without a local shell tool."
+                    )
+                local_shell_calls.append(
+                    ToolRunLocalShellCall(tool_call=output, local_shell_tool=local_shell_tool)
+                )
+
             elif not isinstance(output, ResponseFunctionToolCall):
                 logger.warning(f"Unexpected output type, ignoring: {type(output)}")
                 continue
@@ -416,7 +520,9 @@ class RunImpl:
             handoffs=run_handoffs,
             functions=functions,
             computer_actions=computer_actions,
+            local_shell_calls=local_shell_calls,
             tools_used=tools_used,
+            mcp_approval_requests=mcp_approval_requests,
         )
 
     @classmethod
@@ -489,6 +595,30 @@ class RunImpl:
             for tool_run, result in zip(tool_runs, results)
         ]
 
+    @classmethod
+    async def execute_local_shell_calls(
+        cls,
+        *,
+        agent: Agent[TContext],
+        calls: list[ToolRunLocalShellCall],
+        context_wrapper: RunContextWrapper[TContext],
+        hooks: RunHooks[TContext],
+        config: RunConfig,
+    ) -> list[RunItem]:
+        results: list[RunItem] = []
+        # Need to run these serially, because each call can affect the local shell state
+        for call in calls:
+            results.append(
+                await LocalShellAction.execute(
+                    agent=agent,
+                    call=call,
+                    hooks=hooks,
+                    context_wrapper=context_wrapper,
+                    config=config,
+                )
+            )
+        return results
+
     @classmethod
     async def execute_computer_actions(
         cls,
@@ -643,6 +773,40 @@ class RunImpl:
             next_step=NextStepHandoff(new_agent),
         )
 
+    @classmethod
+    async def execute_mcp_approval_requests(
+        cls,
+        *,
+        agent: Agent[TContext],
+        approval_requests: list[ToolRunMCPApprovalRequest],
+        context_wrapper: RunContextWrapper[TContext],
+    ) -> list[RunItem]:
+        async def run_single_approval(approval_request: ToolRunMCPApprovalRequest) -> RunItem:
+            callback = approval_request.mcp_tool.on_approval_request
+            assert callback is not None, "Callback is required for MCP approval requests"
+            maybe_awaitable_result = callback(
+                MCPToolApprovalRequest(context_wrapper, approval_request.request_item)
+            )
+            if inspect.isawaitable(maybe_awaitable_result):
+                result = await maybe_awaitable_result
+            else:
+                result = maybe_awaitable_result
+            reason = result.get("reason", None)
+            raw_item: McpApprovalResponse = {
+                "approval_request_id": approval_request.request_item.id,
+                "approve": result["approve"],
+                "type": "mcp_approval_response",
+            }
+            if not result["approve"] and reason:
+                raw_item["reason"] = reason
+            return MCPApprovalResponseItem(
+                raw_item=raw_item,
+                agent=agent,
+            )
+
+        tasks = [run_single_approval(approval_request) for approval_request in approval_requests]
+        return await asyncio.gather(*tasks)
+
     @classmethod
     async def execute_final_output(
         cls,
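
execute_mcp_approval_requests runs all approval callbacks concurrently via asyncio.gather and wraps each verdict in an MCPApprovalResponseItem. A hedged sketch of a callback it would invoke (the delete_file tool name is a hypothetical example, not part of the SDK):

from agents import MCPToolApprovalFunctionResult, MCPToolApprovalRequest

def approve_mcp_call(request: MCPToolApprovalRequest) -> MCPToolApprovalFunctionResult:
    # request.data is the raw McpApprovalRequest from the Responses API.
    if request.data.name == "delete_file":  # hypothetical tool name
        return {"approve": False, "reason": "Destructive tools need manual review."}
    return {"approve": True}
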
@@ -727,6 +891,11 @@ class RunImpl:
             event = RunItemStreamEvent(item=item, name="tool_output")
         elif isinstance(item, ReasoningItem):
             event = RunItemStreamEvent(item=item, name="reasoning_item_created")
+        elif isinstance(item, MCPApprovalRequestItem):
+            event = RunItemStreamEvent(item=item, name="mcp_approval_requested")
+        elif isinstance(item, MCPListToolsItem):
+            event = RunItemStreamEvent(item=item, name="mcp_list_tools")
+
         else:
             logger.warning(f"Unexpected item type: {type(item)}")
             event = None
@@ -919,3 +1088,54 @@ class ComputerAction:
             await computer.wait()
 
         return await computer.screenshot()
+
+
+class LocalShellAction:
+    @classmethod
+    async def execute(
+        cls,
+        *,
+        agent: Agent[TContext],
+        call: ToolRunLocalShellCall,
+        hooks: RunHooks[TContext],
+        context_wrapper: RunContextWrapper[TContext],
+        config: RunConfig,
+    ) -> RunItem:
+        await asyncio.gather(
+            hooks.on_tool_start(context_wrapper, agent, call.local_shell_tool),
+            (
+                agent.hooks.on_tool_start(context_wrapper, agent, call.local_shell_tool)
+                if agent.hooks
+                else _coro.noop_coroutine()
+            ),
+        )
+
+        request = LocalShellCommandRequest(
+            ctx_wrapper=context_wrapper,
+            data=call.tool_call,
+        )
+        output = call.local_shell_tool.executor(request)
+        if inspect.isawaitable(output):
+            result = await output
+        else:
+            result = output
+
+        await asyncio.gather(
+            hooks.on_tool_end(context_wrapper, agent, call.local_shell_tool, result),
+            (
+                agent.hooks.on_tool_end(context_wrapper, agent, call.local_shell_tool, result)
+                if agent.hooks
+                else _coro.noop_coroutine()
+            ),
+        )
+
+        return ToolCallOutputItem(
+            agent=agent,
+            output=output,
+            raw_item={
+                "type": "local_shell_call_output",
+                "id": call.tool_call.call_id,
+                "output": result,
+                # "id": "out" + call.tool_call.id,  # TODO remove this, it should be optional
+            },
+        )
agents/extensions/models/litellm_model.py CHANGED
@@ -6,6 +6,7 @@ from collections.abc import AsyncIterator
 from typing import Any, Literal, cast, overload
 
 import litellm.types
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from agents.exceptions import ModelBehaviorError
 
@@ -107,6 +108,16 @@ class LitellmModel(Model):
                 input_tokens=response_usage.prompt_tokens,
                 output_tokens=response_usage.completion_tokens,
                 total_tokens=response_usage.total_tokens,
+                input_tokens_details=InputTokensDetails(
+                    cached_tokens=getattr(
+                        response_usage.prompt_tokens_details, "cached_tokens", 0
+                    )
+                ),
+                output_tokens_details=OutputTokensDetails(
+                    reasoning_tokens=getattr(
+                        response_usage.completion_tokens_details, "reasoning_tokens", 0
+                    )
+                ),
             )
             if response.usage
             else Usage()
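
Note the defensive getattr: LiteLLM usage objects do not always carry prompt_tokens_details or completion_tokens_details. A small illustration of the pattern:

# getattr(obj, name, default) also covers a None details object,
# since None has no "cached_tokens" attribute.
prompt_tokens_details = None  # e.g. a provider that reports no details
assert getattr(prompt_tokens_details, "cached_tokens", 0) == 0
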
agents/items.py CHANGED
@@ -18,7 +18,22 @@ from openai.types.responses import (
     ResponseOutputText,
     ResponseStreamEvent,
 )
-from openai.types.responses.response_input_item_param import ComputerCallOutput, FunctionCallOutput
+from openai.types.responses.response_code_interpreter_tool_call import (
+    ResponseCodeInterpreterToolCall,
+)
+from openai.types.responses.response_input_item_param import (
+    ComputerCallOutput,
+    FunctionCallOutput,
+    LocalShellCallOutput,
+    McpApprovalResponse,
+)
+from openai.types.responses.response_output_item import (
+    ImageGenerationCall,
+    LocalShellCall,
+    McpApprovalRequest,
+    McpCall,
+    McpListTools,
+)
 from openai.types.responses.response_reasoning_item import ResponseReasoningItem
 from pydantic import BaseModel
 from typing_extensions import TypeAlias
@@ -108,6 +123,10 @@ ToolCallItemTypes: TypeAlias = Union[
     ResponseComputerToolCall,
     ResponseFileSearchToolCall,
     ResponseFunctionWebSearch,
+    McpCall,
+    ResponseCodeInterpreterToolCall,
+    ImageGenerationCall,
+    LocalShellCall,
 ]
 """A type that represents a tool call item."""
 
@@ -123,10 +142,12 @@ class ToolCallItem(RunItemBase[ToolCallItemTypes]):
 
 
 @dataclass
-class ToolCallOutputItem(RunItemBase[Union[FunctionCallOutput, ComputerCallOutput]]):
+class ToolCallOutputItem(
+    RunItemBase[Union[FunctionCallOutput, ComputerCallOutput, LocalShellCallOutput]]
+):
     """Represents the output of a tool call."""
 
-    raw_item: FunctionCallOutput | ComputerCallOutput
+    raw_item: FunctionCallOutput | ComputerCallOutput | LocalShellCallOutput
     """The raw item from the model."""
 
     output: Any
@@ -147,6 +168,36 @@ class ReasoningItem(RunItemBase[ResponseReasoningItem]):
     type: Literal["reasoning_item"] = "reasoning_item"
 
 
+@dataclass
+class MCPListToolsItem(RunItemBase[McpListTools]):
+    """Represents a call to an MCP server to list tools."""
+
+    raw_item: McpListTools
+    """The raw MCP list tools call."""
+
+    type: Literal["mcp_list_tools_item"] = "mcp_list_tools_item"
+
+
+@dataclass
+class MCPApprovalRequestItem(RunItemBase[McpApprovalRequest]):
+    """Represents a request for MCP approval."""
+
+    raw_item: McpApprovalRequest
+    """The raw MCP approval request."""
+
+    type: Literal["mcp_approval_request_item"] = "mcp_approval_request_item"
+
+
+@dataclass
+class MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):
+    """Represents a response to an MCP approval request."""
+
+    raw_item: McpApprovalResponse
+    """The raw MCP approval response."""
+
+    type: Literal["mcp_approval_response_item"] = "mcp_approval_response_item"
+
+
 RunItem: TypeAlias = Union[
     MessageOutputItem,
     HandoffCallItem,
@@ -154,6 +205,9 @@ RunItem: TypeAlias = Union[
     ToolCallItem,
     ToolCallOutputItem,
     ReasoningItem,
+    MCPListToolsItem,
+    MCPApprovalRequestItem,
+    MCPApprovalResponseItem,
 ]
 """An item generated by an agent."""
 
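
The three new MCP item types flow through result.new_items like any other run item. A hedged sketch of picking them out (assuming result comes from Runner.run):

from agents.items import MCPApprovalRequestItem, MCPListToolsItem

for item in result.new_items:
    if isinstance(item, MCPListToolsItem):
        print("MCP server advertised tools:", item.raw_item.tools)
    elif isinstance(item, MCPApprovalRequestItem):
        print("Approval requested for tool:", item.raw_item.name)
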
agents/mcp/server.py CHANGED
@@ -12,7 +12,7 @@ from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_cli
 from mcp.client.sse import sse_client
 from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client
 from mcp.shared.message import SessionMessage
-from mcp.types import CallToolResult
+from mcp.types import CallToolResult, InitializeResult
 from typing_extensions import NotRequired, TypedDict
 
 from ..exceptions import UserError
@@ -73,6 +73,7 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         self.exit_stack: AsyncExitStack = AsyncExitStack()
         self._cleanup_lock: asyncio.Lock = asyncio.Lock()
         self.cache_tools_list = cache_tools_list
+        self.server_initialize_result: InitializeResult | None = None
 
         self.client_session_timeout_seconds = client_session_timeout_seconds
 
@@ -122,7 +123,8 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
                     else None,
                 )
             )
-            await session.initialize()
+            server_result = await session.initialize()
+            self.server_initialize_result = server_result
             self.session = session
         except Exception as e:
             logger.error(f"Error initializing MCP server: {e}")
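
With the initialize result stored, callers can inspect what the MCP server reported after connect(). A sketch (the filesystem server command is a placeholder):

from agents.mcp import MCPServerStdio

async def inspect_server() -> None:
    server = MCPServerStdio(
        params={"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", "."]}
    )
    await server.connect()
    if server.server_initialize_result:
        # InitializeResult carries the server's name/version and capabilities.
        print(server.server_initialize_result.serverInfo)
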
agents/models/chatcmpl_stream_handler.py CHANGED
@@ -38,6 +38,16 @@ class StreamingState:
     function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)
 
 
+class SequenceNumber:
+    def __init__(self):
+        self._sequence_number = 0
+
+    def get_and_increment(self) -> int:
+        num = self._sequence_number
+        self._sequence_number += 1
+        return num
+
+
 class ChatCmplStreamHandler:
     @classmethod
     async def handle_stream(
@@ -47,13 +57,14 @@ class ChatCmplStreamHandler:
     ) -> AsyncIterator[TResponseStreamEvent]:
         usage: CompletionUsage | None = None
         state = StreamingState()
-
+        sequence_number = SequenceNumber()
         async for chunk in stream:
             if not state.started:
                 state.started = True
                 yield ResponseCreatedEvent(
                     response=response,
                     type="response.created",
+                    sequence_number=sequence_number.get_and_increment(),
                 )
 
             # This is always set by the OpenAI API, but not by others e.g. LiteLLM
@@ -89,6 +100,7 @@ class ChatCmplStreamHandler:
                         item=assistant_item,
                         output_index=0,
                         type="response.output_item.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                     yield ResponseContentPartAddedEvent(
                         content_index=state.text_content_index_and_output[0],
@@ -100,6 +112,7 @@ class ChatCmplStreamHandler:
                             annotations=[],
                         ),
                         type="response.content_part.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                 # Emit the delta for this segment of content
                 yield ResponseTextDeltaEvent(
@@ -108,6 +121,7 @@ class ChatCmplStreamHandler:
                     item_id=FAKE_RESPONSES_ID,
                     output_index=0,
                     type="response.output_text.delta",
+                    sequence_number=sequence_number.get_and_increment(),
                 )
                 # Accumulate the text into the response part
                 state.text_content_index_and_output[1].text += delta.content
@@ -134,6 +148,7 @@ class ChatCmplStreamHandler:
                         item=assistant_item,
                         output_index=0,
                         type="response.output_item.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                     yield ResponseContentPartAddedEvent(
                         content_index=state.refusal_content_index_and_output[0],
@@ -145,6 +160,7 @@ class ChatCmplStreamHandler:
                             annotations=[],
                         ),
                         type="response.content_part.added",
+                        sequence_number=sequence_number.get_and_increment(),
                     )
                 # Emit the delta for this segment of refusal
                 yield ResponseRefusalDeltaEvent(
@@ -153,6 +169,7 @@ class ChatCmplStreamHandler:
                     item_id=FAKE_RESPONSES_ID,
                     output_index=0,
                     type="response.refusal.delta",
+                    sequence_number=sequence_number.get_and_increment(),
                 )
                 # Accumulate the refusal string in the output part
                 state.refusal_content_index_and_output[1].refusal += delta.refusal
@@ -190,6 +207,7 @@ class ChatCmplStreamHandler:
                 output_index=0,
                 part=state.text_content_index_and_output[1],
                 type="response.content_part.done",
+                sequence_number=sequence_number.get_and_increment(),
             )
 
         if state.refusal_content_index_and_output:
@@ -201,6 +219,7 @@ class ChatCmplStreamHandler:
                 output_index=0,
                 part=state.refusal_content_index_and_output[1],
                 type="response.content_part.done",
+                sequence_number=sequence_number.get_and_increment(),
             )
 
         # Actually send events for the function calls
@@ -216,6 +235,7 @@ class ChatCmplStreamHandler:
                 ),
                 output_index=function_call_starting_index,
                 type="response.output_item.added",
+                sequence_number=sequence_number.get_and_increment(),
             )
             # Then, yield the args
             yield ResponseFunctionCallArgumentsDeltaEvent(
@@ -223,6 +243,7 @@ class ChatCmplStreamHandler:
                 item_id=FAKE_RESPONSES_ID,
                 output_index=function_call_starting_index,
                 type="response.function_call_arguments.delta",
+                sequence_number=sequence_number.get_and_increment(),
             )
             # Finally, the ResponseOutputItemDone
             yield ResponseOutputItemDoneEvent(
@@ -235,6 +256,7 @@ class ChatCmplStreamHandler:
                 ),
                 output_index=function_call_starting_index,
                 type="response.output_item.done",
+                sequence_number=sequence_number.get_and_increment(),
             )
 
         # Finally, send the Response completed event
@@ -258,6 +280,7 @@ class ChatCmplStreamHandler:
                 item=assistant_msg,
                 output_index=0,
                 type="response.output_item.done",
+                sequence_number=sequence_number.get_and_increment(),
             )
 
         for function_call in state.function_calls.values():
@@ -289,4 +312,5 @@ class ChatCmplStreamHandler:
         yield ResponseCompletedEvent(
             response=final_response,
             type="response.completed",
+            sequence_number=sequence_number.get_and_increment(),
         )
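
Every event the handler synthesizes now carries a monotonically increasing sequence_number, matching what the real Responses API emits. The counter's semantics in isolation:

seq = SequenceNumber()  # internal helper shown in the hunk above
assert [seq.get_and_increment() for _ in range(3)] == [0, 1, 2]
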
agents/models/openai_chatcompletions.py CHANGED
@@ -9,6 +9,7 @@ from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.responses import Response
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -83,6 +84,18 @@ class OpenAIChatCompletionsModel(Model):
                 input_tokens=response.usage.prompt_tokens,
                 output_tokens=response.usage.completion_tokens,
                 total_tokens=response.usage.total_tokens,
+                input_tokens_details=InputTokensDetails(
+                    cached_tokens=getattr(
+                        response.usage.prompt_tokens_details, "cached_tokens", 0
+                    )
+                    or 0,
+                ),
+                output_tokens_details=OutputTokensDetails(
+                    reasoning_tokens=getattr(
+                        response.usage.completion_tokens_details, "reasoning_tokens", 0
+                    )
+                    or 0,
+                ),
             )
             if response.usage
             else Usage()
@@ -252,7 +265,7 @@ class OpenAIChatCompletionsModel(Model):
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            extra_headers={ **HEADERS, **(model_settings.extra_headers or {}) },
+            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
agents/models/openai_responses.py CHANGED
@@ -10,6 +10,7 @@ from openai.types import ChatModel
 from openai.types.responses import (
     Response,
     ResponseCompletedEvent,
+    ResponseIncludable,
     ResponseStreamEvent,
     ResponseTextConfigParam,
     ToolParam,
@@ -23,7 +24,17 @@ from ..exceptions import UserError
 from ..handoffs import Handoff
 from ..items import ItemHelpers, ModelResponse, TResponseInputItem
 from ..logger import logger
-from ..tool import ComputerTool, FileSearchTool, FunctionTool, Tool, WebSearchTool
+from ..tool import (
+    CodeInterpreterTool,
+    ComputerTool,
+    FileSearchTool,
+    FunctionTool,
+    HostedMCPTool,
+    ImageGenerationTool,
+    LocalShellTool,
+    Tool,
+    WebSearchTool,
+)
 from ..tracing import SpanError, response_span
 from ..usage import Usage
 from ..version import __version__
@@ -36,13 +47,6 @@ if TYPE_CHECKING:
 _USER_AGENT = f"Agents/Python {__version__}"
 _HEADERS = {"User-Agent": _USER_AGENT}
 
-# From the Responses API
-IncludeLiteral = Literal[
-    "file_search_call.results",
-    "message.input_image.image_url",
-    "computer_call_output.output.image_url",
-]
-
 
 class OpenAIResponsesModel(Model):
     """
@@ -98,6 +102,8 @@ class OpenAIResponsesModel(Model):
                 input_tokens=response.usage.input_tokens,
                 output_tokens=response.usage.output_tokens,
                 total_tokens=response.usage.total_tokens,
+                input_tokens_details=response.usage.input_tokens_details,
+                output_tokens_details=response.usage.output_tokens_details,
             )
             if response.usage
             else Usage()
@@ -271,7 +277,7 @@ class OpenAIResponsesModel(Model):
 @dataclass
 class ConvertedTools:
     tools: list[ToolParam]
-    includes: list[IncludeLiteral]
+    includes: list[ResponseIncludable]
 
 
 class Converter:
@@ -299,6 +305,18 @@ class Converter:
             return {
                 "type": "computer_use_preview",
             }
+        elif tool_choice == "image_generation":
+            return {
+                "type": "image_generation",
+            }
+        elif tool_choice == "code_interpreter":
+            return {
+                "type": "code_interpreter",
+            }
+        elif tool_choice == "mcp":
+            return {
+                "type": "mcp",
+            }
         else:
             return {
                 "type": "function",
@@ -328,7 +346,7 @@ class Converter:
         handoffs: list[Handoff[Any]],
     ) -> ConvertedTools:
         converted_tools: list[ToolParam] = []
-        includes: list[IncludeLiteral] = []
+        includes: list[ResponseIncludable] = []
 
        computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)]
        if len(computer_tools) > 1:
@@ -346,7 +364,7 @@ class Converter:
         return ConvertedTools(tools=converted_tools, includes=includes)
 
     @classmethod
-    def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]:
+    def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, ResponseIncludable | None]:
         """Returns converted tool and includes"""
 
         if isinstance(tool, FunctionTool):
@@ -357,7 +375,7 @@ class Converter:
                 "type": "function",
                 "description": tool.description,
             }
-            includes: IncludeLiteral | None = None
+            includes: ResponseIncludable | None = None
         elif isinstance(tool, WebSearchTool):
             ws: WebSearchToolParam = {
                 "type": "web_search_preview",
@@ -387,7 +405,20 @@ class Converter:
                 "display_height": tool.computer.dimensions[1],
             }
             includes = None
-
+        elif isinstance(tool, HostedMCPTool):
+            converted_tool = tool.tool_config
+            includes = None
+        elif isinstance(tool, ImageGenerationTool):
+            converted_tool = tool.tool_config
+            includes = None
+        elif isinstance(tool, CodeInterpreterTool):
+            converted_tool = tool.tool_config
+            includes = None
+        elif isinstance(tool, LocalShellTool):
+            converted_tool = {
+                "type": "local_shell",
+            }
+            includes = None
         else:
             raise UserError(f"Unknown tool type: {type(tool)}, tool")
 
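
Since convert_tool_choice now understands the hosted tool names, a model can be forced onto one of the new tools. A hedged sketch via ModelSettings:

from agents import Agent, ImageGenerationTool, ModelSettings

agent = Agent(
    name="Artist",
    tools=[ImageGenerationTool(tool_config={"type": "image_generation"})],
    # Forces the Responses API to call the hosted image generation tool.
    model_settings=ModelSettings(tool_choice="image_generation"),
)
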
agents/run.py CHANGED
@@ -689,6 +689,8 @@ class Runner:
                     input_tokens=event.response.usage.input_tokens,
                     output_tokens=event.response.usage.output_tokens,
                     total_tokens=event.response.usage.total_tokens,
+                    input_tokens_details=event.response.usage.input_tokens_details,
+                    output_tokens_details=event.response.usage.output_tokens_details,
                 )
                 if event.response.usage
                 else Usage()
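
With the details propagated here as well, streamed runs aggregate the same counters as blocking runs. A sketch of reading them afterwards (assuming result comes from Runner.run or Runner.run_streamed):

usage = result.context_wrapper.usage
print(usage.input_tokens, usage.input_tokens_details.cached_tokens)
print(usage.output_tokens, usage.output_tokens_details.reasoning_tokens)
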
agents/stream_events.py CHANGED
@@ -35,6 +35,8 @@ class RunItemStreamEvent:
         "tool_called",
         "tool_output",
         "reasoning_item_created",
+        "mcp_approval_requested",
+        "mcp_list_tools",
     ]
     """The name of the event."""
 
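
Stream consumers can match on the two new names. A hedged sketch (assuming an agent with a HostedMCPTool attached):

result = Runner.run_streamed(agent, "What tools can you use?")
async for event in result.stream_events():
    if event.type == "run_item_stream_event" and event.name == "mcp_list_tools":
        print("MCP tool list received:", event.item.raw_item.tools)
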
agents/tool.py CHANGED
@@ -7,9 +7,11 @@ from dataclasses import dataclass
 from typing import Any, Callable, Literal, Union, overload
 
 from openai.types.responses.file_search_tool_param import Filters, RankingOptions
+from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
+from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
 from openai.types.responses.web_search_tool_param import UserLocation
 from pydantic import ValidationError
-from typing_extensions import Concatenate, ParamSpec
+from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict
 
 from . import _debug
 from .computer import AsyncComputer, Computer
@@ -130,7 +132,115 @@ class ComputerTool:
         return "computer_use_preview"
 
 
-Tool = Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool]
+@dataclass
+class MCPToolApprovalRequest:
+    """A request to approve a tool call."""
+
+    ctx_wrapper: RunContextWrapper[Any]
+    """The run context."""
+
+    data: McpApprovalRequest
+    """The data from the MCP tool approval request."""
+
+
+class MCPToolApprovalFunctionResult(TypedDict):
+    """The result of an MCP tool approval function."""
+
+    approve: bool
+    """Whether to approve the tool call."""
+
+    reason: NotRequired[str]
+    """An optional reason, if rejected."""
+
+
+MCPToolApprovalFunction = Callable[
+    [MCPToolApprovalRequest], MaybeAwaitable[MCPToolApprovalFunctionResult]
+]
+"""A function that approves or rejects a tool call."""
+
+
+@dataclass
+class HostedMCPTool:
+    """A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and
+    call tools, without requiring a a round trip back to your code.
+    If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible
+    environment, or you just prefer to run tool calls locally, then you can instead use the servers
+    in `agents.mcp` and pass `Agent(mcp_servers=[...])` to the agent."""
+
+    tool_config: Mcp
+    """The MCP tool config, which includes the server URL and other settings."""
+
+    on_approval_request: MCPToolApprovalFunction | None = None
+    """An optional function that will be called if approval is requested for an MCP tool. If not
+    provided, you will need to manually add approvals/rejections to the input and call
+    `Runner.run(...)` again."""
+
+    @property
+    def name(self):
+        return "hosted_mcp"
+
+
+@dataclass
+class CodeInterpreterTool:
+    """A tool that allows the LLM to execute code in a sandboxed environment."""
+
+    tool_config: CodeInterpreter
+    """The tool config, which includes the container and other settings."""
+
+    @property
+    def name(self):
+        return "code_interpreter"
+
+
+@dataclass
+class ImageGenerationTool:
+    """A tool that allows the LLM to generate images."""
+
+    tool_config: ImageGeneration
+    """The tool config, which image generation settings."""
+
+    @property
+    def name(self):
+        return "image_generation"
+
+
+@dataclass
+class LocalShellCommandRequest:
+    """A request to execute a command on a shell."""
+
+    ctx_wrapper: RunContextWrapper[Any]
+    """The run context."""
+
+    data: LocalShellCall
+    """The data from the local shell tool call."""
+
+
+LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]
+"""A function that executes a command on a shell."""
+
+
+@dataclass
+class LocalShellTool:
+    """A tool that allows the LLM to execute commands on a shell."""
+
+    executor: LocalShellExecutor
+    """A function that executes a command on a shell."""
+
+    @property
+    def name(self):
+        return "local_shell"
+
+
+Tool = Union[
+    FunctionTool,
+    FileSearchTool,
+    WebSearchTool,
+    ComputerTool,
+    HostedMCPTool,
+    LocalShellTool,
+    ImageGenerationTool,
+    CodeInterpreterTool,
+]
 """A tool that can be used in an agent."""
 
 
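
Putting the new dataclasses together, a hedged sketch of an agent wired up with a hosted MCP server and a local shell (the server label and URL are placeholders; the callbacks are the ones sketched earlier):

from agents import Agent, HostedMCPTool, LocalShellTool

agent = Agent(
    name="Ops assistant",
    tools=[
        HostedMCPTool(
            tool_config={
                "type": "mcp",
                "server_label": "docs",  # placeholder label
                "server_url": "https://example.com/mcp",  # placeholder URL
                "require_approval": "always",
            },
            on_approval_request=approve_mcp_call,
        ),
        LocalShellTool(executor=run_shell_command),
    ],
)
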
agents/usage.py CHANGED
@@ -1,4 +1,6 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
+
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 
 @dataclass
@@ -9,9 +11,18 @@ class Usage:
     input_tokens: int = 0
     """Total input tokens sent, across all requests."""
 
+    input_tokens_details: InputTokensDetails = field(
+        default_factory=lambda: InputTokensDetails(cached_tokens=0)
+    )
+    """Details about the input tokens, matching responses API usage details."""
     output_tokens: int = 0
     """Total output tokens received, across all requests."""
 
+    output_tokens_details: OutputTokensDetails = field(
+        default_factory=lambda: OutputTokensDetails(reasoning_tokens=0)
+    )
+    """Details about the output tokens, matching responses API usage details."""
+
     total_tokens: int = 0
     """Total tokens sent and received, across all requests."""
 
@@ -20,3 +31,12 @@ class Usage:
         self.input_tokens += other.input_tokens if other.input_tokens else 0
         self.output_tokens += other.output_tokens if other.output_tokens else 0
         self.total_tokens += other.total_tokens if other.total_tokens else 0
+        self.input_tokens_details = InputTokensDetails(
+            cached_tokens=self.input_tokens_details.cached_tokens
+            + other.input_tokens_details.cached_tokens
+        )
+
+        self.output_tokens_details = OutputTokensDetails(
+            reasoning_tokens=self.output_tokens_details.reasoning_tokens
+            + other.output_tokens_details.reasoning_tokens
+        )
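
Usage.add now also sums the nested detail counters; the pydantic detail models are rebuilt rather than mutated in place. A small worked example:

from openai.types.responses.response_usage import InputTokensDetails

from agents import Usage

total = Usage(input_tokens=100, input_tokens_details=InputTokensDetails(cached_tokens=40))
step = Usage(input_tokens=50, input_tokens_details=InputTokensDetails(cached_tokens=10))
total.add(step)
assert total.input_tokens == 150
assert total.input_tokens_details.cached_tokens == 50
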
openai_agents-0.0.15.dist-info/METADATA → openai_agents-0.0.16.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.15
+Version: 0.0.16
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -20,7 +20,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.8.0; python_version >= '3.10'
-Requires-Dist: openai>=1.76.0
+Requires-Dist: openai>=1.81.0
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
openai_agents-0.0.15.dist-info/RECORD → openai_agents-0.0.16.dist-info/RECORD
@@ -1,7 +1,7 @@
-agents/__init__.py,sha256=T0Irpxfnd2SDSQ0aD5YvhwtAGTnzybnFb7qcvSsO5qY,6906
+agents/__init__.py,sha256=VXDkkC21o3n0JbKx8YIDwHBzTqwsTJ2JsDmL2p7THuU,7394
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=7gKaBEDwQs7DqlcC3hSbU1I04wisvClXE0c3pZ9sGEM,34167
+agents/_run_impl.py,sha256=1aowNDgxwqPeFsxEQ6P2a7hrlniM_U3lRuKujUoyhuo,42569
 agents/agent.py,sha256=aTC49v9sQJm0gv5a3hW8xCgtMhk2TfjycBP8JyeOJ84,10571
 agents/agent_output.py,sha256=fEK1Yn0XfMrLXZRsBcSig-YDZZ0kZpCgATdwZ-eHYqQ,7127
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
@@ -9,40 +9,40 @@ agents/exceptions.py,sha256=F3AltRt27PGdhbFqKBhRJL9eHqoN4SQx7oxBn0GWmhs,1856
 agents/function_schema.py,sha256=k4GTdxf5bvcisVr9b4xSdTGzkB5oP3XZnJMdouABCsw,12909
 agents/guardrail.py,sha256=vWWcApo9s_6aHapQ5AMko08MqC8Jrlk-J5iqIRctCDQ,9291
 agents/handoffs.py,sha256=wRg-HBGKBZev88mOg_mfv6CR8T2kewZM8eX3tb71l1g,9043
-agents/items.py,sha256=6Xnf6a2tIgM8Pz3T2Xr6J8wgok8fI-KhyKW1XdfHBJU,8306
+agents/items.py,sha256=lXFc_gKLEqwXIcyMKk4Q-6Rjry0MWD93xlvk4Y1W970,9695
 agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
 agents/model_settings.py,sha256=7s9YjfHBVz1f1a-V3dd-8eMe-IAgfDXhQgChI27Kz00,3326
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 agents/result.py,sha256=dhtOaLIoOp5PYC4qcsgoU5qg2yvdI_VKdCy6i2qth7k,9305
-agents/run.py,sha256=kPxqU6KjFzkuVdH-UbKPUhJrvl70ChMnQbfI4UDD3fw,40276
+agents/run.py,sha256=5E3hx0mc3Gs3QcCCuNIddlRt4TJ9e_3629X5MSeOGX0,40454
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
-agents/stream_events.py,sha256=ULgBEcL_H4vklZoxhpY2yomeoxVF0UiXvswsFsjFv4s,1547
+agents/stream_events.py,sha256=2zDCbJvUKnDfaogGltL6Yh9WvHxtTJf_z_IS0nvKjf0,1607
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
-agents/tool.py,sha256=XKeR1khfbaPbyO8DiGsn8WMO_Hkbrmm9NQzGeRsKcPs,11641
-agents/usage.py,sha256=-MZOmSDVdWxA2V_yVVnmUcwVcLdvYFccv0HXZ7Ow3_A,733
+agents/tool.py,sha256=CnGYyvDDYjIqGDSpDqUtbQtzeQrXdWJONMsFFmCpWHk,14711
+agents/usage.py,sha256=GB83eElU-DVkdutGObGDSX5vJNy8ssu3Xbpp5LlHfwU,1643
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
 agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/extensions/handoff_filters.py,sha256=2cXxu1JROez96CpTiGuT9PIuaIrIE8ksP01fX83krKM,1977
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=AQFC7kQlZqTI6QVkyDHrF_DodCytrrhcLg35nfRd_JA,4256
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=JV9DfS6gj9QR0NMFyX4o3Dq5QqJNQ3BksBh7ijrgWoA,13776
+agents/extensions/models/litellm_model.py,sha256=lLWLXmq5pUXAElyPSUKSmRbHPAiKjbG9aWvUSgTcS9s,14402
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=_aDpMTvYCe1IezOEasZ0vmombBM8r7BD8lpXiKi-UlM,499
-agents/mcp/server.py,sha256=f4F5DSH3TfMJ1SvruRqJT_wYbWhKQbRzVhdYdAjnOnk,15748
+agents/mcp/server.py,sha256=kIpQktrsNQUF0pNkERyfEcu-oYDvseBbsye_PzRmHnM,15910
 agents/mcp/util.py,sha256=dIEdYDMc7Sjp-DFQnvoc4VWU-B7Heyx0I41bcW7RlEg,5232
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
 agents/models/chatcmpl_converter.py,sha256=Sae-ITlhQz8_SiFiSat7Z-lavqIuczduOXR_PF_f6cs,18126
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
-agents/models/chatcmpl_stream_handler.py,sha256=VjskdeGnepn0iJbxsqNZrexcuAYAV1zd5hwt0lU8E7I,12452
+agents/models/chatcmpl_stream_handler.py,sha256=sDl8O7AKxpWxAq7-bgCUClD5JySUnbQ8RTPc0HeDElM,13713
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=eEpiIBn9MxsmXUK1HPpn3c7TYPduBYC7tsWnDHSYJHo,3553
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=QiUOdd4gQ7f-uslm4SqRlv9bt3T1oFL87EnqVYlWw4A,10390
+agents/models/openai_chatcompletions.py,sha256=obO209PFcf-qXDXmpoIagRmiLMVqsCoG1Amtvu9JfX0,11034
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=-hwXW7gXYOs4EbVrFhsil-tWb63gtLj_vaGQ9HXf6nE,14331
+agents/models/openai_responses.py,sha256=JFajISS-sYYxKhb66tZ5cYPEqIYOj6ap762Z-87c7fE,15368
 agents/tracing/__init__.py,sha256=-hJeEiNvgyQdEXpFTrr_qu_XYREvIrF5KyePDtovSak,2804
 agents/tracing/create.py,sha256=kkMf2pp5Te20YkiSvf3Xj3J9qMibQCjEAxZs1Lr_kTE,18124
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -76,7 +76,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.0.15.dist-info/METADATA,sha256=aWuj1znysjWdTLEA_Qtou1aOsPBNRRVFXiPcIoJ4o9c,8163
-openai_agents-0.0.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.0.15.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.0.15.dist-info/RECORD,,
+openai_agents-0.0.16.dist-info/METADATA,sha256=EVGuugo_KTS-mZ0Luqa-wXEsgu8loIFS2lXwfzdJyR8,8163
+openai_agents-0.0.16.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.0.16.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.0.16.dist-info/RECORD,,