openai-agents 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of openai-agents might be problematic.

Files changed (37)
  1. agents/__init__.py +3 -1
  2. agents/_run_impl.py +44 -7
  3. agents/agent.py +36 -4
  4. agents/extensions/memory/__init__.py +15 -0
  5. agents/extensions/memory/sqlalchemy_session.py +312 -0
  6. agents/extensions/models/litellm_model.py +11 -6
  7. agents/extensions/models/litellm_provider.py +3 -1
  8. agents/function_schema.py +2 -2
  9. agents/handoffs.py +3 -3
  10. agents/lifecycle.py +40 -1
  11. agents/mcp/server.py +59 -8
  12. agents/memory/__init__.py +9 -2
  13. agents/memory/openai_conversations_session.py +94 -0
  14. agents/memory/session.py +0 -270
  15. agents/memory/sqlite_session.py +275 -0
  16. agents/model_settings.py +8 -3
  17. agents/models/__init__.py +13 -0
  18. agents/models/chatcmpl_converter.py +5 -0
  19. agents/models/chatcmpl_stream_handler.py +81 -17
  20. agents/models/default_models.py +58 -0
  21. agents/models/interface.py +4 -0
  22. agents/models/openai_chatcompletions.py +4 -2
  23. agents/models/openai_provider.py +3 -1
  24. agents/models/openai_responses.py +24 -10
  25. agents/realtime/config.py +3 -0
  26. agents/realtime/events.py +11 -0
  27. agents/realtime/model_events.py +10 -0
  28. agents/realtime/openai_realtime.py +39 -5
  29. agents/realtime/session.py +7 -0
  30. agents/repl.py +7 -3
  31. agents/run.py +132 -7
  32. agents/tool.py +9 -1
  33. agents/tracing/processors.py +2 -2
  34. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/METADATA +16 -14
  35. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/RECORD +37 -32
  36. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/WHEEL +0 -0
  37. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/licenses/LICENSE +0 -0
agents/run.py CHANGED
@@ -2,10 +2,14 @@ from __future__ import annotations
 
 import asyncio
 import inspect
+import os
 from dataclasses import dataclass, field
-from typing import Any, Callable, Generic, cast
+from typing import Any, Callable, Generic, cast, get_args
 
-from openai.types.responses import ResponseCompletedEvent
+from openai.types.responses import (
+    ResponseCompletedEvent,
+    ResponseOutputItemAddedEvent,
+)
 from openai.types.responses.response_prompt_param import (
     ResponsePromptParam,
 )
@@ -40,7 +44,14 @@ from .guardrail import (
     OutputGuardrailResult,
 )
 from .handoffs import Handoff, HandoffInputFilter, handoff
-from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
+from .items import (
+    ItemHelpers,
+    ModelResponse,
+    RunItem,
+    ToolCallItem,
+    ToolCallItemTypes,
+    TResponseInputItem,
+)
 from .lifecycle import RunHooks
 from .logger import logger
 from .memory import Session
@@ -49,7 +60,7 @@ from .models.interface import Model, ModelProvider
 from .models.multi_provider import MultiProvider
 from .result import RunResult, RunResultStreaming
 from .run_context import RunContextWrapper, TContext
-from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
+from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
 from .tool import Tool
 from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
@@ -81,6 +92,12 @@ def get_default_agent_runner() -> AgentRunner:
     return DEFAULT_AGENT_RUNNER
 
 
+def _default_trace_include_sensitive_data() -> bool:
+    """Returns the default value for trace_include_sensitive_data based on environment variable."""
+    val = os.getenv("OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA", "true")
+    return val.strip().lower() in ("1", "true", "yes", "on")
+
+
 @dataclass
 class ModelInputData:
     """Container for the data that will be sent to the model."""
@@ -135,7 +152,9 @@ class RunConfig:
     """Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run.
     """
 
-    trace_include_sensitive_data: bool = True
+    trace_include_sensitive_data: bool = field(
+        default_factory=_default_trace_include_sensitive_data
+    )
     """Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or
     LLM generations) in traces. If False, we'll still create spans for these events, but the
     sensitive data will not be included.
@@ -189,6 +208,9 @@ class RunOptions(TypedDict, Generic[TContext]):
     previous_response_id: NotRequired[str | None]
    """The ID of the previous response, if any."""
 
+    conversation_id: NotRequired[str | None]
+    """The ID of the stored conversation, if any."""
+
     session: NotRequired[Session | None]
     """The session for the run."""
 
@@ -205,6 +227,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow starting at the given agent. The agent will run in a loop until a final
@@ -229,6 +252,13 @@ class Runner:
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The conversation ID (https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
+                If provided, the conversation will be used to read and write items.
+                Every agent will have access to the conversation history so far,
+                and it's output items will be written to the conversation.
+                We recommend only using this if you are exclusively using OpenAI models;
+                other model providers don't write to the Conversation object,
+                so you'll end up having partial conversations stored.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -242,6 +272,7 @@
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -256,6 +287,7 @@
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
@@ -283,6 +315,7 @@
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -296,6 +329,7 @@
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -309,6 +343,7 @@
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResultStreaming:
         """Run a workflow starting at the given agent in streaming mode. The returned result object
@@ -334,6 +369,7 @@
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A result object that contains data about the run, as well as a method to stream events.
         """
@@ -346,6 +382,7 @@
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -367,6 +404,7 @@
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
         if hooks is None:
             hooks = RunHooks[Any]()
@@ -459,6 +497,7 @@
                                should_run_agent_start_hooks=should_run_agent_start_hooks,
                                tool_use_tracker=tool_use_tracker,
                                previous_response_id=previous_response_id,
+                                conversation_id=conversation_id,
                            ),
                        )
                    else:
@@ -473,6 +512,7 @@
                            should_run_agent_start_hooks=should_run_agent_start_hooks,
                            tool_use_tracker=tool_use_tracker,
                            previous_response_id=previous_response_id,
+                            conversation_id=conversation_id,
                        )
                    should_run_agent_start_hooks = False
 
@@ -539,6 +579,7 @@
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         return asyncio.get_event_loop().run_until_complete(
@@ -551,6 +592,7 @@
                 hooks=hooks,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
             )
         )
 
@@ -565,6 +607,7 @@
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         if hooks is None:
@@ -619,6 +662,7 @@
                 context_wrapper=context_wrapper,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 session=session,
             )
         )
@@ -719,6 +763,7 @@
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         previous_response_id: str | None,
+        conversation_id: str | None,
         session: Session | None,
     ):
         if streamed_result.trace:
@@ -802,6 +847,7 @@
                        tool_use_tracker,
                        all_tools,
                        previous_response_id,
+                        conversation_id,
                    )
                    should_run_agent_start_hooks = False
 
@@ -904,7 +950,10 @@
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
+        emitted_tool_call_ids: set[str] = set()
+
         if should_run_agent_start_hooks:
             await asyncio.gather(
                 hooks.on_agent_start(context_wrapper, agent),
@@ -935,6 +984,7 @@
         input = ItemHelpers.input_to_new_input_list(streamed_result.input)
         input.extend([item.to_input_item() for item in streamed_result.new_items])
 
+        # THIS IS THE RESOLVED CONFLICT BLOCK
         filtered = await cls._maybe_filter_model_input(
             agent=agent,
             run_config=run_config,
@@ -943,6 +993,12 @@
             system_instructions=system_prompt,
         )
 
+        # Call hook just before the model is invoked, with the correct system_prompt.
+        if agent.hooks:
+            await agent.hooks.on_llm_start(
+                context_wrapper, agent, filtered.instructions, filtered.input
+            )
+
         # 1. Stream the output events
         async for event in model.stream_response(
             filtered.instructions,
@@ -955,6 +1011,7 @@
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         ):
             if isinstance(event, ResponseCompletedEvent):
@@ -977,16 +1034,40 @@
                 )
                 context_wrapper.usage.add(usage)
 
+            if isinstance(event, ResponseOutputItemAddedEvent):
+                output_item = event.item
+
+                if isinstance(output_item, _TOOL_CALL_TYPES):
+                    call_id: str | None = getattr(
+                        output_item, "call_id", getattr(output_item, "id", None)
+                    )
+
+                    if call_id and call_id not in emitted_tool_call_ids:
+                        emitted_tool_call_ids.add(call_id)
+
+                        tool_item = ToolCallItem(
+                            raw_item=cast(ToolCallItemTypes, output_item),
+                            agent=agent,
+                        )
+                        streamed_result._event_queue.put_nowait(
+                            RunItemStreamEvent(item=tool_item, name="tool_called")
+                        )
+
             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
 
+        # Call hook just after the model response is finalized.
+        if agent.hooks and final_response is not None:
+            await agent.hooks.on_llm_end(context_wrapper, agent, final_response)
+
         # 2. At this point, the streaming is complete for this turn of the agent loop.
         if not final_response:
             raise ModelBehaviorError("Model did not produce a final response!")
 
         # 3. Now, we can process the turn as we do in the non-streaming case
-        return await cls._get_single_step_result_from_streamed_response(
+        single_step_result = await cls._get_single_step_result_from_response(
             agent=agent,
-            streamed_result=streamed_result,
+            original_input=streamed_result.input,
+            pre_step_items=streamed_result.new_items,
             new_response=final_response,
             output_schema=output_schema,
             all_tools=all_tools,
@@ -997,6 +1078,34 @@
             tool_use_tracker=tool_use_tracker,
         )
 
+        if emitted_tool_call_ids:
+            import dataclasses as _dc
+
+            filtered_items = [
+                item
+                for item in single_step_result.new_step_items
+                if not (
+                    isinstance(item, ToolCallItem)
+                    and (
+                        call_id := getattr(
+                            item.raw_item, "call_id", getattr(item.raw_item, "id", None)
+                        )
+                    )
+                    and call_id in emitted_tool_call_ids
+                )
+            ]
+
+            single_step_result_filtered = _dc.replace(
+                single_step_result, new_step_items=filtered_items
+            )
+
+            RunImpl.stream_step_result_to_queue(
+                single_step_result_filtered, streamed_result._event_queue
+            )
+        else:
+            RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue)
+        return single_step_result
+
     @classmethod
     async def _run_single_turn(
         cls,
@@ -1011,6 +1120,7 @@
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -1044,6 +1154,7 @@
             run_config,
             tool_use_tracker,
             previous_response_id,
+            conversation_id,
             prompt_config,
         )
 
@@ -1238,6 +1349,7 @@
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         # Allow user to modify model input right before the call, if configured
@@ -1252,6 +1364,14 @@
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
         model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
+        # If the agent has hooks, we need to call them before and after the LLM call
+        if agent.hooks:
+            await agent.hooks.on_llm_start(
+                context_wrapper,
+                agent,
+                filtered.instructions,  # Use filtered instructions
+                filtered.input,  # Use filtered input
+            )
 
         new_response = await model.get_response(
             system_instructions=filtered.instructions,
@@ -1264,8 +1384,12 @@
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
            ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         )
+        # If the agent has hooks, we need to call them after the LLM call
+        if agent.hooks:
+            await agent.hooks.on_llm_end(context_wrapper, agent, new_response)
 
         context_wrapper.usage.add(new_response.usage)
 
@@ -1375,6 +1499,7 @@
 
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
+_TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes)
 
 
 def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]:
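
Taken together, the run.py changes add a `conversation_id` passthrough to the Responses API, an environment-variable override (`OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA`) for the `trace_include_sensitive_data` default, and `on_llm_start`/`on_llm_end` hook calls around each model invocation. A minimal sketch of how these surface to callers of 0.2.10 follows; the hook parameter names are inferred from the call sites above (lifecycle.py itself is not shown in this diff) and `conv_123` is a placeholder conversation ID, not a real value.

```python
import asyncio
from typing import Any

from agents import Agent, Runner
from agents.lifecycle import AgentHooks


class LoggingHooks(AgentHooks[Any]):
    # Parameter names below are inferred from the run.py call sites; treat them as assumptions.
    async def on_llm_start(self, context, agent, system_prompt, input_items) -> None:
        print(f"[{agent.name}] calling the model")

    async def on_llm_end(self, context, agent, response) -> None:
        print(f"[{agent.name}] model call finished")


async def main() -> None:
    # Exporting OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA=false now flips the
    # RunConfig default added by _default_trace_include_sensitive_data() above.
    agent = Agent(name="Assistant", instructions="Reply concisely.", hooks=LoggingHooks())
    result = await Runner.run(
        agent,
        "What did we agree on earlier?",
        conversation_id="conv_123",  # placeholder Conversations API ID
    )
    print(result.final_output)


asyncio.run(main())
```

Per the docstring added above, `conversation_id` is best reserved for runs that use OpenAI models exclusively, since other providers do not write back to the Conversation object.
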
agents/tool.py CHANGED
@@ -12,6 +12,7 @@ from openai.types.responses.response_computer_tool_call import (
     ResponseComputerToolCall,
 )
 from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
+from openai.types.responses.tool import WebSearchToolFilters
 from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
 from openai.types.responses.web_search_tool_param import UserLocation
 from pydantic import ValidationError
@@ -133,6 +134,9 @@ class WebSearchTool:
     user_location: UserLocation | None = None
     """Optional location for the search. Lets you customize results to be relevant to a location."""
 
+    filters: WebSearchToolFilters | None = None
+    """A filter to apply based on file attributes."""
+
     search_context_size: Literal["low", "medium", "high"] = "medium"
     """The amount of context to use for the search."""
 
@@ -264,7 +268,11 @@ LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]
 
 @dataclass
 class LocalShellTool:
-    """A tool that allows the LLM to execute commands on a shell."""
+    """A tool that allows the LLM to execute commands on a shell.
+
+    For more details, see:
+    https://platform.openai.com/docs/guides/tools-local-shell
+    """
 
     executor: LocalShellExecutor
     """A function that executes a command on a shell."""
agents/tracing/processors.py CHANGED
@@ -70,8 +70,8 @@ class BackendSpanExporter(TracingExporter):
         client.
         """
         # Clear the cached property if it exists
-        if 'api_key' in self.__dict__:
-            del self.__dict__['api_key']
+        if "api_key" in self.__dict__:
+            del self.__dict__["api_key"]
 
         # Update the private attribute
         self._api_key = api_key
{openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.8
+Version: 0.2.10
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.99.6
+Requires-Dist: openai<2,>=1.102.0
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
@@ -30,6 +30,9 @@ Provides-Extra: litellm
 Requires-Dist: litellm<2,>=1.67.4.post1; extra == 'litellm'
 Provides-Extra: realtime
 Requires-Dist: websockets<16,>=15.0; extra == 'realtime'
+Provides-Extra: sqlalchemy
+Requires-Dist: asyncpg>=0.29.0; extra == 'sqlalchemy'
+Requires-Dist: sqlalchemy>=2.0; extra == 'sqlalchemy'
 Provides-Extra: viz
 Requires-Dist: graphviz>=0.17; extra == 'viz'
 Provides-Extra: voice
@@ -58,29 +61,28 @@ Explore the [examples](examples) directory to see the SDK in action, and read ou
 
 ## Get started
 
-1. Set up your Python environment
+To get started, set up your Python environment (Python 3.9 or newer required), and then install OpenAI Agents SDK package.
 
-- Option A: Using venv (traditional method)
+### venv
 
 ```bash
-python -m venv env
-source env/bin/activate # On Windows: env\Scripts\activate
+python -m venv .venv
+source .venv/bin/activate # On Windows: .venv\Scripts\activate
+pip install openai-agents
 ```
 
-- Option B: Using uv (recommended)
+For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.
 
-```bash
-uv venv
-source .venv/bin/activate # On Windows: .venv\Scripts\activate
-```
+### uv
 
-2. Install Agents SDK
+If you're familiar with [uv](https://docs.astral.sh/uv/), using the tool would be even similar:
 
 ```bash
-pip install openai-agents
+uv init
+uv add openai-agents
 ```
 
-For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.
+For voice support, install with the optional `voice` group: `uv add 'openai-agents[voice]'`.
 
 ## Hello world example
 
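The new `sqlalchemy` extra (`pip install 'openai-agents[sqlalchemy]'`) pairs with the `agents/extensions/memory/sqlalchemy_session.py` module added in this release. That module's API is not shown in this diff, so the class name and `from_url` constructor below are assumptions drawn from the file name and the SDK's session conventions, not something this diff confirms:

```python
import asyncio

from agents import Agent, Runner
from agents.extensions.memory import SQLAlchemySession  # assumed export


async def main() -> None:
    # Assumed constructor: a session id plus an async SQLAlchemy URL; the asyncpg
    # dependency in the new extra suggests Postgres is the intended backend.
    session = SQLAlchemySession.from_url(
        "user-123",
        url="postgresql+asyncpg://agents:agents@localhost/agents",
        create_tables=True,
    )
    agent = Agent(name="Assistant", instructions="Reply concisely.")
    result = await Runner.run(agent, "Hi, remember me?", session=session)
    print(result.final_output)


asyncio.run(main())
```
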
{openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/RECORD CHANGED
@@ -1,27 +1,27 @@
-agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
+agents/__init__.py,sha256=Kr6-8HItTfnz5HFS9x7PVD99v_Lu9VxlE27UjX3BH8M,8000
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=8Bc8YIHzv8Qf40tUAcHV5qqUkGSUxSraNkV0Y5xLFFQ,44894
-agents/agent.py,sha256=jn_nV38eVLK3QYh7dUmKO1AocQOCCPaHEERaSVt0l8g,17574
+agents/_run_impl.py,sha256=cEGeEQDc3NCNGwwfpKWnTCXgxyznSmWgDpQkRq-6JgM,46461
+agents/agent.py,sha256=IINVHZyO5iFTN3rf94YB9Hv3hUIOouVUFt9cagSJwvQ,19120
 agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
-agents/function_schema.py,sha256=yZ3PEOmfy836Me_W4QlItMeFq2j4BtpuI2FmQswbIcQ,13590
+agents/function_schema.py,sha256=jXdpjl90lODRzdoOR_kUmEbfA3T8Dfa7kkSV8xWQDDo,13558
 agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
-agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
+agents/handoffs.py,sha256=kDTM3nj3E_0khiJPMJAIN00gektMTRNbaYSbc5ZCnBM,11411
 agents/items.py,sha256=aHo7KTXZLBcHSrKHWDaBB6L7XmBCAIekG5e0xOIhkyM,9828
-agents/lifecycle.py,sha256=sJwESHBHbml7rSYH360-P6x1bLyENcQWm4bT4rQcbuo,3129
+agents/lifecycle.py,sha256=hGsqzumOSaal6oAjTqTfvBXl-ShAOkC42sthJigB5Fg,4308
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=7zGEGxfXtRHlst9qYngYJc5mkr2l_mi5YuQDGiQ-qXM,6485
+agents/model_settings.py,sha256=7Ul-Xg-aNVXIbK6V4Rm2t5EEfNR0tsy_A9ac_wFqLLk,6828
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-agents/repl.py,sha256=FKZlkGfw6QxItTkjFkCAQwXuV_pn69DIamGd3PiKQFk,2361
+agents/repl.py,sha256=NX0BE5YDnmGQ2rdQsmLm3CKkQZ5m4GC95xXmUsAXJVs,2539
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=lPp-nZWKA0gg0E2ace94zwFo-FmQK7Fj_cXcrvAoFlQ,55978
+agents/run.py,sha256=sEqyIa5MAbOFOmDv5qp2BrJN8TizwiyiSo_q19Ca2gc,61298
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
-agents/tool.py,sha256=CWjwssw4TSnvvQaxo42mUkA2Y5sZzM_h3QTq8zJwIRs,16750
+agents/tool.py,sha256=jOmz-EMmROCERdGgOQaJyvY6ZyyNz93ByBc4y-loisQ,17009
 agents/tool_context.py,sha256=lbnctijZeanXAThddkklF7vDrXK1Ie2_wx6JZPCOihI,1434
 agents/usage.py,sha256=Tb5udGd3DPgD0JBdRD8fDctTE4M-zKML5uRn8ZG1yBc,1675
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
@@ -29,45 +29,50 @@ agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
 agents/extensions/handoff_filters.py,sha256=Bzkjb1SmIHoibgO26oesNO2Qdx2avfDGkHrSTb-XAr0,2029
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=sf9D_C-HMwkbWdZccTZvvMPRy_NSiwbm48tRJlESQBI,5144
+agents/extensions/memory/__init__.py,sha256=Yionp3G3pj53zenHPZUHhR9aIDVEpu0d_PcvdytBRes,534
+agents/extensions/memory/sqlalchemy_session.py,sha256=H0aykdB4lUikmzKgwWQqI1PSYZBvHA4TDnaj9rP4HDI,11583
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=4m6MVYaa-pJzXuBNRZGv0vw2R73R32B0EAZ1kXanVVw,15692
-agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
+agents/extensions/models/litellm_model.py,sha256=cDLOcZVojLx-CZbipVJ5dn4RnXdegrXSPROVR-5Lxdc,15883
+agents/extensions/models/litellm_provider.py,sha256=ZHgh1nMoEvA7NpawkzLh3JDuDFtwXUV94Rs7UrwWqAk,1083
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
-agents/mcp/server.py,sha256=mTXQL4om5oA2fYevk63SUlwDri-RcUleUH_4hFrA0QM,24266
+agents/mcp/server.py,sha256=4T58xiWCLiCm6JoUy_3jYWz5A8ZNsHiV1hIxjahoedU,26624
 agents/mcp/util.py,sha256=YVdPst1wWkTwbeshs-FYbr_MtrYJwO_4NzhSwj5aE5c,8239
-agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
-agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
-agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+agents/memory/__init__.py,sha256=H6Nh01MRlinnEFb2Ca0ezqerIzAHuJ1izeXeYhkPrUM,255
+agents/memory/openai_conversations_session.py,sha256=Xiod8qk-qX4-348FvLLIG35fy0m_JiQkNqNF_woc_a8,3341
+agents/memory/session.py,sha256=pyyFn3r07ydhoy7vn1e_ky9OUa1a_7DKoEo8aZz2d3s,3038
+agents/memory/sqlite_session.py,sha256=6HGzSL70mQgutITIPZUC2x2Qtj6U4hXiZTceu3Da7TM,10130
+agents/models/__init__.py,sha256=E0XVqWayVAsFqxucDLBW30siaqfNQsVrAnfidG_C3ok,287
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=fdA-4_O7GabTCFZJOrtI6TdxFvjS4Bn4vf2RwVC9yNA,20012
+agents/models/chatcmpl_converter.py,sha256=fZHui5V0KwTr27L_Io-4iQxPXr0ZoEMOv1_kJNxW-y8,20320
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
-agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6NhbfHfpCY3SXJOM,24124
+agents/models/chatcmpl_stream_handler.py,sha256=LEaQ6N9AgzOSczo3fTrv-ckkb1guBGiuNeHBAaoinCE,27429
+agents/models/default_models.py,sha256=mlvBePn8H4UkHo7lN-wh7A3k2ciLgBUFKpROQxzdTfs,2098
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
-agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
+agents/models/interface.py,sha256=-AFUHC8iRuGZmtQwguDw4s-M4OPL2y2mct4TAmWvVrU,4057
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=lJJZCdWiZ0jTUp77OD1Zs6tSLZ7k8v1j_D2gB2Nw12Y,13179
-agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=BnlN9hH6J4LKWBuM0lDfhvRgAb8IjQJuk5Hfd3OJ8G0,17330
+agents/models/openai_chatcompletions.py,sha256=abwQj8CqUHa2KB4NC_DVeJhZOygyAnr4N4maZtYhchU,13321
+agents/models/openai_provider.py,sha256=vBu3mlgDBrI_cZVVmfnWBHoPlJlsmld3lfdX8sNQQAM,3624
+agents/models/openai_responses.py,sha256=XFR8gl25V4ZiyCIqExU3Xk4Lxtxh-afcFSuVJCeYwII,18255
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
 agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
 agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
 agents/realtime/_util.py,sha256=uawurhWKi3_twNFcZ5Yn1mVvv0RKl4IoyCSag8hGxrE,313
 agents/realtime/agent.py,sha256=yZDgycnLFtJcfl7UHak5GEyL2vdBGxegfqEiuuzGPEk,4027
-agents/realtime/config.py,sha256=FMLT2BdxjOCHmBnvd35sZk68U4jEXypngMRAPkm-irk,5828
-agents/realtime/events.py,sha256=YnyXmkc2rkIAcCDoW5yxylMYeXeaq_QTlyRR5u5VsaM,5534
+agents/realtime/config.py,sha256=49ZsKY9ySBFRfiL3RGWW1aVNhahzmoNATb3Buj2npJk,5963
+agents/realtime/events.py,sha256=eANiNNyYlp_1Ybdl-MOwXRVTDtrK9hfgn6iw0xNxnaY,5889
 agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
 agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
 agents/realtime/model.py,sha256=RJBA8-Dkd2JTqGzbKacoX4dN_qTWn_p7npL73To3ymw,6143
-agents/realtime/model_events.py,sha256=X7UrUU_g4u5gWaf2mUesJJ-Ik1Z1QE0Z-ZP7kDmX1t0,4034
+agents/realtime/model_events.py,sha256=YixBKmzlCrhtzCosj0SysyZpyHbZ90455gDr4Kr7Ey8,4338
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=20yHhG-KAGeXY0M0ucty0wpRqXSUVLvoL5cs663NGrI,30201
+agents/realtime/openai_realtime.py,sha256=cVOxBYNwqn38WTSVK-A2E2pHrw0bGSmren2q7670pPU,31361
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
-agents/realtime/session.py,sha256=aGifsl_4LYGZBwXneaOo-H_46fdQs-CAtQD6DmJY2Uo,26560
+agents/realtime/session.py,sha256=hPIxQSsVh5whkgYnEpxk_AgvG3suuDVnpPyqVoPJBRM,26822
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
 agents/tracing/processor_interface.py,sha256=e1mWcIAoQFHID1BapcrAZ6MxZg98bPVYgbOPclVoCXc,1660
-agents/tracing/processors.py,sha256=IKZ_dfQmcs8OaMqNbzWRtimY4nm1xfNRjVguWl6I8SY,11432
+agents/tracing/processors.py,sha256=yAGJ2aIlhU5kvdDLBx1R9_Qsbuq3296b0743GYbSnYM,11432
 agents/tracing/provider.py,sha256=a8bOZtBUih13Gjq8OtyIcx3AWJmCErc43gqPrccx_5k,10098
 agents/tracing/scope.py,sha256=u17_m8RPpGvbHrTkaO_kDi5ROBWhfOAIgBe7suiaRD4,1445
 agents/tracing/setup.py,sha256=2h9TH1GAKcXKM1U99dOKKR3XlHp8JKzh2JG3DQPKyhY,612
@@ -97,7 +102,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.8.dist-info/METADATA,sha256=MqNemwQZlvUlnyqVHU1Bxor4liYOsBa4VcE_UJllEas,12104
-openai_agents-0.2.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.2.8.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.2.8.dist-info/RECORD,,
+openai_agents-0.2.10.dist-info/METADATA,sha256=1imjcLQahGjZnwblAnDOdGVUJBXcLkG-3IG1xYlAEfw,12381
+openai_agents-0.2.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.10.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.10.dist-info/RECORD,,