openai-agents 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openai-agents might be problematic.

Files changed (39)
  1. agents/__init__.py +5 -1
  2. agents/_run_impl.py +5 -1
  3. agents/agent.py +62 -30
  4. agents/agent_output.py +2 -2
  5. agents/function_schema.py +11 -1
  6. agents/guardrail.py +5 -1
  7. agents/handoffs.py +32 -14
  8. agents/lifecycle.py +26 -17
  9. agents/mcp/server.py +82 -11
  10. agents/mcp/util.py +16 -9
  11. agents/memory/__init__.py +3 -0
  12. agents/memory/session.py +369 -0
  13. agents/model_settings.py +15 -7
  14. agents/models/chatcmpl_converter.py +20 -3
  15. agents/models/chatcmpl_stream_handler.py +134 -43
  16. agents/models/openai_responses.py +12 -5
  17. agents/realtime/README.md +3 -0
  18. agents/realtime/__init__.py +177 -0
  19. agents/realtime/agent.py +89 -0
  20. agents/realtime/config.py +188 -0
  21. agents/realtime/events.py +216 -0
  22. agents/realtime/handoffs.py +165 -0
  23. agents/realtime/items.py +184 -0
  24. agents/realtime/model.py +69 -0
  25. agents/realtime/model_events.py +159 -0
  26. agents/realtime/model_inputs.py +100 -0
  27. agents/realtime/openai_realtime.py +670 -0
  28. agents/realtime/runner.py +118 -0
  29. agents/realtime/session.py +535 -0
  30. agents/run.py +106 -4
  31. agents/tool.py +6 -7
  32. agents/tool_context.py +16 -3
  33. agents/voice/models/openai_stt.py +1 -1
  34. agents/voice/pipeline.py +6 -0
  35. agents/voice/workflow.py +8 -0
  36. {openai_agents-0.1.0.dist-info → openai_agents-0.2.1.dist-info}/METADATA +121 -4
  37. {openai_agents-0.1.0.dist-info → openai_agents-0.2.1.dist-info}/RECORD +39 -24
  38. {openai_agents-0.1.0.dist-info → openai_agents-0.2.1.dist-info}/WHEEL +0 -0
  39. {openai_agents-0.1.0.dist-info → openai_agents-0.2.1.dist-info}/licenses/LICENSE +0 -0
agents/run.py CHANGED
@@ -32,6 +32,7 @@ from .exceptions import (
     ModelBehaviorError,
     OutputGuardrailTripwireTriggered,
     RunErrorDetails,
+    UserError,
 )
 from .guardrail import (
     InputGuardrail,
@@ -43,6 +44,7 @@ from .handoffs import Handoff, HandoffInputFilter, handoff
 from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
 from .lifecycle import RunHooks
 from .logger import logger
+from .memory import Session
 from .model_settings import ModelSettings
 from .models.interface import Model, ModelProvider
 from .models.multi_provider import MultiProvider
@@ -156,6 +158,9 @@ class RunOptions(TypedDict, Generic[TContext]):
     previous_response_id: NotRequired[str | None]
     """The ID of the previous response, if any."""
 
+    session: NotRequired[Session | None]
+    """The session for the run."""
+
 
 class Runner:
     @classmethod
@@ -169,6 +174,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        session: Session | None = None,
     ) -> RunResult:
         """Run a workflow starting at the given agent. The agent will run in a loop until a final
         output is generated. The loop runs like so:
@@ -205,6 +211,7 @@
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            session=session,
         )
 
     @classmethod
@@ -218,6 +225,7 @@
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        session: Session | None = None,
     ) -> RunResult:
         """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
         `run` method, so it will not work if there's already an event loop (e.g. inside an async
@@ -257,6 +265,7 @@
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            session=session,
         )
 
     @classmethod
@@ -269,6 +278,7 @@
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        session: Session | None = None,
     ) -> RunResultStreaming:
         """Run a workflow starting at the given agent in streaming mode. The returned result object
         contains a method you can use to stream semantic events as they are generated.
@@ -305,6 +315,7 @@
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            session=session,
         )
 
 
@@ -325,11 +336,15 @@ class AgentRunner:
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        session = kwargs.get("session")
         if hooks is None:
             hooks = RunHooks[Any]()
         if run_config is None:
             run_config = RunConfig()
 
+        # Prepare input with session if enabled
+        prepared_input = await self._prepare_input_with_session(input, session)
+
         tool_use_tracker = AgentToolUseTracker()
 
         with TraceCtxManager(
@@ -340,7 +355,7 @@
             disabled=run_config.tracing_disabled,
         ):
             current_turn = 0
-            original_input: str | list[TResponseInputItem] = copy.deepcopy(input)
+            original_input: str | list[TResponseInputItem] = copy.deepcopy(prepared_input)
             generated_items: list[RunItem] = []
             model_responses: list[ModelResponse] = []
 
@@ -399,7 +414,7 @@
                             starting_agent,
                             starting_agent.input_guardrails
                             + (run_config.input_guardrails or []),
-                            copy.deepcopy(input),
+                            copy.deepcopy(prepared_input),
                             context_wrapper,
                         ),
                         self._run_single_turn(
@@ -441,7 +456,7 @@
                         turn_result.next_step.output,
                         context_wrapper,
                     )
-                    return RunResult(
+                    result = RunResult(
                         input=original_input,
                         new_items=generated_items,
                         raw_responses=model_responses,
@@ -451,6 +466,11 @@
                         output_guardrail_results=output_guardrail_results,
                         context_wrapper=context_wrapper,
                     )
+
+                    # Save the conversation to session if enabled
+                    await self._save_result_to_session(session, input, result)
+
+                    return result
                 elif isinstance(turn_result.next_step, NextStepHandoff):
                     current_agent = cast(Agent[TContext], turn_result.next_step.new_agent)
                     current_span.finish(reset_current=True)
@@ -488,10 +508,13 @@
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        session = kwargs.get("session")
+
         return asyncio.get_event_loop().run_until_complete(
             self.run(
                 starting_agent,
                 input,
+                session=session,
                 context=context,
                 max_turns=max_turns,
                 hooks=hooks,
@@ -511,6 +534,8 @@
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        session = kwargs.get("session")
+
         if hooks is None:
             hooks = RunHooks[Any]()
         if run_config is None:
@@ -563,6 +588,7 @@
                 context_wrapper=context_wrapper,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                session=session,
             )
         )
         return streamed_result
@@ -621,6 +647,7 @@
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         previous_response_id: str | None,
+        session: Session | None,
     ):
         if streamed_result.trace:
             streamed_result.trace.start(mark_as_current=True)
@@ -634,6 +661,12 @@
         streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))
 
         try:
+            # Prepare input with session if enabled
+            prepared_input = await AgentRunner._prepare_input_with_session(starting_input, session)
+
+            # Update the streamed result with the prepared input
+            streamed_result.input = prepared_input
+
             while True:
                 if streamed_result.is_complete:
                     break
@@ -680,7 +713,7 @@
                         cls._run_input_guardrails_with_queue(
                             starting_agent,
                             starting_agent.input_guardrails + (run_config.input_guardrails or []),
-                            copy.deepcopy(ItemHelpers.input_to_new_input_list(starting_input)),
+                            copy.deepcopy(ItemHelpers.input_to_new_input_list(prepared_input)),
                            context_wrapper,
                            streamed_result,
                            current_span,
@@ -734,6 +767,23 @@
                     streamed_result.output_guardrail_results = output_guardrail_results
                     streamed_result.final_output = turn_result.next_step.output
                     streamed_result.is_complete = True
+
+                    # Save the conversation to session if enabled
+                    # Create a temporary RunResult for session saving
+                    temp_result = RunResult(
+                        input=streamed_result.input,
+                        new_items=streamed_result.new_items,
+                        raw_responses=streamed_result.raw_responses,
+                        final_output=streamed_result.final_output,
+                        _last_agent=current_agent,
+                        input_guardrail_results=streamed_result.input_guardrail_results,
+                        output_guardrail_results=streamed_result.output_guardrail_results,
+                        context_wrapper=context_wrapper,
+                    )
+                    await AgentRunner._save_result_to_session(
+                        session, starting_input, temp_result
+                    )
+
                     streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                 elif isinstance(turn_result.next_step, NextStepRunAgain):
                     pass
@@ -1136,5 +1186,57 @@
 
         return run_config.model_provider.get_model(agent.model)
 
+    @classmethod
+    async def _prepare_input_with_session(
+        cls,
+        input: str | list[TResponseInputItem],
+        session: Session | None,
+    ) -> str | list[TResponseInputItem]:
+        """Prepare input by combining it with session history if enabled."""
+        if session is None:
+            return input
+
+        # Validate that we don't have both a session and a list input, as this creates
+        # ambiguity about whether the list should append to or replace existing session history
+        if isinstance(input, list):
+            raise UserError(
+                "Cannot provide both a session and a list of input items. "
+                "When using session memory, provide only a string input to append to the "
+                "conversation, or use session=None and provide a list to manually manage "
+                "conversation history."
+            )
+
+        # Get previous conversation history
+        history = await session.get_items()
+
+        # Convert input to list format
+        new_input_list = ItemHelpers.input_to_new_input_list(input)
+
+        # Combine history with new input
+        combined_input = history + new_input_list
+
+        return combined_input
+
+    @classmethod
+    async def _save_result_to_session(
+        cls,
+        session: Session | None,
+        original_input: str | list[TResponseInputItem],
+        result: RunResult,
+    ) -> None:
+        """Save the conversation turn to session."""
+        if session is None:
+            return
+
+        # Convert original input to list format if needed
+        input_list = ItemHelpers.input_to_new_input_list(original_input)
+
+        # Convert new items to input format
+        new_items_as_input = [item.to_input_item() for item in result.new_items]
+
+        # Save all items from this turn
+        items_to_save = input_list + new_items_as_input
+        await session.add_items(items_to_save)
+
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
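The run.py changes wire an optional `session` through `Runner.run`, `Runner.run_sync`, and `Runner.run_streamed`: stored history is prepended to a string input, the turn's new items are saved back afterwards, and a list input is rejected when a session is supplied. A minimal usage sketch against the new API (the agent, instructions, and session ID are illustrative, and the first call needs a configured OpenAI API key):

```python
import asyncio

from agents import Agent, Runner, SQLiteSession
from agents.exceptions import UserError


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Reply concisely.")
    session = SQLiteSession("conversation_123")  # illustrative session ID

    # A string input is appended to the stored history before the model call.
    result = await Runner.run(agent, "Remember that my name is Ada.", session=session)
    print(result.final_output)

    # Passing a list of input items together with a session raises UserError,
    # since it is ambiguous whether the list should extend or replace the history.
    try:
        await Runner.run(agent, [{"role": "user", "content": "hi"}], session=session)
    except UserError as e:
        print(f"Rejected: {e}")


asyncio.run(main())
```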
agents/tool.py CHANGED
@@ -30,8 +30,7 @@ from .util import _error_tracing
 from .util._types import MaybeAwaitable
 
 if TYPE_CHECKING:
-
-    from .agent import Agent
+    from .agent import Agent, AgentBase
 
 ToolParams = ParamSpec("ToolParams")
 
@@ -88,7 +87,7 @@ class FunctionTool:
     """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True,
     as it increases the likelihood of correct JSON input."""
 
-    is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True
+    is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True
     """Whether the tool is enabled. Either a bool or a Callable that takes the run context and agent
     and returns whether the tool is enabled. You can use this to dynamically enable/disable a tool
     based on your context/state."""
@@ -201,7 +200,7 @@ MCPToolApprovalFunction = Callable[
 @dataclass
 class HostedMCPTool:
     """A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and
-    call tools, without requiring a a round trip back to your code.
+    call tools, without requiring a round trip back to your code.
     If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible
     environment, or you just prefer to run tool calls locally, then you can instead use the servers
     in `agents.mcp` and pass `Agent(mcp_servers=[...])` to the agent."""
@@ -301,7 +300,7 @@ def function_tool(
     use_docstring_info: bool = True,
     failure_error_function: ToolErrorFunction | None = None,
     strict_mode: bool = True,
-    is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
+    is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,
 ) -> FunctionTool:
     """Overload for usage as @function_tool (no parentheses)."""
     ...
@@ -316,7 +315,7 @@ def function_tool(
     use_docstring_info: bool = True,
     failure_error_function: ToolErrorFunction | None = None,
     strict_mode: bool = True,
-    is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
+    is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,
 ) -> Callable[[ToolFunction[...]], FunctionTool]:
     """Overload for usage as @function_tool(...)."""
     ...
@@ -331,7 +330,7 @@ def function_tool(
     use_docstring_info: bool = True,
     failure_error_function: ToolErrorFunction | None = default_tool_error_function,
     strict_mode: bool = True,
-    is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
+    is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,
 ) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]:
     """
     Decorator to create a FunctionTool from a function. By default, we will:
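With this change, `is_enabled` callables receive an `AgentBase` rather than `Agent[Any]`, so the same predicate can gate tools for both regular and realtime agents. A short sketch of a dynamically enabled tool (the tool, agent name, and predicate are illustrative):

```python
from typing import Any

from agents import Agent, RunContextWrapper, function_tool


def only_for_support(ctx: RunContextWrapper[Any], agent) -> bool:
    # Enable the tool only when the calling agent is the (hypothetical) "Support" agent.
    return agent.name == "Support"


@function_tool(is_enabled=only_for_support)
def refund_order(order_id: str) -> str:
    """Issue a refund for the given order."""
    return f"Refunded {order_id}"


support_agent = Agent(name="Support", instructions="Help with refunds.", tools=[refund_order])
```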
agents/tool_context.py CHANGED
@@ -1,5 +1,7 @@
 from dataclasses import dataclass, field, fields
-from typing import Any
+from typing import Any, Optional
+
+from openai.types.responses import ResponseFunctionToolCall
 
 from .run_context import RunContextWrapper, TContext
 
@@ -8,16 +10,26 @@ def _assert_must_pass_tool_call_id() -> str:
     raise ValueError("tool_call_id must be passed to ToolContext")
 
 
+def _assert_must_pass_tool_name() -> str:
+    raise ValueError("tool_name must be passed to ToolContext")
+
+
 @dataclass
 class ToolContext(RunContextWrapper[TContext]):
     """The context of a tool call."""
 
+    tool_name: str = field(default_factory=_assert_must_pass_tool_name)
+    """The name of the tool being invoked."""
+
     tool_call_id: str = field(default_factory=_assert_must_pass_tool_call_id)
     """The ID of the tool call."""
 
     @classmethod
     def from_agent_context(
-        cls, context: RunContextWrapper[TContext], tool_call_id: str
+        cls,
+        context: RunContextWrapper[TContext],
+        tool_call_id: str,
+        tool_call: Optional[ResponseFunctionToolCall] = None,
     ) -> "ToolContext":
         """
         Create a ToolContext from a RunContextWrapper.
@@ -26,4 +38,5 @@ class ToolContext(RunContextWrapper[TContext]):
         base_values: dict[str, Any] = {
             f.name: getattr(context, f.name) for f in fields(RunContextWrapper) if f.init
         }
-        return cls(tool_call_id=tool_call_id, **base_values)
+        tool_name = tool_call.name if tool_call is not None else _assert_must_pass_tool_name()
+        return cls(tool_name=tool_name, tool_call_id=tool_call_id, **base_values)
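`ToolContext` now carries the tool name alongside the call ID, resolved from the model's `ResponseFunctionToolCall`. A sketch of a tool handler reading both fields; the manual `FunctionTool` constructor fields and the `ToolContext`-typed handler are assumptions based on this diff's `tool.py` and `tool_context.py`, and the `echo` tool itself is hypothetical:

```python
from typing import Any

from agents import FunctionTool
from agents.tool_context import ToolContext


async def on_invoke(ctx: ToolContext[Any], args_json: str) -> str:
    # The context identifies which tool was called and under which call ID.
    return f"{ctx.tool_name} (call {ctx.tool_call_id}) received: {args_json}"


echo_tool = FunctionTool(
    name="echo",
    description="Echo back the arguments passed by the model.",
    params_json_schema={
        "type": "object",
        "properties": {"text": {"type": "string"}},
        "required": ["text"],
        "additionalProperties": False,
    },
    on_invoke_tool=on_invoke,
)
```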
agents/voice/models/openai_stt.py CHANGED
@@ -226,7 +226,7 @@ class OpenAISTTTranscriptionSession(StreamedTranscriptionSession):
                     break
 
                 event_type = event.get("type", "unknown")
-                if event_type == "conversation.item.input_audio_transcription.completed":
+                if event_type == "input_audio_transcription_completed":
                     transcript = cast(str, event.get("transcript", ""))
                     if len(transcript) > 0:
                         self._end_turn(transcript)
agents/voice/pipeline.py CHANGED
@@ -125,6 +125,12 @@ class VoicePipeline:
             self._get_tts_model(), self.config.tts_settings, self.config
         )
 
+        try:
+            async for intro_text in self.workflow.on_start():
+                await output._add_text(intro_text)
+        except Exception as e:
+            logger.warning(f"on_start() failed: {e}")
+
         transcription_session = await self._get_stt_model().create_session(
             audio_input,
             self.config.stt_settings,
agents/voice/workflow.py CHANGED
@@ -32,6 +32,14 @@ class VoiceWorkflowBase(abc.ABC):
         """
         pass
 
+    async def on_start(self) -> AsyncIterator[str]:
+        """
+        Optional method that runs before any user input is received. Can be used
+        to deliver a greeting or instruction via TTS. Defaults to doing nothing.
+        """
+        return
+        yield
+
 
 class VoiceWorkflowHelper:
     @classmethod
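Together, the pipeline and workflow changes add a pre-input hook: `VoicePipeline` streams whatever `on_start()` yields into the TTS output and only logs a warning if the hook fails. A sketch of a workflow overriding the hook (the greeting and echo logic are illustrative, and the `run(transcription)` signature is assumed from the base class):

```python
from collections.abc import AsyncIterator

from agents.voice import VoiceWorkflowBase


class GreetingWorkflow(VoiceWorkflowBase):
    async def on_start(self) -> AsyncIterator[str]:
        # Spoken via TTS before the first transcription is processed.
        yield "Hi, thanks for calling. How can I help?"

    async def run(self, transcription: str) -> AsyncIterator[str]:
        # Minimal echo behaviour for the sketch; a real workflow would run agents here.
        yield f"You said: {transcription}"
```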
{openai_agents-0.1.0.dist-info → openai_agents-0.2.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.1.0
+Version: 0.2.1
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -20,14 +20,16 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp<2,>=1.9.4; python_version >= '3.10'
-Requires-Dist: openai>=1.87.0
+Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
+Requires-Dist: openai<2,>=1.96.1
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
 Requires-Dist: typing-extensions<5,>=4.12.2
 Provides-Extra: litellm
 Requires-Dist: litellm<2,>=1.67.4.post1; extra == 'litellm'
+Provides-Extra: realtime
+Requires-Dist: websockets<16,>=15.0; extra == 'realtime'
 Provides-Extra: viz
 Requires-Dist: graphviz>=0.17; extra == 'viz'
 Provides-Extra: voice
@@ -49,10 +51,119 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi
 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
 2. [**Handoffs**](https://openai.github.io/openai-agents-python/handoffs/): A specialized tool call used by the Agents SDK for transferring control between agents
 3. [**Guardrails**](https://openai.github.io/openai-agents-python/guardrails/): Configurable safety checks for input and output validation
-4. [**Tracing**](https://openai.github.io/openai-agents-python/tracing/): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows
+4. [**Sessions**](#sessions): Automatic conversation history management across agent runs
+5. [**Tracing**](https://openai.github.io/openai-agents-python/tracing/): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows
 
 Explore the [examples](examples) directory to see the SDK in action, and read our [documentation](https://openai.github.io/openai-agents-python/) for more details.
 
+## Sessions
+
+The Agents SDK provides built-in session memory to automatically maintain conversation history across multiple agent runs, eliminating the need to manually handle `.to_input_list()` between turns.
+
+### Quick start
+
+```python
+from agents import Agent, Runner, SQLiteSession
+
+# Create agent
+agent = Agent(
+    name="Assistant",
+    instructions="Reply very concisely.",
+)
+
+# Create a session instance
+session = SQLiteSession("conversation_123")
+
+# First turn
+result = await Runner.run(
+    agent,
+    "What city is the Golden Gate Bridge in?",
+    session=session
+)
+print(result.final_output)  # "San Francisco"
+
+# Second turn - agent automatically remembers previous context
+result = await Runner.run(
+    agent,
+    "What state is it in?",
+    session=session
+)
+print(result.final_output)  # "California"
+
+# Also works with synchronous runner
+result = Runner.run_sync(
+    agent,
+    "What's the population?",
+    session=session
+)
+print(result.final_output)  # "Approximately 39 million"
+```
+
+### Session options
+
+- **No memory** (default): No session memory when session parameter is omitted
+- **`session: Session = DatabaseSession(...)`**: Use a Session instance to manage conversation history
+
+```python
+from agents import Agent, Runner, SQLiteSession
+
+# Custom SQLite database file
+session = SQLiteSession("user_123", "conversations.db")
+agent = Agent(name="Assistant")
+
+# Different session IDs maintain separate conversation histories
+result1 = await Runner.run(
+    agent,
+    "Hello",
+    session=session
+)
+result2 = await Runner.run(
+    agent,
+    "Hello",
+    session=SQLiteSession("user_456", "conversations.db")
+)
+```
+
+### Custom session implementations
+
+You can implement your own session memory by creating a class that follows the `Session` protocol:
+
+```python
+from agents.memory import Session
+from typing import List
+
+class MyCustomSession:
+    """Custom session implementation following the Session protocol."""
+
+    def __init__(self, session_id: str):
+        self.session_id = session_id
+        # Your initialization here
+
+    async def get_items(self, limit: int | None = None) -> List[dict]:
+        # Retrieve conversation history for the session
+        pass
+
+    async def add_items(self, items: List[dict]) -> None:
+        # Store new items for the session
+        pass
+
+    async def pop_item(self) -> dict | None:
+        # Remove and return the most recent item from the session
+        pass
+
+    async def clear_session(self) -> None:
+        # Clear all items for the session
+        pass
+
+# Use your custom session
+agent = Agent(name="Assistant")
+result = await Runner.run(
+    agent,
+    "Hello",
+    session=MyCustomSession("my_session")
+)
+```
+
 ## Get started
 
 1. Set up your Python environment
@@ -207,10 +318,16 @@ make sync
 
 2. (After making changes) lint/test
 
+```
+make check  # run tests linter and typechecker
+```
+
+Or to run them individually:
 ```
 make tests  # run tests
 make mypy   # run typechecker
 make lint   # run linter
+make format-check  # run style checker
 ```
 
 ## Acknowledgements
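The `Session` protocol quoted in the README diff above only requires `get_items`, `add_items`, `pop_item`, and `clear_session`, so a drop-in implementation can be very small. A sketch of an in-memory variant for tests or prototyping (not part of the package; `InMemorySession` is a made-up name):

```python
from __future__ import annotations


class InMemorySession:
    """Illustrative Session-protocol implementation backed by a plain list."""

    def __init__(self, session_id: str):
        self.session_id = session_id
        self._items: list[dict] = []

    async def get_items(self, limit: int | None = None) -> list[dict]:
        # Oldest-to-newest history, optionally truncated to the most recent `limit` items.
        return list(self._items) if limit is None else list(self._items[-limit:])

    async def add_items(self, items: list[dict]) -> None:
        self._items.extend(items)

    async def pop_item(self) -> dict | None:
        return self._items.pop() if self._items else None

    async def clear_session(self) -> None:
        self._items.clear()
```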
{openai_agents-0.1.0.dist-info → openai_agents-0.2.1.dist-info}/RECORD CHANGED
@@ -1,28 +1,28 @@
-agents/__init__.py,sha256=bsO_4_YL8UzXpI24IE8gKo-qlnDIJ00gvF2rSVfaIy0,7674
+agents/__init__.py,sha256=KO_SBzwwg7cXPvMNDD1_lRhFIVR6E2RmyU624sAEEVo,7781
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=amAggJoriIaUnxs6vjYiZ_vgIIlb1h0v8T3ae8WsS0M,44407
-agents/agent.py,sha256=MQtOFgTBGDc7C-Z4CvtER7qNZHQuxkXbaQ4P2J6DdtE,11997
-agents/agent_output.py,sha256=cVIVwpsgOfloCHL0BD9DSCBCzW_s3T4LesDhvJRu2Uc,7127
+agents/_run_impl.py,sha256=LlUM0YqZWmqz4WoWu0YK1Du6k09TX-ot94sikM16Y4U,44507
+agents/agent.py,sha256=eWtYqVJHz3ol3SoLZm132_sJ46dF5DEKQ8aV8KgDv2E,13381
+agents/agent_output.py,sha256=bHItis02dw-issbxjB4VnjUFdSByM9OR26rzxsFOSnQ,7154
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
-agents/function_schema.py,sha256=XoZVE1dnrDYFhHIIv8SmK3CJEsesC0a2Kj0AYEec9Ok,13106
-agents/guardrail.py,sha256=vWWcApo9s_6aHapQ5AMko08MqC8Jrlk-J5iqIRctCDQ,9291
-agents/handoffs.py,sha256=TSW2cbFVbuwweFhP8QQP01OWctFGYbi6DszcO7NZHME,10095
+agents/function_schema.py,sha256=JvMh356N60_c3hj7BXySuM7eqVwP00jealR7rdPnl60,13590
+agents/guardrail.py,sha256=1kzhx_bAyq4rtYbqGLlQp2sTk9g4A29NOJZq4LSOIOk,9430
+agents/handoffs.py,sha256=L-b2eMNKyi-uF5Isz7UfpKc2Amvqies3i5tVjDnM3M4,10793
 agents/items.py,sha256=lXFc_gKLEqwXIcyMKk4Q-6Rjry0MWD93xlvk4Y1W970,9695
-agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
+agents/lifecycle.py,sha256=C1LSoCa_0zf0nt7yI3SKL5bAAG4Cso6--Gmk8S8zpJg,3111
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=YxZufM4qivPF3ckXjvK2RS_Fk2BhJCDVHHqyH4-Dwv0,5950
+agents/model_settings.py,sha256=98MlYQ35AVJM0OkTe0HHETKCRbh7hDdVbtYkIQibc3I,6043
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 agents/repl.py,sha256=FKZlkGfw6QxItTkjFkCAQwXuV_pn69DIamGd3PiKQFk,2361
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=Pf-k78LsXiDobVo4dcLkhv090dchQYrCfN7n3lzRTPA,46774
+agents/run.py,sha256=GNVMvEs0cw5oU6OISrN5YYEVYVF-KduMt3nfpgBynLs,50792
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
-agents/tool.py,sha256=1DxA3RT0G32tbr6JPB-EMCG8j32zQNCK_vwdAKVqpp4,16538
-agents/tool_context.py,sha256=JAo3hyk5nvUe81IKF71f30Im9vql664zfu5mQJj7jas,941
+agents/tool.py,sha256=m0NHppRFBNTb4mYrlLjGSY-dBOu5KgnIvVpShPBuhps,16542
+agents/tool_context.py,sha256=lbnctijZeanXAThddkklF7vDrXK1Ie2_wx6JZPCOihI,1434
 agents/usage.py,sha256=GB83eElU-DVkdutGObGDSX5vJNy8ssu3Xbpp5LlHfwU,1643
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
 agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -33,19 +33,34 @@ agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 agents/extensions/models/litellm_model.py,sha256=Gmk7M4KGZ-Mfk2LUCzHL-FMm5C6_n41QzwSMVxYcfE8,15014
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
-agents/mcp/server.py,sha256=yRW7qK1OHO49aFNQu9bj9CRWxuWiwR-iNBamV-LxW2Y,20088
-agents/mcp/util.py,sha256=psB7P7mbLTOn7pFsrSyVaQ-1jM8JlJRep8giNgvJcQU,7885
+agents/mcp/server.py,sha256=mTXQL4om5oA2fYevk63SUlwDri-RcUleUH_4hFrA0QM,24266
+agents/mcp/util.py,sha256=BP84hWPLF4wgyACTBYgafQ_qGRbz3hRNUG2HqWoNnss,8421
+agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
+agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=ePmSVJb0qAumFm6UHrgii7TzvO7krfqsELoUDZfAGLo,18618
+agents/models/chatcmpl_converter.py,sha256=lHVmWOxULJd_Q9WnWdh_ZYYRq07-4UNfpl7KDZEGZdg,19420
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
-agents/models/chatcmpl_stream_handler.py,sha256=AvFyb9Uvq2WzblNXysGKVqLqA8BhGHfeZqNii_4uXjw,19120
+agents/models/chatcmpl_stream_handler.py,sha256=59sdQ6MndKHxPKCd4-D5pziQ9dDFBIfjtr53KJmySvI,23984
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
 agents/models/openai_chatcompletions.py,sha256=Br7nWsibVvMr0jff6H6adpe_AjYTgLgoAu6lgQ6LZO8,12191
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=82YIHExAPuXc92uJFIJJCyjdyFNcvt1nctgiPhC2b5Y,16302
+agents/models/openai_responses.py,sha256=IaZ419gGkx8cWDZxi_2djvAor3RoUUiAdid782WOyv0,16720
+agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
+agents/realtime/__init__.py,sha256=MPdn2EXsjP1WX-iGaQm94Yw_j8xNm-KcO-vdHhm0sCw,4807
+agents/realtime/agent.py,sha256=xVQYVJjsbi4FpJZ8jwogfKUsguOzpWXWih6rqLZ8AgE,3745
+agents/realtime/config.py,sha256=O7EGQgHrv2p0gtvZfODwSb4g1RJXkJ2ySH1YdNLt_K8,5751
+agents/realtime/events.py,sha256=bOyO7Yv0g_6StXKqAzapNTOq8GdaOuQqj3BbtXNfHU4,5090
+agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
+agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
+agents/realtime/model.py,sha256=YwMBwtj33Z6uADnz1AoYg4wSfmpfYdZNq7ZaK8hlekw,2188
+agents/realtime/model_events.py,sha256=JDh70uDctVuwex5EiYUdWhqQvBarN3ge7eREd1aUznU,3386
+agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
+agents/realtime/openai_realtime.py,sha256=YubVE1BbdnDpTbLxuh9WFy0eS2y1WzA8h0EdtIzfhC0,27190
+agents/realtime/runner.py,sha256=PdSQZ-YibJULEtvWVsa8uUzLxHwgFosCbOoRXTLcaB0,4067
+agents/realtime/session.py,sha256=I359k07sRUjWmNnIxRptiomSz4eQiOytnfF25eB7sCQ,20856
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=Gm9N5O2DeBy6UU86tRN0wnmzWyXb-qAUBbTj9oxIHao,18106
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -71,16 +86,16 @@ agents/voice/exceptions.py,sha256=QcyfvaUTBe4gxbFP82oDSa_puzZ4Z4O4k01B8pAHnK0,23
 agents/voice/imports.py,sha256=VaE5I8aJTP9Zl_0-y9dx1UcAP7KPRDMaikFK2jFnn8s,348
 agents/voice/input.py,sha256=FSbdHMIdLVKX4vYcmf3WBJ5dAlh5zMDjCAuGfXOZTQs,2910
 agents/voice/model.py,sha256=LWnIWEwU0-aFkff3kbTKkxejnYqzS2XHG5Qm2YcrzFI,5956
-agents/voice/pipeline.py,sha256=5LKTTDytQt4QlZzVKgbB9x3X2zA-TeR94FTi15vIUc0,6259
+agents/voice/pipeline.py,sha256=F_b9QSPVbIJAlxpDoHqSt3mWqRqLnm8Dbfk4H9sJ-3M,6491
 agents/voice/pipeline_config.py,sha256=_cynbnzxvQijxkGrMYHJzIV54F9bRvDsPV24qexVO8c,1759
 agents/voice/result.py,sha256=Yx9JCMGCE9OfXacaBFfFLQJRwkNo5-h4Nqm9OPnemU4,11107
 agents/voice/utils.py,sha256=MrRomVqBLXeMAOue-Itwh0Fc5HjB0QCMKXclqFPhrbI,1309
-agents/voice/workflow.py,sha256=lef1NulzNHWFiiPUESGeb_6WhD6CouP1W5NOUAYFewk,3527
+agents/voice/workflow.py,sha256=m_-_4qU1gEE5gcGahiE2IrIimmRW2X1rR20zZEGivSc,3795
 agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
-agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
+agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.1.0.dist-info/METADATA,sha256=SHdolY2k2hnYiNuhfVCYCbDtvM1hMTpMz1n6TPXzwnQ,8561
-openai_agents-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.1.0.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.1.0.dist-info/RECORD,,
+openai_agents-0.2.1.dist-info/METADATA,sha256=AD8egWlRGmW_EX7Igqw6t3u1Hb_-6yjh0YKLWmenQw8,11560
+openai_agents-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.1.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.1.dist-info/RECORD,,