openai-agents 0.0.19__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openai-agents might be problematic. See the registry's advisory page for more details.

Files changed (43)
  1. agents/__init__.py +5 -2
  2. agents/_run_impl.py +35 -1
  3. agents/agent.py +65 -29
  4. agents/extensions/models/litellm_model.py +7 -3
  5. agents/function_schema.py +11 -1
  6. agents/guardrail.py +5 -1
  7. agents/handoffs.py +14 -0
  8. agents/lifecycle.py +26 -17
  9. agents/mcp/__init__.py +13 -1
  10. agents/mcp/server.py +173 -16
  11. agents/mcp/util.py +89 -6
  12. agents/memory/__init__.py +3 -0
  13. agents/memory/session.py +369 -0
  14. agents/model_settings.py +60 -6
  15. agents/models/chatcmpl_converter.py +31 -2
  16. agents/models/chatcmpl_stream_handler.py +128 -16
  17. agents/models/openai_chatcompletions.py +12 -10
  18. agents/models/openai_responses.py +25 -8
  19. agents/realtime/README.md +3 -0
  20. agents/realtime/__init__.py +174 -0
  21. agents/realtime/agent.py +80 -0
  22. agents/realtime/config.py +128 -0
  23. agents/realtime/events.py +216 -0
  24. agents/realtime/items.py +91 -0
  25. agents/realtime/model.py +69 -0
  26. agents/realtime/model_events.py +159 -0
  27. agents/realtime/model_inputs.py +100 -0
  28. agents/realtime/openai_realtime.py +584 -0
  29. agents/realtime/runner.py +118 -0
  30. agents/realtime/session.py +502 -0
  31. agents/repl.py +1 -4
  32. agents/run.py +131 -10
  33. agents/tool.py +30 -6
  34. agents/tool_context.py +16 -3
  35. agents/tracing/__init__.py +1 -2
  36. agents/tracing/processor_interface.py +1 -1
  37. agents/voice/models/openai_stt.py +1 -1
  38. agents/voice/pipeline.py +6 -0
  39. agents/voice/workflow.py +8 -0
  40. {openai_agents-0.0.19.dist-info → openai_agents-0.2.0.dist-info}/METADATA +133 -8
  41. {openai_agents-0.0.19.dist-info → openai_agents-0.2.0.dist-info}/RECORD +43 -29
  42. {openai_agents-0.0.19.dist-info → openai_agents-0.2.0.dist-info}/WHEEL +0 -0
  43. {openai_agents-0.0.19.dist-info → openai_agents-0.2.0.dist-info}/licenses/LICENSE +0 -0
agents/run.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
2
2
 
3
3
  import asyncio
4
4
  import copy
5
+ import inspect
5
6
  from dataclasses import dataclass, field
6
7
  from typing import Any, Generic, cast
7
8
 
@@ -31,6 +32,7 @@ from .exceptions import (
31
32
  ModelBehaviorError,
32
33
  OutputGuardrailTripwireTriggered,
33
34
  RunErrorDetails,
35
+ UserError,
34
36
  )
35
37
  from .guardrail import (
36
38
  InputGuardrail,
@@ -42,6 +44,7 @@ from .handoffs import Handoff, HandoffInputFilter, handoff
42
44
  from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
43
45
  from .lifecycle import RunHooks
44
46
  from .logger import logger
47
+ from .memory import Session
45
48
  from .model_settings import ModelSettings
46
49
  from .models.interface import Model, ModelProvider
47
50
  from .models.multi_provider import MultiProvider
@@ -155,6 +158,9 @@ class RunOptions(TypedDict, Generic[TContext]):
155
158
  previous_response_id: NotRequired[str | None]
156
159
  """The ID of the previous response, if any."""
157
160
 
161
+ session: NotRequired[Session | None]
162
+ """The session for the run."""
163
+
158
164
 
159
165
  class Runner:
160
166
  @classmethod
@@ -168,6 +174,7 @@ class Runner:
168
174
  hooks: RunHooks[TContext] | None = None,
169
175
  run_config: RunConfig | None = None,
170
176
  previous_response_id: str | None = None,
177
+ session: Session | None = None,
171
178
  ) -> RunResult:
172
179
  """Run a workflow starting at the given agent. The agent will run in a loop until a final
173
180
  output is generated. The loop runs like so:
@@ -204,6 +211,7 @@ class Runner:
204
211
  hooks=hooks,
205
212
  run_config=run_config,
206
213
  previous_response_id=previous_response_id,
214
+ session=session,
207
215
  )
208
216
 
209
217
  @classmethod
@@ -217,6 +225,7 @@ class Runner:
217
225
  hooks: RunHooks[TContext] | None = None,
218
226
  run_config: RunConfig | None = None,
219
227
  previous_response_id: str | None = None,
228
+ session: Session | None = None,
220
229
  ) -> RunResult:
221
230
  """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
222
231
  `run` method, so it will not work if there's already an event loop (e.g. inside an async
@@ -256,6 +265,7 @@ class Runner:
256
265
  hooks=hooks,
257
266
  run_config=run_config,
258
267
  previous_response_id=previous_response_id,
268
+ session=session,
259
269
  )
260
270
 
261
271
  @classmethod
@@ -268,6 +278,7 @@ class Runner:
268
278
  hooks: RunHooks[TContext] | None = None,
269
279
  run_config: RunConfig | None = None,
270
280
  previous_response_id: str | None = None,
281
+ session: Session | None = None,
271
282
  ) -> RunResultStreaming:
272
283
  """Run a workflow starting at the given agent in streaming mode. The returned result object
273
284
  contains a method you can use to stream semantic events as they are generated.
@@ -304,6 +315,7 @@ class Runner:
304
315
  hooks=hooks,
305
316
  run_config=run_config,
306
317
  previous_response_id=previous_response_id,
318
+ session=session,
307
319
  )
308
320
 
309
321
 
@@ -324,11 +336,15 @@ class AgentRunner:
324
336
  hooks = kwargs.get("hooks")
325
337
  run_config = kwargs.get("run_config")
326
338
  previous_response_id = kwargs.get("previous_response_id")
339
+ session = kwargs.get("session")
327
340
  if hooks is None:
328
341
  hooks = RunHooks[Any]()
329
342
  if run_config is None:
330
343
  run_config = RunConfig()
331
344
 
345
+ # Prepare input with session if enabled
346
+ prepared_input = await self._prepare_input_with_session(input, session)
347
+
332
348
  tool_use_tracker = AgentToolUseTracker()
333
349
 
334
350
  with TraceCtxManager(
@@ -339,7 +355,7 @@ class AgentRunner:
339
355
  disabled=run_config.tracing_disabled,
340
356
  ):
341
357
  current_turn = 0
342
- original_input: str | list[TResponseInputItem] = copy.deepcopy(input)
358
+ original_input: str | list[TResponseInputItem] = copy.deepcopy(prepared_input)
343
359
  generated_items: list[RunItem] = []
344
360
  model_responses: list[ModelResponse] = []
345
361
 
@@ -361,7 +377,8 @@ class AgentRunner:
361
377
  # agent changes, or if the agent loop ends.
362
378
  if current_span is None:
363
379
  handoff_names = [
364
- h.agent_name for h in AgentRunner._get_handoffs(current_agent)
380
+ h.agent_name
381
+ for h in await AgentRunner._get_handoffs(current_agent, context_wrapper)
365
382
  ]
366
383
  if output_schema := AgentRunner._get_output_schema(current_agent):
367
384
  output_type_name = output_schema.name()
@@ -397,7 +414,7 @@ class AgentRunner:
397
414
  starting_agent,
398
415
  starting_agent.input_guardrails
399
416
  + (run_config.input_guardrails or []),
400
- copy.deepcopy(input),
417
+ copy.deepcopy(prepared_input),
401
418
  context_wrapper,
402
419
  ),
403
420
  self._run_single_turn(
@@ -439,7 +456,7 @@ class AgentRunner:
439
456
  turn_result.next_step.output,
440
457
  context_wrapper,
441
458
  )
442
- return RunResult(
459
+ result = RunResult(
443
460
  input=original_input,
444
461
  new_items=generated_items,
445
462
  raw_responses=model_responses,
@@ -449,6 +466,11 @@ class AgentRunner:
449
466
  output_guardrail_results=output_guardrail_results,
450
467
  context_wrapper=context_wrapper,
451
468
  )
469
+
470
+ # Save the conversation to session if enabled
471
+ await self._save_result_to_session(session, input, result)
472
+
473
+ return result
452
474
  elif isinstance(turn_result.next_step, NextStepHandoff):
453
475
  current_agent = cast(Agent[TContext], turn_result.next_step.new_agent)
454
476
  current_span.finish(reset_current=True)
@@ -486,10 +508,13 @@ class AgentRunner:
486
508
  hooks = kwargs.get("hooks")
487
509
  run_config = kwargs.get("run_config")
488
510
  previous_response_id = kwargs.get("previous_response_id")
511
+ session = kwargs.get("session")
512
+
489
513
  return asyncio.get_event_loop().run_until_complete(
490
514
  self.run(
491
515
  starting_agent,
492
516
  input,
517
+ session=session,
493
518
  context=context,
494
519
  max_turns=max_turns,
495
520
  hooks=hooks,
@@ -509,6 +534,8 @@ class AgentRunner:
509
534
  hooks = kwargs.get("hooks")
510
535
  run_config = kwargs.get("run_config")
511
536
  previous_response_id = kwargs.get("previous_response_id")
537
+ session = kwargs.get("session")
538
+
512
539
  if hooks is None:
513
540
  hooks = RunHooks[Any]()
514
541
  if run_config is None:
@@ -561,6 +588,7 @@ class AgentRunner:
561
588
  context_wrapper=context_wrapper,
562
589
  run_config=run_config,
563
590
  previous_response_id=previous_response_id,
591
+ session=session,
564
592
  )
565
593
  )
566
594
  return streamed_result
@@ -619,6 +647,7 @@ class AgentRunner:
619
647
  context_wrapper: RunContextWrapper[TContext],
620
648
  run_config: RunConfig,
621
649
  previous_response_id: str | None,
650
+ session: Session | None,
622
651
  ):
623
652
  if streamed_result.trace:
624
653
  streamed_result.trace.start(mark_as_current=True)
@@ -632,6 +661,12 @@ class AgentRunner:
632
661
  streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))
633
662
 
634
663
  try:
664
+ # Prepare input with session if enabled
665
+ prepared_input = await AgentRunner._prepare_input_with_session(starting_input, session)
666
+
667
+ # Update the streamed result with the prepared input
668
+ streamed_result.input = prepared_input
669
+
635
670
  while True:
636
671
  if streamed_result.is_complete:
637
672
  break
@@ -641,7 +676,10 @@ class AgentRunner:
641
676
  # Start an agent span if we don't have one. This span is ended if the current
642
677
  # agent changes, or if the agent loop ends.
643
678
  if current_span is None:
644
- handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)]
679
+ handoff_names = [
680
+ h.agent_name
681
+ for h in await cls._get_handoffs(current_agent, context_wrapper)
682
+ ]
645
683
  if output_schema := cls._get_output_schema(current_agent):
646
684
  output_type_name = output_schema.name()
647
685
  else:
@@ -675,7 +713,7 @@ class AgentRunner:
675
713
  cls._run_input_guardrails_with_queue(
676
714
  starting_agent,
677
715
  starting_agent.input_guardrails + (run_config.input_guardrails or []),
678
- copy.deepcopy(ItemHelpers.input_to_new_input_list(starting_input)),
716
+ copy.deepcopy(ItemHelpers.input_to_new_input_list(prepared_input)),
679
717
  context_wrapper,
680
718
  streamed_result,
681
719
  current_span,
@@ -729,6 +767,23 @@ class AgentRunner:
729
767
  streamed_result.output_guardrail_results = output_guardrail_results
730
768
  streamed_result.final_output = turn_result.next_step.output
731
769
  streamed_result.is_complete = True
770
+
771
+ # Save the conversation to session if enabled
772
+ # Create a temporary RunResult for session saving
773
+ temp_result = RunResult(
774
+ input=streamed_result.input,
775
+ new_items=streamed_result.new_items,
776
+ raw_responses=streamed_result.raw_responses,
777
+ final_output=streamed_result.final_output,
778
+ _last_agent=current_agent,
779
+ input_guardrail_results=streamed_result.input_guardrail_results,
780
+ output_guardrail_results=streamed_result.output_guardrail_results,
781
+ context_wrapper=context_wrapper,
782
+ )
783
+ await AgentRunner._save_result_to_session(
784
+ session, starting_input, temp_result
785
+ )
786
+
732
787
  streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
733
788
  elif isinstance(turn_result.next_step, NextStepRunAgain):
734
789
  pass
@@ -798,7 +853,7 @@ class AgentRunner:
798
853
  agent.get_prompt(context_wrapper),
799
854
  )
800
855
 
801
- handoffs = cls._get_handoffs(agent)
856
+ handoffs = await cls._get_handoffs(agent, context_wrapper)
802
857
  model = cls._get_model(agent, run_config)
803
858
  model_settings = agent.model_settings.resolve(run_config.model_settings)
804
859
  model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
@@ -898,7 +953,7 @@ class AgentRunner:
898
953
  )
899
954
 
900
955
  output_schema = cls._get_output_schema(agent)
901
- handoffs = cls._get_handoffs(agent)
956
+ handoffs = await cls._get_handoffs(agent, context_wrapper)
902
957
  input = ItemHelpers.input_to_new_input_list(original_input)
903
958
  input.extend([generated_item.to_input_item() for generated_item in generated_items])
904
959
 
@@ -1091,14 +1146,28 @@ class AgentRunner:
1091
1146
  return AgentOutputSchema(agent.output_type)
1092
1147
 
1093
1148
  @classmethod
1094
- def _get_handoffs(cls, agent: Agent[Any]) -> list[Handoff]:
1149
+ async def _get_handoffs(
1150
+ cls, agent: Agent[Any], context_wrapper: RunContextWrapper[Any]
1151
+ ) -> list[Handoff]:
1095
1152
  handoffs = []
1096
1153
  for handoff_item in agent.handoffs:
1097
1154
  if isinstance(handoff_item, Handoff):
1098
1155
  handoffs.append(handoff_item)
1099
1156
  elif isinstance(handoff_item, Agent):
1100
1157
  handoffs.append(handoff(handoff_item))
1101
- return handoffs
1158
+
1159
+ async def _check_handoff_enabled(handoff_obj: Handoff) -> bool:
1160
+ attr = handoff_obj.is_enabled
1161
+ if isinstance(attr, bool):
1162
+ return attr
1163
+ res = attr(context_wrapper, agent)
1164
+ if inspect.isawaitable(res):
1165
+ return bool(await res)
1166
+ return bool(res)
1167
+
1168
+ results = await asyncio.gather(*(_check_handoff_enabled(h) for h in handoffs))
1169
+ enabled: list[Handoff] = [h for h, ok in zip(handoffs, results) if ok]
1170
+ return enabled
1102
1171
 
1103
1172
  @classmethod
1104
1173
  async def _get_all_tools(
@@ -1117,5 +1186,57 @@ class AgentRunner:
1117
1186
 
1118
1187
  return run_config.model_provider.get_model(agent.model)
1119
1188
 
1189
+ @classmethod
1190
+ async def _prepare_input_with_session(
1191
+ cls,
1192
+ input: str | list[TResponseInputItem],
1193
+ session: Session | None,
1194
+ ) -> str | list[TResponseInputItem]:
1195
+ """Prepare input by combining it with session history if enabled."""
1196
+ if session is None:
1197
+ return input
1198
+
1199
+ # Validate that we don't have both a session and a list input, as this creates
1200
+ # ambiguity about whether the list should append to or replace existing session history
1201
+ if isinstance(input, list):
1202
+ raise UserError(
1203
+ "Cannot provide both a session and a list of input items. "
1204
+ "When using session memory, provide only a string input to append to the "
1205
+ "conversation, or use session=None and provide a list to manually manage "
1206
+ "conversation history."
1207
+ )
1208
+
1209
+ # Get previous conversation history
1210
+ history = await session.get_items()
1211
+
1212
+ # Convert input to list format
1213
+ new_input_list = ItemHelpers.input_to_new_input_list(input)
1214
+
1215
+ # Combine history with new input
1216
+ combined_input = history + new_input_list
1217
+
1218
+ return combined_input
1219
+
1220
+ @classmethod
1221
+ async def _save_result_to_session(
1222
+ cls,
1223
+ session: Session | None,
1224
+ original_input: str | list[TResponseInputItem],
1225
+ result: RunResult,
1226
+ ) -> None:
1227
+ """Save the conversation turn to session."""
1228
+ if session is None:
1229
+ return
1230
+
1231
+ # Convert original input to list format if needed
1232
+ input_list = ItemHelpers.input_to_new_input_list(original_input)
1233
+
1234
+ # Convert new items to input format
1235
+ new_items_as_input = [item.to_input_item() for item in result.new_items]
1236
+
1237
+ # Save all items from this turn
1238
+ items_to_save = input_list + new_items_as_input
1239
+ await session.add_items(items_to_save)
1240
+
1120
1241
 
1121
1242
  DEFAULT_AGENT_RUNNER = AgentRunner()
agents/tool.py CHANGED
@@ -7,6 +7,10 @@ from dataclasses import dataclass
7
7
  from typing import TYPE_CHECKING, Any, Callable, Literal, Union, overload
8
8
 
9
9
  from openai.types.responses.file_search_tool_param import Filters, RankingOptions
10
+ from openai.types.responses.response_computer_tool_call import (
11
+ PendingSafetyCheck,
12
+ ResponseComputerToolCall,
13
+ )
10
14
  from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
11
15
  from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
12
16
  from openai.types.responses.web_search_tool_param import UserLocation
@@ -26,7 +30,7 @@ from .util import _error_tracing
26
30
  from .util._types import MaybeAwaitable
27
31
 
28
32
  if TYPE_CHECKING:
29
- from .agent import Agent
33
+ from .agent import Agent, AgentBase
30
34
 
31
35
  ToolParams = ParamSpec("ToolParams")
32
36
 
@@ -83,7 +87,7 @@ class FunctionTool:
83
87
  """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True,
84
88
  as it increases the likelihood of correct JSON input."""
85
89
 
86
- is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True
90
+ is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True
87
91
  """Whether the tool is enabled. Either a bool or a Callable that takes the run context and agent
88
92
  and returns whether the tool is enabled. You can use this to dynamically enable/disable a tool
89
93
  based on your context/state."""
@@ -141,11 +145,31 @@ class ComputerTool:
141
145
  as well as implements the computer actions like click, screenshot, etc.
142
146
  """
143
147
 
148
+ on_safety_check: Callable[[ComputerToolSafetyCheckData], MaybeAwaitable[bool]] | None = None
149
+ """Optional callback to acknowledge computer tool safety checks."""
150
+
144
151
  @property
145
152
  def name(self):
146
153
  return "computer_use_preview"
147
154
 
148
155
 
156
+ @dataclass
157
+ class ComputerToolSafetyCheckData:
158
+ """Information about a computer tool safety check."""
159
+
160
+ ctx_wrapper: RunContextWrapper[Any]
161
+ """The run context."""
162
+
163
+ agent: Agent[Any]
164
+ """The agent performing the computer action."""
165
+
166
+ tool_call: ResponseComputerToolCall
167
+ """The computer tool call."""
168
+
169
+ safety_check: PendingSafetyCheck
170
+ """The pending safety check to acknowledge."""
171
+
172
+
149
173
  @dataclass
150
174
  class MCPToolApprovalRequest:
151
175
  """A request to approve a tool call."""
@@ -176,7 +200,7 @@ MCPToolApprovalFunction = Callable[
176
200
  @dataclass
177
201
  class HostedMCPTool:
178
202
  """A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and
179
- call tools, without requiring a a round trip back to your code.
203
+ call tools, without requiring a round trip back to your code.
180
204
  If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible
181
205
  environment, or you just prefer to run tool calls locally, then you can instead use the servers
182
206
  in `agents.mcp` and pass `Agent(mcp_servers=[...])` to the agent."""
@@ -276,7 +300,7 @@ def function_tool(
276
300
  use_docstring_info: bool = True,
277
301
  failure_error_function: ToolErrorFunction | None = None,
278
302
  strict_mode: bool = True,
279
- is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
303
+ is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,
280
304
  ) -> FunctionTool:
281
305
  """Overload for usage as @function_tool (no parentheses)."""
282
306
  ...
@@ -291,7 +315,7 @@ def function_tool(
291
315
  use_docstring_info: bool = True,
292
316
  failure_error_function: ToolErrorFunction | None = None,
293
317
  strict_mode: bool = True,
294
- is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
318
+ is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,
295
319
  ) -> Callable[[ToolFunction[...]], FunctionTool]:
296
320
  """Overload for usage as @function_tool(...)."""
297
321
  ...
@@ -306,7 +330,7 @@ def function_tool(
306
330
  use_docstring_info: bool = True,
307
331
  failure_error_function: ToolErrorFunction | None = default_tool_error_function,
308
332
  strict_mode: bool = True,
309
- is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
333
+ is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,
310
334
  ) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]:
311
335
  """
312
336
  Decorator to create a FunctionTool from a function. By default, we will:
agents/tool_context.py CHANGED
@@ -1,5 +1,7 @@
1
1
  from dataclasses import dataclass, field, fields
2
- from typing import Any
2
+ from typing import Any, Optional
3
+
4
+ from openai.types.responses import ResponseFunctionToolCall
3
5
 
4
6
  from .run_context import RunContextWrapper, TContext
5
7
 
@@ -8,16 +10,26 @@ def _assert_must_pass_tool_call_id() -> str:
8
10
  raise ValueError("tool_call_id must be passed to ToolContext")
9
11
 
10
12
 
13
+ def _assert_must_pass_tool_name() -> str:
14
+ raise ValueError("tool_name must be passed to ToolContext")
15
+
16
+
11
17
  @dataclass
12
18
  class ToolContext(RunContextWrapper[TContext]):
13
19
  """The context of a tool call."""
14
20
 
21
+ tool_name: str = field(default_factory=_assert_must_pass_tool_name)
22
+ """The name of the tool being invoked."""
23
+
15
24
  tool_call_id: str = field(default_factory=_assert_must_pass_tool_call_id)
16
25
  """The ID of the tool call."""
17
26
 
18
27
  @classmethod
19
28
  def from_agent_context(
20
- cls, context: RunContextWrapper[TContext], tool_call_id: str
29
+ cls,
30
+ context: RunContextWrapper[TContext],
31
+ tool_call_id: str,
32
+ tool_call: Optional[ResponseFunctionToolCall] = None,
21
33
  ) -> "ToolContext":
22
34
  """
23
35
  Create a ToolContext from a RunContextWrapper.
@@ -26,4 +38,5 @@ class ToolContext(RunContextWrapper[TContext]):
26
38
  base_values: dict[str, Any] = {
27
39
  f.name: getattr(context, f.name) for f in fields(RunContextWrapper) if f.init
28
40
  }
29
- return cls(tool_call_id=tool_call_id, **base_values)
41
+ tool_name = tool_call.name if tool_call is not None else _assert_must_pass_tool_name()
42
+ return cls(tool_name=tool_name, tool_call_id=tool_call_id, **base_values)
@@ -1,7 +1,5 @@
1
1
  import atexit
2
2
 
3
- from agents.tracing.provider import DefaultTraceProvider, TraceProvider
4
-
5
3
  from .create import (
6
4
  agent_span,
7
5
  custom_span,
@@ -20,6 +18,7 @@ from .create import (
20
18
  )
21
19
  from .processor_interface import TracingProcessor
22
20
  from .processors import default_exporter, default_processor
21
+ from .provider import DefaultTraceProvider, TraceProvider
23
22
  from .setup import get_trace_provider, set_trace_provider
24
23
  from .span_data import (
25
24
  AgentSpanData,
@@ -23,7 +23,7 @@ class TracingProcessor(abc.ABC):
23
23
  """Called when a trace is finished.
24
24
 
25
25
  Args:
26
- trace: The trace that started.
26
+ trace: The trace that finished.
27
27
  """
28
28
  pass
29
29
 
@@ -226,7 +226,7 @@ class OpenAISTTTranscriptionSession(StreamedTranscriptionSession):
226
226
  break
227
227
 
228
228
  event_type = event.get("type", "unknown")
229
- if event_type == "conversation.item.input_audio_transcription.completed":
229
+ if event_type == "input_audio_transcription_completed":
230
230
  transcript = cast(str, event.get("transcript", ""))
231
231
  if len(transcript) > 0:
232
232
  self._end_turn(transcript)
agents/voice/pipeline.py CHANGED
@@ -125,6 +125,12 @@ class VoicePipeline:
125
125
  self._get_tts_model(), self.config.tts_settings, self.config
126
126
  )
127
127
 
128
+ try:
129
+ async for intro_text in self.workflow.on_start():
130
+ await output._add_text(intro_text)
131
+ except Exception as e:
132
+ logger.warning(f"on_start() failed: {e}")
133
+
128
134
  transcription_session = await self._get_stt_model().create_session(
129
135
  audio_input,
130
136
  self.config.stt_settings,
agents/voice/workflow.py CHANGED
@@ -32,6 +32,14 @@ class VoiceWorkflowBase(abc.ABC):
32
32
  """
33
33
  pass
34
34
 
35
+ async def on_start(self) -> AsyncIterator[str]:
36
+ """
37
+ Optional method that runs before any user input is received. Can be used
38
+ to deliver a greeting or instruction via TTS. Defaults to doing nothing.
39
+ """
40
+ return
41
+ yield
42
+
35
43
 
36
44
  class VoiceWorkflowHelper:
37
45
  @classmethod