openai-agents 0.0.17__py3-none-any.whl → 0.0.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

agents/__init__.py CHANGED
@@ -45,6 +45,8 @@ from .models.interface import Model, ModelProvider, ModelTracing
 from .models.openai_chatcompletions import OpenAIChatCompletionsModel
 from .models.openai_provider import OpenAIProvider
 from .models.openai_responses import OpenAIResponsesModel
+from .prompts import DynamicPromptFunction, GenerateDynamicPromptData, Prompt
+from .repl import run_demo_loop
 from .result import RunResult, RunResultStreaming
 from .run import RunConfig, Runner
 from .run_context import RunContextWrapper, TContext
@@ -160,6 +162,7 @@ __all__ = [
     "ToolsToFinalOutputFunction",
     "ToolsToFinalOutputResult",
     "Runner",
+    "run_demo_loop",
     "Model",
     "ModelProvider",
     "ModelTracing",
@@ -176,6 +179,9 @@ __all__ = [
     "AgentsException",
     "InputGuardrailTripwireTriggered",
     "OutputGuardrailTripwireTriggered",
+    "DynamicPromptFunction",
+    "GenerateDynamicPromptData",
+    "Prompt",
     "MaxTurnsExceeded",
     "ModelBehaviorError",
     "UserError",
agents/_run_impl.py CHANGED
@@ -75,6 +75,7 @@ from .tool import (
     MCPToolApprovalRequest,
     Tool,
 )
+from .tool_context import ToolContext
 from .tracing import (
     SpanError,
     Trace,
@@ -543,23 +544,24 @@ class RunImpl:
             func_tool: FunctionTool, tool_call: ResponseFunctionToolCall
         ) -> Any:
             with function_span(func_tool.name) as span_fn:
+                tool_context = ToolContext.from_agent_context(context_wrapper, tool_call.call_id)
                 if config.trace_include_sensitive_data:
                     span_fn.span_data.input = tool_call.arguments
                 try:
                     _, _, result = await asyncio.gather(
-                        hooks.on_tool_start(context_wrapper, agent, func_tool),
+                        hooks.on_tool_start(tool_context, agent, func_tool),
                         (
-                            agent.hooks.on_tool_start(context_wrapper, agent, func_tool)
+                            agent.hooks.on_tool_start(tool_context, agent, func_tool)
                             if agent.hooks
                             else _coro.noop_coroutine()
                         ),
-                        func_tool.on_invoke_tool(context_wrapper, tool_call.arguments),
+                        func_tool.on_invoke_tool(tool_context, tool_call.arguments),
                     )

                     await asyncio.gather(
-                        hooks.on_tool_end(context_wrapper, agent, func_tool, result),
+                        hooks.on_tool_end(tool_context, agent, func_tool, result),
                         (
-                            agent.hooks.on_tool_end(context_wrapper, agent, func_tool, result)
+                            agent.hooks.on_tool_end(tool_context, agent, func_tool, result)
                             if agent.hooks
                             else _coro.noop_coroutine()
                         ),
agents/agent.py CHANGED
@@ -7,6 +7,7 @@ from collections.abc import Awaitable
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast

+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from typing_extensions import NotRequired, TypeAlias, TypedDict

 from .agent_output import AgentOutputSchemaBase
@@ -17,6 +18,7 @@ from .logger import logger
 from .mcp import MCPUtil
 from .model_settings import ModelSettings
 from .models.interface import Model
+from .prompts import DynamicPromptFunction, Prompt, PromptUtil
 from .run_context import RunContextWrapper, TContext
 from .tool import FunctionTool, FunctionToolResult, Tool, function_tool
 from .util import _transforms
@@ -95,6 +97,12 @@ class Agent(Generic[TContext]):
     return a string.
     """

+    prompt: Prompt | DynamicPromptFunction | None = None
+    """A prompt object (or a function that returns a Prompt). Prompts allow you to dynamically
+    configure the instructions, tools and other config for an agent outside of your code. Only
+    usable with OpenAI models, using the Responses API.
+    """
+
     handoff_description: str | None = None
     """A description of the agent. This is used when the agent is used as a handoff, so that an
     LLM knows what it does and when to invoke it.
@@ -242,6 +250,12 @@ class Agent(Generic[TContext]):

         return None

+    async def get_prompt(
+        self, run_context: RunContextWrapper[TContext]
+    ) -> ResponsePromptParam | None:
+        """Get the prompt for the agent."""
+        return await PromptUtil.to_model_input(self.prompt, run_context, self)
+
     async def get_mcp_tools(self) -> list[Tool]:
         """Fetches the available tools from the MCP servers."""
         convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
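
For orientation, a minimal sketch of how the new `prompt` field might be configured; the prompt ID and variables are placeholders for a prompt created in the OpenAI dashboard:

```python
from agents import Agent

# Hypothetical usage of the new Agent.prompt field; "pmpt_123" is a placeholder ID.
# Server-stored prompts only work with OpenAI models via the Responses API.
agent = Agent(
    name="Assistant",
    prompt={"id": "pmpt_123", "version": "1", "variables": {"tone": "terse"}},
)
```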
agents/extensions/models/litellm_model.py CHANGED
@@ -71,6 +71,7 @@ class LitellmModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: Any | None = None,
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
@@ -88,6 +89,7 @@ class LitellmModel(Model):
             span_generation,
             tracing,
             stream=False,
+            prompt=prompt,
         )

         assert isinstance(response.choices[0], litellm.types.utils.Choices)
@@ -153,8 +155,8 @@ class LitellmModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        *,
         previous_response_id: str | None,
+        prompt: Any | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         with generation_span(
             model=str(self.model),
@@ -172,6 +174,7 @@ class LitellmModel(Model):
             span_generation,
             tracing,
             stream=True,
+            prompt=prompt,
         )

         final_response: Response | None = None
@@ -202,6 +205,7 @@ class LitellmModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[True],
+        prompt: Any | None = None,
     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

     @overload
@@ -216,6 +220,7 @@ class LitellmModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[False],
+        prompt: Any | None = None,
     ) -> litellm.types.utils.ModelResponse: ...

     async def _fetch_response(
@@ -229,6 +234,7 @@ class LitellmModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: bool = False,
+        prompt: Any | None = None,
     ) -> litellm.types.utils.ModelResponse | tuple[Response, AsyncStream[ChatCompletionChunk]]:
         converted_messages = Converter.items_to_messages(input)

@@ -284,6 +290,10 @@ class LitellmModel(Model):
         if model_settings.extra_body and isinstance(model_settings.extra_body, dict):
             extra_kwargs.update(model_settings.extra_body)

+        # Add kwargs from model_settings.extra_args, filtering out None values
+        if model_settings.extra_args:
+            extra_kwargs.update(model_settings.extra_args)
+
         ret = await litellm.acompletion(
             model=self.model,
             messages=converted_messages,
agents/function_schema.py CHANGED
@@ -13,6 +13,7 @@ from pydantic import BaseModel, Field, create_model
 from .exceptions import UserError
 from .run_context import RunContextWrapper
 from .strict_schema import ensure_strict_json_schema
+from .tool_context import ToolContext


 @dataclass
@@ -222,7 +223,8 @@ def function_schema(
         doc_info = None
         param_descs = {}

-    func_name = name_override or doc_info.name if doc_info else func.__name__
+    # Ensure name_override takes precedence even if docstring info is disabled.
+    func_name = name_override or (doc_info.name if doc_info else func.__name__)

     # 2. Inspect function signature and get type hints
     sig = inspect.signature(func)
@@ -237,21 +239,21 @@ def function_schema(
         ann = type_hints.get(first_name, first_param.annotation)
         if ann != inspect._empty:
             origin = get_origin(ann) or ann
-            if origin is RunContextWrapper:
+            if origin is RunContextWrapper or origin is ToolContext:
                 takes_context = True  # Mark that the function takes context
             else:
                 filtered_params.append((first_name, first_param))
         else:
             filtered_params.append((first_name, first_param))

-    # For parameters other than the first, raise error if any use RunContextWrapper.
+    # For parameters other than the first, raise error if any use RunContextWrapper or ToolContext.
     for name, param in params[1:]:
         ann = type_hints.get(name, param.annotation)
         if ann != inspect._empty:
             origin = get_origin(ann) or ann
-            if origin is RunContextWrapper:
+            if origin is RunContextWrapper or origin is ToolContext:
                 raise UserError(
-                    f"RunContextWrapper param found at non-first position in function"
+                    f"RunContextWrapper/ToolContext param found at non-first position in function"
                     f" {func.__name__}"
                 )
         filtered_params.append((name, param))
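
The `func_name` fix addresses an operator-precedence bug: the old expression parsed as `(name_override or doc_info.name) if doc_info else func.__name__`, so `name_override` was silently ignored whenever docstring parsing was disabled. A self-contained illustration:

```python
name_override = "my_tool"
doc_info = None

def func(): ...

# Old parse: the conditional applies to the whole expression, so with
# doc_info=None the result falls through to func.__name__.
old = name_override or doc_info.name if doc_info else func.__name__
# New parse: parentheses keep name_override's precedence.
new = name_override or (doc_info.name if doc_info else func.__name__)

assert old == "func"
assert new == "my_tool"
```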
agents/handoffs.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import inspect
+import json
 from collections.abc import Awaitable
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Callable, Generic, cast, overload
@@ -99,8 +100,7 @@ class Handoff(Generic[TContext]):
     """

     def get_transfer_message(self, agent: Agent[Any]) -> str:
-        base = f"{{'assistant': '{agent.name}'}}"
-        return base
+        return json.dumps({"assistant": agent.name})

     @classmethod
     def default_tool_name(cls, agent: Agent[Any]) -> str:
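
The old f-string produced single-quoted pseudo-JSON and broke on agent names containing quotes; `json.dumps` emits valid JSON with proper escaping. A quick comparison:

```python
import json

name = 'Bill "The Scientist"'

old = f"{{'assistant': '{name}'}}"     # {'assistant': 'Bill "The Scientist"'} -- not valid JSON
new = json.dumps({"assistant": name})  # {"assistant": "Bill \"The Scientist\""}

json.loads(new)  # parses fine; json.loads(old) would raise JSONDecodeError
```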
agents/mcp/server.py CHANGED
@@ -340,10 +340,10 @@ class MCPServerStreamableHttpParams(TypedDict):
     headers: NotRequired[dict[str, str]]
     """The headers to send to the server."""

-    timeout: NotRequired[timedelta]
+    timeout: NotRequired[timedelta | float]
     """The timeout for the HTTP request. Defaults to 5 seconds."""

-    sse_read_timeout: NotRequired[timedelta]
+    sse_read_timeout: NotRequired[timedelta | float]
     """The timeout for the SSE connection, in seconds. Defaults to 5 minutes."""

     terminate_on_close: NotRequired[bool]
@@ -401,8 +401,8 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
         return streamablehttp_client(
             url=self.params["url"],
             headers=self.params.get("headers", None),
-            timeout=self.params.get("timeout", timedelta(seconds=30)),
-            sse_read_timeout=self.params.get("sse_read_timeout", timedelta(seconds=60 * 5)),
+            timeout=self.params.get("timeout", 5),
+            sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
             terminate_on_close=self.params.get("terminate_on_close", True),
         )

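With the widened parameter types, callers can pass plain seconds instead of `timedelta` objects. A minimal sketch, assuming the class is exported from `agents.mcp` and using a placeholder URL:

```python
from agents.mcp import MCPServerStreamableHttp

# timeout and sse_read_timeout now accept plain float seconds as well as timedelta.
server = MCPServerStreamableHttp(
    params={
        "url": "http://localhost:8000/mcp",  # placeholder URL
        "timeout": 10,               # seconds
        "sse_read_timeout": 60 * 5,  # seconds
    }
)
```
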
agents/model_settings.py CHANGED
@@ -73,6 +73,11 @@ class ModelSettings:
     """Additional headers to provide with the request.
     Defaults to None if not provided."""

+    extra_args: dict[str, Any] | None = None
+    """Arbitrary keyword arguments to pass to the model API call.
+    These will be passed directly to the underlying model provider's API.
+    Use with caution as not all models support all parameters."""
+
     def resolve(self, override: ModelSettings | None) -> ModelSettings:
         """Produce a new ModelSettings by overlaying any non-None values from the
         override on top of this instance."""
@@ -84,6 +89,16 @@ class ModelSettings:
             for field in fields(self)
             if getattr(override, field.name) is not None
         }
+
+        # Handle extra_args merging specially - merge dictionaries instead of replacing
+        if self.extra_args is not None or override.extra_args is not None:
+            merged_args = {}
+            if self.extra_args:
+                merged_args.update(self.extra_args)
+            if override.extra_args:
+                merged_args.update(override.extra_args)
+            changes["extra_args"] = merged_args if merged_args else None
+
         return replace(self, **changes)

     def to_json_dict(self) -> dict[str, Any]:
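
`resolve()` now merges `extra_args` key-by-key instead of letting the override dict replace the base dict wholesale; on conflicting keys the override wins. A small sketch of the behavior:

```python
from agents import ModelSettings

base = ModelSettings(temperature=0.3, extra_args={"seed": 7, "logprobs": True})
override = ModelSettings(extra_args={"seed": 42})

resolved = base.resolve(override)
# extra_args dicts are merged; the override's value wins for "seed".
assert resolved.extra_args == {"seed": 42, "logprobs": True}
# Non-None base values still survive when the override leaves them unset.
assert resolved.temperature == 0.3
```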
agents/models/interface.py CHANGED
@@ -5,6 +5,8 @@ import enum
 from collections.abc import AsyncIterator
 from typing import TYPE_CHECKING

+from openai.types.responses.response_prompt_param import ResponsePromptParam
+
 from ..agent_output import AgentOutputSchemaBase
 from ..handoffs import Handoff
 from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent
@@ -46,6 +48,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None,
     ) -> ModelResponse:
         """Get a response from the model.

@@ -59,6 +62,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            prompt: The prompt config to use for the model.

         Returns:
             The full model response.
@@ -77,6 +81,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """Stream a response from the model.

@@ -90,6 +95,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            prompt: The prompt config to use for the model.

         Returns:
             An iterator of response stream events, in OpenAI Responses format.
agents/models/openai_chatcompletions.py CHANGED
@@ -9,6 +9,7 @@ from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.responses import Response
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

 from .. import _debug
@@ -53,6 +54,7 @@ class OpenAIChatCompletionsModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
@@ -69,6 +71,7 @@ class OpenAIChatCompletionsModel(Model):
             span_generation,
             tracing,
             stream=False,
+            prompt=prompt,
         )

         first_choice = response.choices[0]
@@ -136,8 +139,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -157,6 +160,7 @@ class OpenAIChatCompletionsModel(Model):
             span_generation,
             tracing,
             stream=True,
+            prompt=prompt,
         )

         final_response: Response | None = None
@@ -187,6 +191,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

     @overload
@@ -201,6 +206,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion: ...

     async def _fetch_response(
@@ -214,6 +220,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: bool = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
         converted_messages = Converter.items_to_messages(input)

@@ -281,6 +288,7 @@ class OpenAIChatCompletionsModel(Model):
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
+            **(model_settings.extra_args or {}),
         )

         if isinstance(ret, ChatCompletion):
agents/models/openai_responses.py CHANGED
@@ -17,6 +17,7 @@ from openai.types.responses import (
     WebSearchToolParam,
     response_create_params,
 )
+from openai.types.responses.response_prompt_param import ResponsePromptParam

 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -74,6 +75,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
             try:
@@ -86,6 +88,7 @@ class OpenAIResponsesModel(Model):
                     handoffs,
                     previous_response_id,
                     stream=False,
+                    prompt=prompt,
                 )

                 if _debug.DONT_LOG_MODEL_DATA:
@@ -141,6 +144,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -156,6 +160,7 @@ class OpenAIResponsesModel(Model):
                 handoffs,
                 previous_response_id,
                 stream=True,
+                prompt=prompt,
             )

             final_response: Response | None = None
@@ -192,6 +197,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...

     @overload
@@ -205,6 +211,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> Response: ...

     async def _fetch_response(
@@ -217,6 +224,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True] | Literal[False] = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
         list_input = ItemHelpers.input_to_new_input_list(input)

@@ -252,6 +260,7 @@ class OpenAIResponsesModel(Model):
             input=list_input,
             include=converted_tools.includes,
             tools=converted_tools.tools,
+            prompt=self._non_null_or_not_given(prompt),
             temperature=self._non_null_or_not_given(model_settings.temperature),
             top_p=self._non_null_or_not_given(model_settings.top_p),
             truncation=self._non_null_or_not_given(model_settings.truncation),
@@ -266,6 +275,7 @@ class OpenAIResponsesModel(Model):
             store=self._non_null_or_not_given(model_settings.store),
             reasoning=self._non_null_or_not_given(model_settings.reasoning),
             metadata=self._non_null_or_not_given(model_settings.metadata),
+            **(model_settings.extra_args or {}),
         )

     def _get_client(self) -> AsyncOpenAI:
agents/prompts.py ADDED
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import inspect
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Callable
+
+from openai.types.responses.response_prompt_param import (
+    ResponsePromptParam,
+    Variables as ResponsesPromptVariables,
+)
+from typing_extensions import NotRequired, TypedDict
+
+from agents.util._types import MaybeAwaitable
+
+from .exceptions import UserError
+from .run_context import RunContextWrapper
+
+if TYPE_CHECKING:
+    from .agent import Agent
+
+
+class Prompt(TypedDict):
+    """Prompt configuration to use for interacting with an OpenAI model."""
+
+    id: str
+    """The unique ID of the prompt."""
+
+    version: NotRequired[str]
+    """Optional version of the prompt."""
+
+    variables: NotRequired[dict[str, ResponsesPromptVariables]]
+    """Optional variables to substitute into the prompt."""
+
+
+@dataclass
+class GenerateDynamicPromptData:
+    """Inputs to a function that allows you to dynamically generate a prompt."""
+
+    context: RunContextWrapper[Any]
+    """The run context."""
+
+    agent: Agent[Any]
+    """The agent for which the prompt is being generated."""
+
+
+DynamicPromptFunction = Callable[[GenerateDynamicPromptData], MaybeAwaitable[Prompt]]
+"""A function that dynamically generates a prompt."""
+
+
+class PromptUtil:
+    @staticmethod
+    async def to_model_input(
+        prompt: Prompt | DynamicPromptFunction | None,
+        context: RunContextWrapper[Any],
+        agent: Agent[Any],
+    ) -> ResponsePromptParam | None:
+        if prompt is None:
+            return None
+
+        resolved_prompt: Prompt
+        if isinstance(prompt, dict):
+            resolved_prompt = prompt
+        else:
+            func_result = prompt(GenerateDynamicPromptData(context=context, agent=agent))
+            if inspect.isawaitable(func_result):
+                resolved_prompt = await func_result
+            else:
+                resolved_prompt = func_result
+            if not isinstance(resolved_prompt, dict):
+                raise UserError("Dynamic prompt function must return a Prompt")
+
+        return {
+            "id": resolved_prompt["id"],
+            "version": resolved_prompt.get("version"),
+            "variables": resolved_prompt.get("variables"),
+        }
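
A minimal sketch of a dynamic prompt function using the new exports; the prompt ID and variable name are placeholders:

```python
from agents import Agent, GenerateDynamicPromptData, Prompt

async def pick_prompt(data: GenerateDynamicPromptData) -> Prompt:
    # Resolve the prompt per run, e.g. based on the agent or run context.
    return {
        "id": "pmpt_123",  # placeholder prompt ID
        "version": "2",
        "variables": {"agent_name": data.agent.name},
    }

agent = Agent(name="Assistant", prompt=pick_prompt)
```

`PromptUtil.to_model_input` accepts both sync and async functions, awaiting the result when needed.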
agents/repl.py ADDED
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from typing import Any
+
+from openai.types.responses.response_text_delta_event import ResponseTextDeltaEvent
+
+from .agent import Agent
+from .items import ItemHelpers, TResponseInputItem
+from .result import RunResultBase
+from .run import Runner
+from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
+
+
+async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
+    """Run a simple REPL loop with the given agent.
+
+    This utility allows quick manual testing and debugging of an agent from the
+    command line. Conversation state is preserved across turns. Enter ``exit``
+    or ``quit`` to stop the loop.
+
+    Args:
+        agent: The starting agent to run.
+        stream: Whether to stream the agent output.
+    """
+
+    current_agent = agent
+    input_items: list[TResponseInputItem] = []
+    while True:
+        try:
+            user_input = input(" > ")
+        except (EOFError, KeyboardInterrupt):
+            print()
+            break
+        if user_input.strip().lower() in {"exit", "quit"}:
+            break
+        if not user_input:
+            continue
+
+        input_items.append({"role": "user", "content": user_input})
+
+        result: RunResultBase
+        if stream:
+            result = Runner.run_streamed(current_agent, input=input_items)
+            async for event in result.stream_events():
+                if isinstance(event, RawResponsesStreamEvent):
+                    if isinstance(event.data, ResponseTextDeltaEvent):
+                        print(event.data.delta, end="", flush=True)
+                elif isinstance(event, RunItemStreamEvent):
+                    if event.item.type == "tool_call_item":
+                        print("\n[tool called]", flush=True)
+                    elif event.item.type == "tool_call_output_item":
+                        print(f"\n[tool output: {event.item.output}]", flush=True)
+                    elif event.item.type == "message_output_item":
+                        message = ItemHelpers.text_message_output(event.item)
+                        print(message, end="", flush=True)
+                elif isinstance(event, AgentUpdatedStreamEvent):
+                    print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
+            print()
+        else:
+            result = await Runner.run(current_agent, input_items)
+            if result.final_output is not None:
+                print(result.final_output)
+
+        current_agent = result.last_agent
+        input_items = result.to_input_list()
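
Typical usage of the new helper:

```python
import asyncio

from agents import Agent, run_demo_loop

async def main() -> None:
    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
    # Streams output by default; type "exit" or "quit" (or Ctrl-D) to stop.
    await run_demo_loop(agent)

if __name__ == "__main__":
    asyncio.run(main())
```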
agents/run.py CHANGED
@@ -6,6 +6,9 @@ from dataclasses import dataclass, field
 from typing import Any, cast

 from openai.types.responses import ResponseCompletedEvent
+from openai.types.responses.response_prompt_param import (
+    ResponsePromptParam,
+)

 from ._run_impl import (
     AgentToolUseTracker,
@@ -682,7 +685,10 @@ class Runner:
         streamed_result.current_agent = agent
         streamed_result._current_agent_output_schema = output_schema

-        system_prompt = await agent.get_system_prompt(context_wrapper)
+        system_prompt, prompt_config = await asyncio.gather(
+            agent.get_system_prompt(context_wrapper),
+            agent.get_prompt(context_wrapper),
+        )

         handoffs = cls._get_handoffs(agent)
         model = cls._get_model(agent, run_config)
@@ -706,6 +712,7 @@ class Runner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            prompt=prompt_config,
         ):
             if isinstance(event, ResponseCompletedEvent):
                 usage = (
@@ -777,7 +784,10 @@ class Runner:
             ),
         )

-        system_prompt = await agent.get_system_prompt(context_wrapper)
+        system_prompt, prompt_config = await asyncio.gather(
+            agent.get_system_prompt(context_wrapper),
+            agent.get_prompt(context_wrapper),
+        )

         output_schema = cls._get_output_schema(agent)
         handoffs = cls._get_handoffs(agent)
@@ -795,6 +805,7 @@ class Runner:
             run_config,
             tool_use_tracker,
             previous_response_id,
+            prompt_config,
         )

         return await cls._get_single_step_result_from_response(
@@ -938,6 +949,7 @@ class Runner:
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
@@ -954,6 +966,7 @@ class Runner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
            previous_response_id=previous_response_id,
+            prompt=prompt_config,
        )

        context_wrapper.usage.add(new_response.usage)
agents/tool.py CHANGED
@@ -20,6 +20,7 @@ from .function_schema import DocstringStyle, function_schema
 from .items import RunItem
 from .logger import logger
 from .run_context import RunContextWrapper
+from .tool_context import ToolContext
 from .tracing import SpanError
 from .util import _error_tracing
 from .util._types import MaybeAwaitable
@@ -31,8 +32,13 @@ ToolParams = ParamSpec("ToolParams")

 ToolFunctionWithoutContext = Callable[ToolParams, Any]
 ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]
+ToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]

-ToolFunction = Union[ToolFunctionWithoutContext[ToolParams], ToolFunctionWithContext[ToolParams]]
+ToolFunction = Union[
+    ToolFunctionWithoutContext[ToolParams],
+    ToolFunctionWithContext[ToolParams],
+    ToolFunctionWithToolContext[ToolParams],
+]


 @dataclass
@@ -62,7 +68,7 @@ class FunctionTool:
     params_json_schema: dict[str, Any]
     """The JSON schema for the tool's parameters."""

-    on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[Any]]
+    on_invoke_tool: Callable[[ToolContext[Any], str], Awaitable[Any]]
     """A function that invokes the tool with the given context and parameters. The params passed
     are:
     1. The tool run context.
@@ -344,7 +350,7 @@ def function_tool(
             strict_json_schema=strict_mode,
         )

-        async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> Any:
+        async def _on_invoke_tool_impl(ctx: ToolContext[Any], input: str) -> Any:
             try:
                 json_data: dict[str, Any] = json.loads(input) if input else {}
             except Exception as e:
@@ -393,7 +399,7 @@ def function_tool(

             return result

-        async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> Any:
+        async def _on_invoke_tool(ctx: ToolContext[Any], input: str) -> Any:
             try:
                 return await _on_invoke_tool_impl(ctx, input)
             except Exception as e:
agents/tool_context.py ADDED
@@ -0,0 +1,29 @@
+from dataclasses import dataclass, field, fields
+from typing import Any
+
+from .run_context import RunContextWrapper, TContext
+
+
+def _assert_must_pass_tool_call_id() -> str:
+    raise ValueError("tool_call_id must be passed to ToolContext")
+
+
+@dataclass
+class ToolContext(RunContextWrapper[TContext]):
+    """The context of a tool call."""
+
+    tool_call_id: str = field(default_factory=_assert_must_pass_tool_call_id)
+    """The ID of the tool call."""
+
+    @classmethod
+    def from_agent_context(
+        cls, context: RunContextWrapper[TContext], tool_call_id: str
+    ) -> "ToolContext":
+        """
+        Create a ToolContext from a RunContextWrapper.
+        """
+        # Grab the names of the RunContextWrapper's init=True fields
+        base_values: dict[str, Any] = {
+            f.name: getattr(context, f.name) for f in fields(RunContextWrapper) if f.init
+        }
+        return cls(tool_call_id=tool_call_id, **base_values)
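
Because `ToolContext` subclasses `RunContextWrapper`, tools annotated with either type keep working; annotating the first parameter as `ToolContext` additionally exposes the call ID. A sketch:

```python
from agents import function_tool
from agents.tool_context import ToolContext

@function_tool
def audited_lookup(ctx: ToolContext, query: str) -> str:
    """Look up a record and tag the result with the originating tool call."""
    # ctx.context and ctx.usage come from RunContextWrapper;
    # tool_call_id is the field added in this release.
    return f"result for {query!r} (call {ctx.tool_call_id})"
```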
openai_agents-0.0.18.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.17
+Version: 0.0.18
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,8 +19,8 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp<2,>=1.8.0; python_version >= '3.10'
-Requires-Dist: openai>=1.81.0
+Requires-Dist: mcp<2,>=1.9.4; python_version >= '3.10'
+Requires-Dist: openai>=1.87.0
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
@@ -40,6 +40,9 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi

 <img src="https://cdn.openai.com/API/docs/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">

+> [!NOTE]
+> Looking for the JavaScript/TypeScript version? Check out [Agents SDK JS/TS](https://github.com/openai/openai-agents-js).
+
 ### Core concepts:

 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
openai_agents-0.0.18.dist-info/RECORD CHANGED
@@ -1,25 +1,28 @@
-agents/__init__.py,sha256=ZnZazUPdfh19uNOgNyu2OBQr5zz2DdUrKgam3Y9BAk4,7438
+agents/__init__.py,sha256=PakDxML3ApH6jRLlq9wvHqvV8xGoONi4sH-z_iuV6tQ,7645
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=DyIodrzaWNdydZWDKJT6wGg3v445jwBUOwxb5mM-c58,42742
-agents/agent.py,sha256=eeOWjR-a0xOB4Ctt9OTl93rEr_VRAkynN2M0vfx2nTs,11195
+agents/_run_impl.py,sha256=_3XbxIKNLXJHvjCQgnQTosT0CWCS9F7qFtW_wOdDeNQ,42863
+agents/agent.py,sha256=bK4mD3BB5FGadX4d3elz-CM6OXC_IUPXGYLmLT0WKTA,11889
 agents/agent_output.py,sha256=cVIVwpsgOfloCHL0BD9DSCBCzW_s3T4LesDhvJRu2Uc,7127
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
-agents/function_schema.py,sha256=k4GTdxf5bvcisVr9b4xSdTGzkB5oP3XZnJMdouABCsw,12909
+agents/function_schema.py,sha256=XoZVE1dnrDYFhHIIv8SmK3CJEsesC0a2Kj0AYEec9Ok,13106
 agents/guardrail.py,sha256=vWWcApo9s_6aHapQ5AMko08MqC8Jrlk-J5iqIRctCDQ,9291
-agents/handoffs.py,sha256=mWvtgWMJjSIlhUR9xf-pXOJbWVCKxNBXytP9tsPGWII,9045
+agents/handoffs.py,sha256=ZcSPM4lrAg4zs3GXwiCzKd3WefgkXlb1JJrmXUTAhQ8,9040
 agents/items.py,sha256=lXFc_gKLEqwXIcyMKk4Q-6Rjry0MWD93xlvk4Y1W970,9695
 agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=7s9YjfHBVz1f1a-V3dd-8eMe-IAgfDXhQgChI27Kz00,3326
+agents/model_settings.py,sha256=bPeBKdKY3O8NLT70uQU6HKOISwscVOD70RRrW4YpwnY,4021
+agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+agents/repl.py,sha256=v06JyiZnHfqWZHpHEUj9CSH4RTfIVKQ9NJYwN_YiwT0,2578
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=cGvRtw9Ck7gEthmdnUBtb82lD7y0JgIZFsjMXbkCJZY,41816
+agents/run.py,sha256=lthR3iUER16gRgSP5aSnaDmNCKQ-wdHwymZbfTQUmNk,42250
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
-agents/tool.py,sha256=yDUuR6oAO2NufHoJqKtqLExGx6ClHPTYYPsdraf39P0,15675
+agents/tool.py,sha256=CdylnBeM9s6b8qc3yWwtQ-4ZV4FesnMYUsRrmJ2ix08,15833
+agents/tool_context.py,sha256=JAo3hyk5nvUe81IKF71f30Im9vql664zfu5mQJj7jas,941
 agents/usage.py,sha256=GB83eElU-DVkdutGObGDSX5vJNy8ssu3Xbpp5LlHfwU,1643
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
 agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -27,10 +30,10 @@ agents/extensions/handoff_filters.py,sha256=2cXxu1JROez96CpTiGuT9PIuaIrIE8ksP01f
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=g2eEwW22qe3A4WtH37LwaHhK3QZE9FYHVw9IcOVpwbk,4699
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=zcjdGI2EyhKqiXnobl_WPuPL8_zl2sGDOz7bul3Kjzs,14447
+agents/extensions/models/litellm_model.py,sha256=S6PJ8fcsjTJ_QZEuU0zLxHMMxtXixkCeNvs9STjkUuU,14850
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=_aDpMTvYCe1IezOEasZ0vmombBM8r7BD8lpXiKi-UlM,499
-agents/mcp/server.py,sha256=mP_JxJzz00prX_0SzTZO38bjvhj4A61icypUjvxdG4k,15915
+agents/mcp/server.py,sha256=98L7Fn4r5taSueL84taBZlP9mhVJZcok39GJ3IG6kPQ,15892
 agents/mcp/util.py,sha256=qXbAo9O-yv0JfmZBxDJIQ8ieHMTNWTEX5lnSVBv637k,5243
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
@@ -38,11 +41,11 @@ agents/models/chatcmpl_converter.py,sha256=Sae-ITlhQz8_SiFiSat7Z-lavqIuczduOXR_P
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
 agents/models/chatcmpl_stream_handler.py,sha256=sDl8O7AKxpWxAq7-bgCUClD5JySUnbQ8RTPc0HeDElM,13713
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
-agents/models/interface.py,sha256=eEpiIBn9MxsmXUK1HPpn3c7TYPduBYC7tsWnDHSYJHo,3553
+agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=aSE1cww-C-6p5PXpslo70X-V0MHqbN6msLhnawFbhJU,11445
+agents/models/openai_chatcompletions.py,sha256=brGc48JXbNJYKFD6Fz6HmC_-p8FfwvpFd8_cQtJdEAk,11877
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=JFajISS-sYYxKhb66tZ5cYPEqIYOj6ap762Z-87c7fE,15368
+agents/models/openai_responses.py,sha256=9XtVlZbzch0g96E8lT4wbvTHN_12W1re-U4r4h4VPSY,15875
 agents/tracing/__init__.py,sha256=-hJeEiNvgyQdEXpFTrr_qu_XYREvIrF5KyePDtovSak,2804
 agents/tracing/create.py,sha256=kkMf2pp5Te20YkiSvf3Xj3J9qMibQCjEAxZs1Lr_kTE,18124
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -76,7 +79,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.0.17.dist-info/METADATA,sha256=2SF0ZEolF_69dW0eTSWGuJc_RLuOAnYkqvBqj4IgqCw,8163
-openai_agents-0.0.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.0.17.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.0.17.dist-info/RECORD,,
+openai_agents-0.0.18.dist-info/METADATA,sha256=qLzQQNCJ2wbS3C_BTGg4GRcmBp7IIgQi98z_wL3RrVw,8297
+openai_agents-0.0.18.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.0.18.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.0.18.dist-info/RECORD,,