openai-agents 0.0.17__py3-none-any.whl → 0.0.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents might be problematic.
- agents/__init__.py +8 -0
- agents/_run_impl.py +7 -5
- agents/agent.py +14 -0
- agents/extensions/models/litellm_model.py +11 -1
- agents/function_schema.py +7 -5
- agents/handoffs.py +2 -2
- agents/mcp/server.py +4 -4
- agents/model_settings.py +15 -0
- agents/models/interface.py +6 -0
- agents/models/openai_chatcompletions.py +9 -1
- agents/models/openai_responses.py +10 -0
- agents/prompts.py +76 -0
- agents/repl.py +65 -0
- agents/run.py +221 -97
- agents/tool.py +10 -4
- agents/tool_context.py +29 -0
- agents/tracing/__init__.py +11 -5
- agents/tracing/create.py +16 -16
- agents/tracing/provider.py +294 -0
- agents/tracing/setup.py +13 -206
- agents/tracing/util.py +9 -10
- {openai_agents-0.0.17.dist-info → openai_agents-0.0.19.dist-info}/METADATA +6 -3
- {openai_agents-0.0.17.dist-info → openai_agents-0.0.19.dist-info}/RECORD +25 -21
- {openai_agents-0.0.17.dist-info → openai_agents-0.0.19.dist-info}/WHEEL +0 -0
- {openai_agents-0.0.17.dist-info → openai_agents-0.0.19.dist-info}/licenses/LICENSE +0 -0
agents/__init__.py
CHANGED
@@ -45,6 +45,8 @@ from .models.interface import Model, ModelProvider, ModelTracing
 from .models.openai_chatcompletions import OpenAIChatCompletionsModel
 from .models.openai_provider import OpenAIProvider
 from .models.openai_responses import OpenAIResponsesModel
+from .prompts import DynamicPromptFunction, GenerateDynamicPromptData, Prompt
+from .repl import run_demo_loop
 from .result import RunResult, RunResultStreaming
 from .run import RunConfig, Runner
 from .run_context import RunContextWrapper, TContext
@@ -102,6 +104,7 @@ from .tracing import (
     handoff_span,
     mcp_tools_span,
     set_trace_processors,
+    set_trace_provider,
     set_tracing_disabled,
     set_tracing_export_api_key,
     speech_group_span,
@@ -160,6 +163,7 @@ __all__ = [
     "ToolsToFinalOutputFunction",
     "ToolsToFinalOutputResult",
     "Runner",
+    "run_demo_loop",
     "Model",
     "ModelProvider",
     "ModelTracing",
@@ -176,6 +180,9 @@ __all__ = [
     "AgentsException",
     "InputGuardrailTripwireTriggered",
     "OutputGuardrailTripwireTriggered",
+    "DynamicPromptFunction",
+    "GenerateDynamicPromptData",
+    "Prompt",
     "MaxTurnsExceeded",
     "ModelBehaviorError",
     "UserError",
@@ -240,6 +247,7 @@ __all__ = [
     "guardrail_span",
     "handoff_span",
     "set_trace_processors",
+    "set_trace_provider",
     "set_tracing_disabled",
     "speech_group_span",
     "transcription_span",
agents/_run_impl.py
CHANGED
@@ -75,6 +75,7 @@ from .tool import (
     MCPToolApprovalRequest,
     Tool,
 )
+from .tool_context import ToolContext
 from .tracing import (
     SpanError,
     Trace,
@@ -543,23 +544,24 @@ class RunImpl:
             func_tool: FunctionTool, tool_call: ResponseFunctionToolCall
         ) -> Any:
             with function_span(func_tool.name) as span_fn:
+                tool_context = ToolContext.from_agent_context(context_wrapper, tool_call.call_id)
                 if config.trace_include_sensitive_data:
                     span_fn.span_data.input = tool_call.arguments
                 try:
                     _, _, result = await asyncio.gather(
-                        hooks.on_tool_start(
+                        hooks.on_tool_start(tool_context, agent, func_tool),
                         (
-                            agent.hooks.on_tool_start(
+                            agent.hooks.on_tool_start(tool_context, agent, func_tool)
                             if agent.hooks
                             else _coro.noop_coroutine()
                         ),
-                        func_tool.on_invoke_tool(
+                        func_tool.on_invoke_tool(tool_context, tool_call.arguments),
                     )

                     await asyncio.gather(
-                        hooks.on_tool_end(
+                        hooks.on_tool_end(tool_context, agent, func_tool, result),
                         (
-                            agent.hooks.on_tool_end(
+                            agent.hooks.on_tool_end(tool_context, agent, func_tool, result)
                             if agent.hooks
                             else _coro.noop_coroutine()
                         ),
agents/agent.py
CHANGED
@@ -7,6 +7,7 @@ from collections.abc import Awaitable
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast

+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from typing_extensions import NotRequired, TypeAlias, TypedDict

 from .agent_output import AgentOutputSchemaBase
@@ -17,6 +18,7 @@ from .logger import logger
 from .mcp import MCPUtil
 from .model_settings import ModelSettings
 from .models.interface import Model
+from .prompts import DynamicPromptFunction, Prompt, PromptUtil
 from .run_context import RunContextWrapper, TContext
 from .tool import FunctionTool, FunctionToolResult, Tool, function_tool
 from .util import _transforms
@@ -95,6 +97,12 @@ class Agent(Generic[TContext]):
     return a string.
     """

+    prompt: Prompt | DynamicPromptFunction | None = None
+    """A prompt object (or a function that returns a Prompt). Prompts allow you to dynamically
+    configure the instructions, tools and other config for an agent outside of your code. Only
+    usable with OpenAI models, using the Responses API.
+    """
+
     handoff_description: str | None = None
     """A description of the agent. This is used when the agent is used as a handoff, so that an
     LLM knows what it does and when to invoke it.
@@ -242,6 +250,12 @@ class Agent(Generic[TContext]):

         return None

+    async def get_prompt(
+        self, run_context: RunContextWrapper[TContext]
+    ) -> ResponsePromptParam | None:
+        """Get the prompt for the agent."""
+        return await PromptUtil.to_model_input(self.prompt, run_context, self)
+
     async def get_mcp_tools(self) -> list[Tool]:
         """Fetches the available tools from the MCP servers."""
         convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
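Usage sketch (illustrative, not part of the diff): the new Agent.prompt field accepts a Prompt dict referencing a prompt stored on the OpenAI platform; the ID, version and variables below are placeholders, and per the docstring above this only applies to OpenAI models used via the Responses API.

from agents import Agent

agent = Agent(
    name="Support agent",
    # Placeholder prompt ID; Agent.get_prompt() resolves this to the ResponsePromptParam
    # value that the model classes further down now accept as a `prompt` argument.
    prompt={"id": "pmpt_abc123", "version": "1", "variables": {"tone": "friendly"}},
)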
agents/extensions/models/litellm_model.py
CHANGED
@@ -71,6 +71,7 @@ class LitellmModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: Any | None = None,
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
@@ -88,6 +89,7 @@ class LitellmModel(Model):
                 span_generation,
                 tracing,
                 stream=False,
+                prompt=prompt,
             )

             assert isinstance(response.choices[0], litellm.types.utils.Choices)
@@ -153,8 +155,8 @@ class LitellmModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        *,
         previous_response_id: str | None,
+        prompt: Any | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         with generation_span(
             model=str(self.model),
@@ -172,6 +174,7 @@ class LitellmModel(Model):
                 span_generation,
                 tracing,
                 stream=True,
+                prompt=prompt,
             )

             final_response: Response | None = None
@@ -202,6 +205,7 @@ class LitellmModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[True],
+        prompt: Any | None = None,
     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

     @overload
@@ -216,6 +220,7 @@ class LitellmModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[False],
+        prompt: Any | None = None,
     ) -> litellm.types.utils.ModelResponse: ...

     async def _fetch_response(
@@ -229,6 +234,7 @@ class LitellmModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: bool = False,
+        prompt: Any | None = None,
     ) -> litellm.types.utils.ModelResponse | tuple[Response, AsyncStream[ChatCompletionChunk]]:
         converted_messages = Converter.items_to_messages(input)

@@ -284,6 +290,10 @@ class LitellmModel(Model):
         if model_settings.extra_body and isinstance(model_settings.extra_body, dict):
             extra_kwargs.update(model_settings.extra_body)

+        # Add kwargs from model_settings.extra_args, filtering out None values
+        if model_settings.extra_args:
+            extra_kwargs.update(model_settings.extra_args)
+
         ret = await litellm.acompletion(
             model=self.model,
             messages=converted_messages,
agents/function_schema.py
CHANGED
@@ -13,6 +13,7 @@ from pydantic import BaseModel, Field, create_model
 from .exceptions import UserError
 from .run_context import RunContextWrapper
 from .strict_schema import ensure_strict_json_schema
+from .tool_context import ToolContext


 @dataclass
@@ -222,7 +223,8 @@ def function_schema(
         doc_info = None
         param_descs = {}

-
+    # Ensure name_override takes precedence even if docstring info is disabled.
+    func_name = name_override or (doc_info.name if doc_info else func.__name__)

     # 2. Inspect function signature and get type hints
     sig = inspect.signature(func)
@@ -237,21 +239,21 @@ def function_schema(
         ann = type_hints.get(first_name, first_param.annotation)
         if ann != inspect._empty:
             origin = get_origin(ann) or ann
-            if origin is RunContextWrapper:
+            if origin is RunContextWrapper or origin is ToolContext:
                 takes_context = True  # Mark that the function takes context
             else:
                 filtered_params.append((first_name, first_param))
         else:
             filtered_params.append((first_name, first_param))

-    # For parameters other than the first, raise error if any use RunContextWrapper.
+    # For parameters other than the first, raise error if any use RunContextWrapper or ToolContext.
     for name, param in params[1:]:
         ann = type_hints.get(name, param.annotation)
         if ann != inspect._empty:
             origin = get_origin(ann) or ann
-            if origin is RunContextWrapper:
+            if origin is RunContextWrapper or origin is ToolContext:
                 raise UserError(
-                    f"RunContextWrapper param found at non-first position in function"
+                    f"RunContextWrapper/ToolContext param found at non-first position in function"
                     f" {func.__name__}"
                 )
         filtered_params.append((name, param))
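Usage sketch (illustrative, not part of the diff): because ToolContext is now treated like RunContextWrapper in the first-parameter position, a function tool can receive per-invocation context. tool_context.py itself is not shown in this section, so the attribute read below is guarded and purely illustrative.

from agents import function_tool
from agents.tool_context import ToolContext

@function_tool
def lookup_order(ctx: ToolContext, order_id: str) -> str:
    # The context parameter is consumed by the framework and excluded from the tool's JSON schema.
    call_id = getattr(ctx, "tool_call_id", "unknown")  # attribute name assumed for illustration
    return f"order {order_id} (tool call {call_id})"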
agents/handoffs.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import inspect
+import json
 from collections.abc import Awaitable
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Callable, Generic, cast, overload
@@ -99,8 +100,7 @@ class Handoff(Generic[TContext]):
     """

     def get_transfer_message(self, agent: Agent[Any]) -> str:
-
-        return base
+        return json.dumps({"assistant": agent.name})

     @classmethod
     def default_tool_name(cls, agent: Agent[Any]) -> str:
agents/mcp/server.py
CHANGED
@@ -340,10 +340,10 @@ class MCPServerStreamableHttpParams(TypedDict):
     headers: NotRequired[dict[str, str]]
     """The headers to send to the server."""

-    timeout: NotRequired[timedelta]
+    timeout: NotRequired[timedelta | float]
     """The timeout for the HTTP request. Defaults to 5 seconds."""

-    sse_read_timeout: NotRequired[timedelta]
+    sse_read_timeout: NotRequired[timedelta | float]
     """The timeout for the SSE connection, in seconds. Defaults to 5 minutes."""

     terminate_on_close: NotRequired[bool]
@@ -401,8 +401,8 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
         return streamablehttp_client(
             url=self.params["url"],
             headers=self.params.get("headers", None),
-            timeout=self.params.get("timeout",
-            sse_read_timeout=self.params.get("sse_read_timeout",
+            timeout=self.params.get("timeout", 5),
+            sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
             terminate_on_close=self.params.get("terminate_on_close", True),
         )

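Usage sketch (illustrative, not part of the diff): with the widened timeout types, plain seconds can be passed where a timedelta was previously required; the URL below is a placeholder.

from agents.mcp import MCPServerStreamableHttp

server = MCPServerStreamableHttp(
    params={
        "url": "http://localhost:8000/mcp",  # placeholder endpoint
        "timeout": 10.0,             # seconds as a float instead of timedelta(seconds=10)
        "sse_read_timeout": 300.0,   # 5 minutes
    }
)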
agents/model_settings.py
CHANGED
@@ -73,6 +73,11 @@ class ModelSettings:
     """Additional headers to provide with the request.
     Defaults to None if not provided."""

+    extra_args: dict[str, Any] | None = None
+    """Arbitrary keyword arguments to pass to the model API call.
+    These will be passed directly to the underlying model provider's API.
+    Use with caution as not all models support all parameters."""
+
     def resolve(self, override: ModelSettings | None) -> ModelSettings:
         """Produce a new ModelSettings by overlaying any non-None values from the
         override on top of this instance."""
@@ -84,6 +89,16 @@ class ModelSettings:
             for field in fields(self)
             if getattr(override, field.name) is not None
         }
+
+        # Handle extra_args merging specially - merge dictionaries instead of replacing
+        if self.extra_args is not None or override.extra_args is not None:
+            merged_args = {}
+            if self.extra_args:
+                merged_args.update(self.extra_args)
+            if override.extra_args:
+                merged_args.update(override.extra_args)
+            changes["extra_args"] = merged_args if merged_args else None
+
         return replace(self, **changes)

     def to_json_dict(self) -> dict[str, Any]:
agents/models/interface.py
CHANGED
@@ -5,6 +5,8 @@ import enum
 from collections.abc import AsyncIterator
 from typing import TYPE_CHECKING

+from openai.types.responses.response_prompt_param import ResponsePromptParam
+
 from ..agent_output import AgentOutputSchemaBase
 from ..handoffs import Handoff
 from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent
@@ -46,6 +48,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None,
     ) -> ModelResponse:
         """Get a response from the model.

@@ -59,6 +62,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            prompt: The prompt config to use for the model.

         Returns:
             The full model response.
@@ -77,6 +81,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """Stream a response from the model.

@@ -90,6 +95,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            prompt: The prompt config to use for the model.

         Returns:
             An iterator of response stream events, in OpenAI Responses format.
agents/models/openai_chatcompletions.py
CHANGED
@@ -9,6 +9,7 @@ from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.responses import Response
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

 from .. import _debug
@@ -53,6 +54,7 @@ class OpenAIChatCompletionsModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
@@ -69,6 +71,7 @@ class OpenAIChatCompletionsModel(Model):
                 span_generation,
                 tracing,
                 stream=False,
+                prompt=prompt,
             )

             first_choice = response.choices[0]
@@ -136,8 +139,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -157,6 +160,7 @@ class OpenAIChatCompletionsModel(Model):
                 span_generation,
                 tracing,
                 stream=True,
+                prompt=prompt,
             )

             final_response: Response | None = None
@@ -187,6 +191,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

     @overload
@@ -201,6 +206,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion: ...

     async def _fetch_response(
@@ -214,6 +220,7 @@ class OpenAIChatCompletionsModel(Model):
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: bool = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
         converted_messages = Converter.items_to_messages(input)

@@ -281,6 +288,7 @@ class OpenAIChatCompletionsModel(Model):
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
+            **(model_settings.extra_args or {}),
         )

         if isinstance(ret, ChatCompletion):
agents/models/openai_responses.py
CHANGED
@@ -17,6 +17,7 @@ from openai.types.responses import (
     WebSearchToolParam,
     response_create_params,
 )
+from openai.types.responses.response_prompt_param import ResponsePromptParam

 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -74,6 +75,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
             try:
@@ -86,6 +88,7 @@ class OpenAIResponsesModel(Model):
                     handoffs,
                     previous_response_id,
                     stream=False,
+                    prompt=prompt,
                 )

                 if _debug.DONT_LOG_MODEL_DATA:
@@ -141,6 +144,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -156,6 +160,7 @@ class OpenAIResponsesModel(Model):
                     handoffs,
                     previous_response_id,
                     stream=True,
+                    prompt=prompt,
                 )

                 final_response: Response | None = None
@@ -192,6 +197,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...

     @overload
@@ -205,6 +211,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> Response: ...

     async def _fetch_response(
@@ -217,6 +224,7 @@ class OpenAIResponsesModel(Model):
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True] | Literal[False] = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
         list_input = ItemHelpers.input_to_new_input_list(input)

@@ -252,6 +260,7 @@ class OpenAIResponsesModel(Model):
             input=list_input,
             include=converted_tools.includes,
             tools=converted_tools.tools,
+            prompt=self._non_null_or_not_given(prompt),
             temperature=self._non_null_or_not_given(model_settings.temperature),
             top_p=self._non_null_or_not_given(model_settings.top_p),
             truncation=self._non_null_or_not_given(model_settings.truncation),
@@ -266,6 +275,7 @@ class OpenAIResponsesModel(Model):
             store=self._non_null_or_not_given(model_settings.store),
             reasoning=self._non_null_or_not_given(model_settings.reasoning),
             metadata=self._non_null_or_not_given(model_settings.metadata),
+            **(model_settings.extra_args or {}),
         )

     def _get_client(self) -> AsyncOpenAI:
agents/prompts.py
ADDED
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import inspect
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Callable
+
+from openai.types.responses.response_prompt_param import (
+    ResponsePromptParam,
+    Variables as ResponsesPromptVariables,
+)
+from typing_extensions import NotRequired, TypedDict
+
+from agents.util._types import MaybeAwaitable
+
+from .exceptions import UserError
+from .run_context import RunContextWrapper
+
+if TYPE_CHECKING:
+    from .agent import Agent
+
+
+class Prompt(TypedDict):
+    """Prompt configuration to use for interacting with an OpenAI model."""
+
+    id: str
+    """The unique ID of the prompt."""
+
+    version: NotRequired[str]
+    """Optional version of the prompt."""
+
+    variables: NotRequired[dict[str, ResponsesPromptVariables]]
+    """Optional variables to substitute into the prompt."""
+
+
+@dataclass
+class GenerateDynamicPromptData:
+    """Inputs to a function that allows you to dynamically generate a prompt."""
+
+    context: RunContextWrapper[Any]
+    """The run context."""
+
+    agent: Agent[Any]
+    """The agent for which the prompt is being generated."""
+
+
+DynamicPromptFunction = Callable[[GenerateDynamicPromptData], MaybeAwaitable[Prompt]]
+"""A function that dynamically generates a prompt."""
+
+
+class PromptUtil:
+    @staticmethod
+    async def to_model_input(
+        prompt: Prompt | DynamicPromptFunction | None,
+        context: RunContextWrapper[Any],
+        agent: Agent[Any],
+    ) -> ResponsePromptParam | None:
+        if prompt is None:
+            return None
+
+        resolved_prompt: Prompt
+        if isinstance(prompt, dict):
+            resolved_prompt = prompt
+        else:
+            func_result = prompt(GenerateDynamicPromptData(context=context, agent=agent))
+            if inspect.isawaitable(func_result):
+                resolved_prompt = await func_result
+            else:
+                resolved_prompt = func_result
+            if not isinstance(resolved_prompt, dict):
+                raise UserError("Dynamic prompt function must return a Prompt")
+
+        return {
+            "id": resolved_prompt["id"],
+            "version": resolved_prompt.get("version"),
+            "variables": resolved_prompt.get("variables"),
+        }
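Usage sketch (illustrative, not part of the diff): a dynamic prompt function receives GenerateDynamicPromptData and returns a Prompt (sync or async are both supported by PromptUtil above); the prompt ID and variables are placeholders.

from agents import Agent, GenerateDynamicPromptData, Prompt


async def choose_prompt(data: GenerateDynamicPromptData) -> Prompt:
    # data.context is the RunContextWrapper, data.agent is the agent being run.
    return {
        "id": "pmpt_abc123",  # placeholder prompt ID
        "version": "2",
        "variables": {"audience": "developers"},
    }


agent = Agent(name="Docs assistant", prompt=choose_prompt)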
agents/repl.py
ADDED
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from typing import Any
+
+from openai.types.responses.response_text_delta_event import ResponseTextDeltaEvent
+
+from .agent import Agent
+from .items import ItemHelpers, TResponseInputItem
+from .result import RunResultBase
+from .run import Runner
+from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
+
+
+async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
+    """Run a simple REPL loop with the given agent.
+
+    This utility allows quick manual testing and debugging of an agent from the
+    command line. Conversation state is preserved across turns. Enter ``exit``
+    or ``quit`` to stop the loop.
+
+    Args:
+        agent: The starting agent to run.
+        stream: Whether to stream the agent output.
+    """
+
+    current_agent = agent
+    input_items: list[TResponseInputItem] = []
+    while True:
+        try:
+            user_input = input(" > ")
+        except (EOFError, KeyboardInterrupt):
+            print()
+            break
+        if user_input.strip().lower() in {"exit", "quit"}:
+            break
+        if not user_input:
+            continue
+
+        input_items.append({"role": "user", "content": user_input})
+
+        result: RunResultBase
+        if stream:
+            result = Runner.run_streamed(current_agent, input=input_items)
+            async for event in result.stream_events():
+                if isinstance(event, RawResponsesStreamEvent):
+                    if isinstance(event.data, ResponseTextDeltaEvent):
+                        print(event.data.delta, end="", flush=True)
+                elif isinstance(event, RunItemStreamEvent):
+                    if event.item.type == "tool_call_item":
+                        print("\n[tool called]", flush=True)
+                    elif event.item.type == "tool_call_output_item":
+                        print(f"\n[tool output: {event.item.output}]", flush=True)
+                    elif event.item.type == "message_output_item":
+                        message = ItemHelpers.text_message_output(event.item)
+                        print(message, end="", flush=True)
+                elif isinstance(event, AgentUpdatedStreamEvent):
+                    print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
+            print()
+        else:
+            result = await Runner.run(current_agent, input_items)
+            if result.final_output is not None:
+                print(result.final_output)
+
+        current_agent = result.last_agent
+        input_items = result.to_input_list()