openai-agents 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +10 -1
- agents/_run_impl.py +14 -3
- agents/agent.py +18 -9
- agents/extensions/handoff_filters.py +1 -0
- agents/extensions/models/litellm_model.py +12 -6
- agents/extensions/visualization.py +11 -0
- agents/handoffs.py +22 -6
- agents/items.py +5 -1
- agents/lifecycle.py +2 -2
- agents/mcp/util.py +13 -15
- agents/model_settings.py +8 -2
- agents/models/chatcmpl_converter.py +14 -11
- agents/models/openai_chatcompletions.py +18 -4
- agents/models/openai_responses.py +11 -4
- agents/realtime/agent.py +6 -0
- agents/realtime/config.py +3 -0
- agents/realtime/openai_realtime.py +11 -3
- agents/realtime/session.py +32 -8
- agents/run.py +53 -6
- agents/tracing/processors.py +5 -2
- agents/tracing/provider.py +25 -7
- {openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/METADATA +6 -2
- {openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/RECORD +25 -25
- {openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/WHEEL +0 -0
- {openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/licenses/LICENSE +0 -0
agents/__init__.py
CHANGED
@@ -5,7 +5,13 @@ from typing import Literal
 from openai import AsyncOpenAI

 from . import _config
-from .agent import Agent, AgentBase, ToolsToFinalOutputFunction, ToolsToFinalOutputResult
+from .agent import (
+    Agent,
+    AgentBase,
+    StopAtTools,
+    ToolsToFinalOutputFunction,
+    ToolsToFinalOutputResult,
+)
 from .agent_output import AgentOutputSchema, AgentOutputSchemaBase
 from .computer import AsyncComputer, Button, Computer, Environment
 from .exceptions import (
@@ -43,6 +49,7 @@ from .lifecycle import AgentHooks, RunHooks
 from .memory import Session, SQLiteSession
 from .model_settings import ModelSettings
 from .models.interface import Model, ModelProvider, ModelTracing
+from .models.multi_provider import MultiProvider
 from .models.openai_chatcompletions import OpenAIChatCompletionsModel
 from .models.openai_provider import OpenAIProvider
 from .models.openai_responses import OpenAIResponsesModel
@@ -162,6 +169,7 @@ def enable_verbose_stdout_logging():
 __all__ = [
     "Agent",
     "AgentBase",
+    "StopAtTools",
     "ToolsToFinalOutputFunction",
     "ToolsToFinalOutputResult",
     "Runner",
@@ -171,6 +179,7 @@ __all__ = [
     "ModelTracing",
     "ModelSettings",
     "OpenAIChatCompletionsModel",
+    "MultiProvider",
     "OpenAIProvider",
     "OpenAIResponsesModel",
     "AgentOutputSchema",
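The practical upshot: `StopAtTools` and `MultiProvider` are now importable from the package root. A minimal sketch of the newly exported `StopAtTools` (the agent wiring is illustrative):

```python
from agents import Agent, StopAtTools

# StopAtTools is a TypedDict: the run stops when a listed tool is called, and
# that tool's output becomes the final output (the LLM never sees the result).
agent = Agent(
    name="assistant",
    instructions="Help the user.",
    tool_use_behavior=StopAtTools(stop_at_tool_names=["save_report"]),
)
```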
agents/_run_impl.py
CHANGED
@@ -774,6 +774,7 @@ class RunImpl:
                 else original_input,
                 pre_handoff_items=tuple(pre_step_items),
                 new_items=tuple(new_step_items),
+                run_context=context_wrapper,
             )
             if not callable(input_filter):
                 _error_tracing.attach_error_to_span(
@@ -785,6 +786,8 @@ class RunImpl:
             )
             raise UserError(f"Invalid input filter: {input_filter}")
         filtered = input_filter(handoff_input_data)
+        if inspect.isawaitable(filtered):
+            filtered = await filtered
         if not isinstance(filtered, HandoffInputData):
             _error_tracing.attach_error_to_span(
                 span_handoff,
@@ -911,12 +914,12 @@ class RunImpl:
         return result

     @classmethod
-    def stream_step_result_to_queue(
+    def stream_step_items_to_queue(
         cls,
-        step_result: SingleStepResult,
+        new_step_items: list[RunItem],
         queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel],
     ):
-        for item in step_result.new_step_items:
+        for item in new_step_items:
             if isinstance(item, MessageOutputItem):
                 event = RunItemStreamEvent(item=item, name="message_output_created")
             elif isinstance(item, HandoffCallItem):
@@ -941,6 +944,14 @@ class RunImpl:
             if event:
                 queue.put_nowait(event)

+    @classmethod
+    def stream_step_result_to_queue(
+        cls,
+        step_result: SingleStepResult,
+        queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel],
+    ):
+        cls.stream_step_items_to_queue(step_result.new_step_items, queue)
+
     @classmethod
     async def _check_for_final_output_from_tools(
         cls,
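The `inspect.isawaitable` check means handoff input filters may now be coroutines; the runner awaits the result before validating it. A sketch, assuming a filter that simply drops the handoff turn's new items:

```python
from agents import Agent, handoff
from agents.handoffs import HandoffInputData

async def strip_new_items(data: HandoffInputData) -> HandoffInputData:
    # Async filters are awaited by the run loop before the handoff proceeds.
    return data.clone(new_items=())

billing_agent = Agent(name="Billing agent")
to_billing = handoff(agent=billing_agent, input_filter=strip_new_items)
```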
agents/agent.py
CHANGED
@@ -101,7 +101,7 @@ class AgentBase(Generic[TContext]):
             self.mcp_servers, convert_schemas_to_strict, run_context, self
         )

-    async def get_all_tools(self, run_context: RunContextWrapper[Any]) -> list[Tool]:
+    async def get_all_tools(self, run_context: RunContextWrapper[TContext]) -> list[Tool]:
         """All agent tools, including MCP tools and function tools."""
         mcp_tools = await self.get_mcp_tools(run_context)

@@ -201,14 +201,16 @@ class Agent(AgentBase, Generic[TContext]):
     tool_use_behavior: (
         Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
     ) = "run_llm_again"
-    """This lets you configure how tool use is handled.
+    """
+    This lets you configure how tool use is handled.
     - "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results
         and gets to respond.
     - "stop_on_first_tool": The output of the first tool call is used as the final output. This
         means that the LLM does not process the result of the tool call.
-    - A StopAtTools object: The agent will stop running if any of the tools in the list are called.
-        The final output will be the output of the first matching tool call. The LLM does not
-        process the result of the tool call.
+    - A StopAtTools object: The agent will stop running if any of the tools listed in
+        `stop_at_tool_names` is called.
+        The final output will be the output of the first matching tool call.
+        The LLM does not process the result of the tool call.
     - A function: If you pass a function, it will be called with the run context and the list of
       tool results. It must return a `ToolsToFinalOutputResult`, which determines whether the tool
       calls result in a final output.
@@ -222,10 +224,17 @@ class Agent(AgentBase, Generic[TContext]):
         to True. This ensures that the agent doesn't enter an infinite loop of tool usage."""

     def clone(self, **kwargs: Any) -> Agent[TContext]:
-        """Make a copy of the agent, with the given arguments changed. For example, you could do:
-        ```
-        new_agent = agent.clone(instructions="New instructions")
-        ```
+        """Make a copy of the agent, with the given arguments changed.
+        Notes:
+            - Uses `dataclasses.replace`, which performs a **shallow copy**.
+            - Mutable attributes like `tools` and `handoffs` are shallow-copied:
+              new list objects are created only if overridden, but their contents
+              (tool functions and handoff objects) are shared with the original.
+            - To modify these independently, pass new lists when calling `clone()`.
+        Example:
+            ```python
+            new_agent = agent.clone(instructions="New instructions")
+            ```
         """
         return dataclasses.replace(self, **kwargs)
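The expanded `clone()` docstring matters in practice: fields you don't override are shared with the original. A quick sketch of the pitfall:

```python
from agents import Agent, function_tool

@function_tool
def greet(name: str) -> str:
    """Return a greeting."""
    return f"Hello, {name}!"

base = Agent(name="base", tools=[greet])
shallow = base.clone(name="copy")
assert shallow.tools is base.tools  # same list object via dataclasses.replace
independent = base.clone(name="indie", tools=[greet])  # pass a new list instead
```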
agents/extensions/models/litellm_model.py
CHANGED
@@ -18,13 +18,17 @@ except ImportError as _e:
     ) from _e

 from openai import NOT_GIVEN, AsyncStream, NotGiven
-from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageToolCall
+from openai.types.chat import (
+    ChatCompletionChunk,
+    ChatCompletionMessageFunctionToolCall,
+)
 from openai.types.chat.chat_completion_message import (
     Annotation,
     AnnotationURLCitation,
     ChatCompletionMessage,
 )
-from openai.types.chat.chat_completion_message_tool_call import Function
+from openai.types.chat.chat_completion_message_function_tool_call import Function
+from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
 from openai.types.responses import Response

 from ... import _debug
@@ -321,6 +325,7 @@ class LitellmModel(Model):
             stream=stream,
             stream_options=stream_options,
             reasoning_effort=reasoning_effort,
+            top_logprobs=model_settings.top_logprobs,
             extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             api_key=self.api_key,
             base_url=self.base_url,
@@ -361,7 +366,7 @@ class LitellmConverter:
         if message.role != "assistant":
             raise ModelBehaviorError(f"Unsupported role: {message.role}")

-        tool_calls = (
+        tool_calls: list[ChatCompletionMessageToolCall] | None = (
             [LitellmConverter.convert_tool_call_to_openai(tool) for tool in message.tool_calls]
             if message.tool_calls
             else None
@@ -412,11 +417,12 @@ class LitellmConverter:
     @classmethod
     def convert_tool_call_to_openai(
         cls, tool_call: litellm.types.utils.ChatCompletionMessageToolCall
-    ) -> ChatCompletionMessageToolCall:
-        return ChatCompletionMessageToolCall(
+    ) -> ChatCompletionMessageFunctionToolCall:
+        return ChatCompletionMessageFunctionToolCall(
             id=tool_call.id,
             type="function",
             function=Function(
-                name=tool_call.function.name or "", arguments=tool_call.function.arguments
+                name=tool_call.function.name or "",
+                arguments=tool_call.function.arguments,
             ),
         )
agents/extensions/visualization.py
CHANGED
@@ -71,6 +71,12 @@ def get_all_nodes(
             f"fillcolor=lightgreen, width=0.5, height=0.3];"
         )

+    for mcp_server in agent.mcp_servers:
+        parts.append(
+            f'"{mcp_server.name}" [label="{mcp_server.name}", shape=box, style=filled, '
+            f"fillcolor=lightgrey, width=1, height=0.5];"
+        )
+
     for handoff in agent.handoffs:
         if isinstance(handoff, Handoff):
             parts.append(
@@ -119,6 +125,11 @@ def get_all_edges(
         "{agent.name}" -> "{tool.name}" [style=dotted, penwidth=1.5];
         "{tool.name}" -> "{agent.name}" [style=dotted, penwidth=1.5];""")

+    for mcp_server in agent.mcp_servers:
+        parts.append(f"""
+        "{agent.name}" -> "{mcp_server.name}" [style=dashed, penwidth=1.5];
+        "{mcp_server.name}" -> "{agent.name}" [style=dashed, penwidth=1.5];""")
+
     for handoff in agent.handoffs:
         if isinstance(handoff, Handoff):
             parts.append(f"""
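With these nodes and edges in place, `draw_graph` renders any MCP servers attached to an agent as grey boxes joined by dashed edges. A sketch (assumes Graphviz is installed; per the extension's docs, `filename` writes the rendered image):

```python
from agents import Agent
from agents.extensions.visualization import draw_graph

agent = Agent(name="researcher")  # attach mcp_servers to see dashed edges
draw_graph(agent, filename="agent_graph")
```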
agents/handoffs.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import inspect
 import json
 from collections.abc import Awaitable
-from dataclasses import dataclass
+from dataclasses import dataclass, replace as dataclasses_replace
 from typing import TYPE_CHECKING, Any, Callable, Generic, cast, overload

 from pydantic import TypeAdapter
@@ -49,8 +49,24 @@ class HandoffInputData:
     handoff and the tool output message representing the response from the handoff output.
     """

+    run_context: RunContextWrapper[Any] | None = None
+    """
+    The run context at the time the handoff was invoked.
+    Note that, since this property was added later on, it's optional for backwards compatibility.
+    """
+
+    def clone(self, **kwargs: Any) -> HandoffInputData:
+        """
+        Make a copy of the handoff input data, with the given arguments changed. For example, you
+        could do:
+        ```
+        new_handoff_input_data = handoff_input_data.clone(new_items=())
+        ```
+        """
+        return dataclasses_replace(self, **kwargs)

-HandoffInputFilter: TypeAlias = Callable[[HandoffInputData], HandoffInputData]
+
+HandoffInputFilter: TypeAlias = Callable[[HandoffInputData], MaybeAwaitable[HandoffInputData]]
 """A function that filters the input data passed to the next agent."""


@@ -103,9 +119,9 @@ class Handoff(Generic[TContext, TAgent]):
     True, as it increases the likelihood of correct JSON input.
     """

-    is_enabled: bool | Callable[
-    …
-    …
+    is_enabled: bool | Callable[
+        [RunContextWrapper[Any], AgentBase[Any]], MaybeAwaitable[bool]
+    ] = True
     """Whether the handoff is enabled. Either a bool or a Callable that takes the run context and
     agent and returns whether the handoff is enabled. You can use this to dynamically enable/disable
     a handoff based on your context/state."""
@@ -248,7 +264,7 @@ def handoff(
         async def _is_enabled(ctx: RunContextWrapper[Any], agent_base: AgentBase[Any]) -> bool:
             from .agent import Agent

-            assert callable(is_enabled), "is_enabled must be …"
+            assert callable(is_enabled), "is_enabled must be callable here"
             assert isinstance(agent_base, Agent), "Can't handoff to a non-Agent"
             result = is_enabled(ctx, agent_base)
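The new `run_context` field and `clone()` helper make filters easier to write, since the dataclass no longer has to be rebuilt by hand. A sketch of a context-aware filter (the `debug` attribute is a hypothetical field on your own context object):

```python
from agents.handoffs import HandoffInputData

def drop_pre_handoff_items(data: HandoffInputData) -> HandoffInputData:
    # run_context is populated by the runner; it is None for older callers.
    if data.run_context is not None and getattr(data.run_context.context, "debug", False):
        return data  # keep full history while debugging
    return data.clone(pre_handoff_items=())
```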
agents/items.py
CHANGED
@@ -66,7 +66,7 @@ class RunItemBase(Generic[T], abc.ABC):
     """The agent whose run caused this item to be generated."""

     raw_item: T
-    """The raw Responses item from the run. This will always be …
+    """The raw Responses item from the run. This will always be either an output item (i.e.
     `openai.types.responses.ResponseOutputItem` or an input item
     (i.e. `openai.types.responses.ResponseInputItemParam`).
     """
@@ -243,6 +243,8 @@ class ItemHelpers:
         if not isinstance(message, ResponseOutputMessage):
             return ""

+        if not message.content:
+            return ""
         last_content = message.content[-1]
         if isinstance(last_content, ResponseOutputText):
             return last_content.text
@@ -255,6 +257,8 @@ class ItemHelpers:
     def extract_last_text(cls, message: TResponseOutputItem) -> str | None:
         """Extracts the last text content from a message, if any. Ignores refusals."""
         if isinstance(message, ResponseOutputMessage):
+            if not message.content:
+                return None
             last_content = message.content[-1]
             if isinstance(last_content, ResponseOutputText):
                 return last_content.text
agents/lifecycle.py
CHANGED
@@ -42,7 +42,7 @@ class RunHooksBase(Generic[TContext, TAgent]):
         agent: TAgent,
         tool: Tool,
     ) -> None:
-        """Called before a tool is invoked."""
+        """Called concurrently with tool invocation."""
         pass

     async def on_tool_end(
@@ -93,7 +93,7 @@ class AgentHooksBase(Generic[TContext, TAgent]):
         agent: TAgent,
         tool: Tool,
     ) -> None:
-        """Called before a tool is invoked."""
+        """Called concurrently with tool invocation."""
         pass

     async def on_tool_end(
agents/mcp/util.py
CHANGED
@@ -194,23 +194,21 @@ class MCPUtil:
         else:
             logger.debug(f"MCP tool {tool.name} returned {result}")

-        # The MCP tool result is a list of content items, whereas OpenAI tool outputs are
-        # a single string. We'll try to convert.
-        if len(result.content) == 1:
-            tool_output = result.content[0].model_dump_json()
-            # Append structured content if it exists and we're using it.
-            if server.use_structured_content and result.structuredContent:
-                tool_output = f"{tool_output}\n{json.dumps(result.structuredContent)}"
-        elif len(result.content) > 1:
-            tool_results = [item.model_dump(mode="json") for item in result.content]
-            if server.use_structured_content and result.structuredContent:
-                tool_results.append(result.structuredContent)
-            tool_output = json.dumps(tool_results)
-        elif server.use_structured_content and result.structuredContent:
+        # If structured content is requested and available, use it exclusively
+        if server.use_structured_content and result.structuredContent:
             tool_output = json.dumps(result.structuredContent)
         else:
-            # Empty content is a valid result (e.g., "no results found")
-            tool_output = "[]"
+            # Fall back to regular text content processing
+            # The MCP tool result is a list of content items, whereas OpenAI tool
+            # outputs are a single string. We'll try to convert.
+            if len(result.content) == 1:
+                tool_output = result.content[0].model_dump_json()
+            elif len(result.content) > 1:
+                tool_results = [item.model_dump(mode="json") for item in result.content]
+                tool_output = json.dumps(tool_results)
+            else:
+                # Empty content is a valid result (e.g., "no results found")
+                tool_output = "[]"

         current_span = get_current_span()
         if current_span:
agents/model_settings.py
CHANGED
@@ -55,6 +55,7 @@ Headers: TypeAlias = Mapping[str, Union[str, Omit]]
 ToolChoice: TypeAlias = Union[Literal["auto", "required", "none"], str, MCPToolChoice, None]


+
 @dataclass
 class ModelSettings:
     """Settings to use when calling an LLM.
@@ -106,16 +107,21 @@ class ModelSettings:

     store: bool | None = None
     """Whether to store the generated model response for later retrieval.
-    …"""
+    For Responses API: automatically enabled when not specified.
+    For Chat Completions API: disabled when not specified."""

     include_usage: bool | None = None
     """Whether to include usage chunk.
-    …"""
+    Only available for Chat Completions API."""

     response_include: list[ResponseIncludable] | None = None
     """Additional output data to include in the model response.
     [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""

+    top_logprobs: int | None = None
+    """Number of top tokens to return logprobs for. Setting this will
+    automatically include ``"message.output_text.logprobs"`` in the response."""
+
     extra_query: Query | None = None
     """Additional query fields to provide with the request.
     Defaults to None if not provided."""
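`top_logprobs` threads through all three model paths in this release: Chat Completions and LiteLLM pass it directly, while the Responses model also adds `"message.output_text.logprobs"` to `include`. Enabling it is one setting:

```python
from agents import Agent, ModelSettings

agent = Agent(
    name="assistant",
    model_settings=ModelSettings(top_logprobs=3),  # logprobs for top 3 tokens
)
```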
agents/models/chatcmpl_converter.py
CHANGED
@@ -12,8 +12,8 @@ from openai.types.chat import (
     ChatCompletionContentPartTextParam,
     ChatCompletionDeveloperMessageParam,
     ChatCompletionMessage,
+    ChatCompletionMessageFunctionToolCallParam,
     ChatCompletionMessageParam,
-    ChatCompletionMessageToolCallParam,
     ChatCompletionSystemMessageParam,
     ChatCompletionToolChoiceOptionParam,
     ChatCompletionToolMessageParam,
@@ -126,15 +126,18 @@ class Converter:

         if message.tool_calls:
             for tool_call in message.tool_calls:
-                items.append(
-                    ResponseFunctionToolCall(
-                        id=FAKE_RESPONSES_ID,
-                        call_id=tool_call.id,
-                        arguments=tool_call.function.arguments,
-                        name=tool_call.function.name,
-                        type="function_call",
+                if tool_call.type == "function":
+                    items.append(
+                        ResponseFunctionToolCall(
+                            id=FAKE_RESPONSES_ID,
+                            call_id=tool_call.id,
+                            arguments=tool_call.function.arguments,
+                            name=tool_call.function.name,
+                            type="function_call",
+                        )
                     )
-                )
+                elif tool_call.type == "custom":
+                    pass

         return items

@@ -420,7 +423,7 @@ class Converter:
         elif file_search := cls.maybe_file_search_call(item):
             asst = ensure_assistant_message()
             tool_calls = list(asst.get("tool_calls", []))
-            new_tool_call = ChatCompletionMessageToolCallParam(
+            new_tool_call = ChatCompletionMessageFunctionToolCallParam(
                 id=file_search["id"],
                 type="function",
                 function={
@@ -440,7 +443,7 @@ class Converter:
             asst = ensure_assistant_message()
             tool_calls = list(asst.get("tool_calls", []))
             arguments = func_call["arguments"] if func_call["arguments"] else "{}"
-            new_tool_call = ChatCompletionMessageToolCallParam(
+            new_tool_call = ChatCompletionMessageFunctionToolCallParam(
                 id=func_call["call_id"],
                 type="function",
                 function={
agents/models/openai_chatcompletions.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 import time
 from collections.abc import AsyncIterator
-from typing import TYPE_CHECKING, Any, Literal, cast, overload
+from typing import TYPE_CHECKING, Any, Literal, overload

 from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
@@ -28,6 +28,7 @@ from .chatcmpl_helpers import HEADERS, ChatCmplHelpers
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
 from .fake_id import FAKE_RESPONSES_ID
 from .interface import Model, ModelTracing
+from .openai_responses import Converter as OpenAIResponsesConverter

 if TYPE_CHECKING:
     from ..model_settings import ModelSettings
@@ -286,6 +287,7 @@ class OpenAIChatCompletionsModel(Model):
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
+            top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
             extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
@@ -296,15 +298,27 @@ class OpenAIChatCompletionsModel(Model):
         if isinstance(ret, ChatCompletion):
             return ret

+        responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
+            model_settings.tool_choice
+        )
+        if responses_tool_choice is None or responses_tool_choice == NOT_GIVEN:
+            # For Responses API data compatibility with Chat Completions patterns,
+            # we need to set "none" if tool_choice is absent.
+            # Without this fix, you'll get the following error:
+            # pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
+            # tool_choice.literal['none','auto','required']
+            #   Input should be 'none', 'auto' or 'required'
+            #   [type=literal_error, input_value=NOT_GIVEN, input_type=NotGiven]
+            # see also: https://github.com/openai/openai-agents-python/issues/980
+            responses_tool_choice = "auto"
+
         response = Response(
             id=FAKE_RESPONSES_ID,
             created_at=time.time(),
             model=self.model,
             object="response",
             output=[],
-            tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
-            if tool_choice != NOT_GIVEN
-            else "auto",
+            tool_choice=responses_tool_choice,  # type: ignore[arg-type]
             top_p=model_settings.top_p,
             temperature=model_settings.temperature,
             tools=[],
agents/models/openai_responses.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 from collections.abc import AsyncIterator
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload

 from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
 from openai.types import ChatModel
@@ -247,9 +247,12 @@ class OpenAIResponsesModel(Model):
         converted_tools = Converter.convert_tools(tools, handoffs)
         response_format = Converter.get_response_format(output_schema)

-        include: list[ResponseIncludable] = converted_tools.includes
+        include_set: set[str] = set(converted_tools.includes)
         if model_settings.response_include is not None:
-            include = [*include, *model_settings.response_include]
+            include_set.update(model_settings.response_include)
+        if model_settings.top_logprobs is not None:
+            include_set.add("message.output_text.logprobs")
+        include = cast(list[ResponseIncludable], list(include_set))

         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
@@ -264,6 +267,10 @@ class OpenAIResponsesModel(Model):
             f"Previous response id: {previous_response_id}\n"
         )

+        extra_args = dict(model_settings.extra_args or {})
+        if model_settings.top_logprobs is not None:
+            extra_args["top_logprobs"] = model_settings.top_logprobs
+
         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
             instructions=self._non_null_or_not_given(system_instructions),
@@ -286,7 +293,7 @@ class OpenAIResponsesModel(Model):
             store=self._non_null_or_not_given(model_settings.store),
             reasoning=self._non_null_or_not_given(model_settings.reasoning),
             metadata=self._non_null_or_not_given(model_settings.metadata),
-            **(model_settings.extra_args or {}),
+            **extra_args,
         )

     def _get_client(self) -> AsyncOpenAI:
agents/realtime/agent.py
CHANGED
@@ -7,6 +7,7 @@ from dataclasses import dataclass, field
 from typing import Any, Callable, Generic, cast

 from ..agent import AgentBase
+from ..guardrail import OutputGuardrail
 from ..handoffs import Handoff
 from ..lifecycle import AgentHooksBase, RunHooksBase
 from ..logger import logger
@@ -62,6 +63,11 @@ class RealtimeAgent(AgentBase, Generic[TContext]):
     modularity.
     """

+    output_guardrails: list[OutputGuardrail[TContext]] = field(default_factory=list)
+    """A list of checks that run on the final output of the agent, after generating a response.
+    Runs only if the agent produces a final output.
+    """
+
     hooks: RealtimeAgentHooks | None = None
     """A class that receives callbacks on various lifecycle events for this agent.
     """
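Realtime agents can now carry their own output guardrails, merged with any configured on the run. A sketch using the standard `output_guardrail` decorator (the check itself is illustrative):

```python
from agents import GuardrailFunctionOutput, output_guardrail
from agents.realtime import RealtimeAgent

@output_guardrail
def no_account_numbers(context, agent, output: str) -> GuardrailFunctionOutput:
    return GuardrailFunctionOutput(
        output_info=None,
        tripwire_triggered="account number" in output.lower(),
    )

support_agent = RealtimeAgent(
    name="support",
    output_guardrails=[no_account_numbers],
)
```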
agents/realtime/config.py
CHANGED
@@ -94,6 +94,9 @@ class RealtimeSessionModelSettings(TypedDict):
     voice: NotRequired[str]
     """The voice to use for audio output."""

+    speed: NotRequired[float]
+    """The speed of the model's responses."""
+
     input_audio_format: NotRequired[RealtimeAudioFormat]
     """The format for input audio streams."""
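`speed` joins `voice` in the session settings TypedDict; the value is passed through to the realtime session update (see the `openai_realtime.py` hunks below). A sketch:

```python
from agents.realtime.config import RealtimeSessionModelSettings

settings: RealtimeSessionModelSettings = {"voice": "alloy", "speed": 1.2}
```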
agents/realtime/openai_realtime.py
CHANGED
@@ -150,7 +150,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):

         model_settings: RealtimeSessionModelSettings = options.get("initial_model_settings", {})

-        self._playback_tracker = options.get("playback_tracker", …)
+        self._playback_tracker = options.get("playback_tracker", None)

         self.model = model_settings.get("model_name", self.model)
         api_key = await get_api_key(options.get("api_key"))
@@ -226,7 +226,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):

         except websockets.exceptions.ConnectionClosedOK:
             # Normal connection closure - no exception event needed
-            logger.info("WebSocket connection closed normally")
+            logger.debug("WebSocket connection closed normally")
         except websockets.exceptions.ConnectionClosed as e:
             await self._emit_event(
                 RealtimeModelExceptionEvent(
@@ -329,7 +329,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         current_item_content_index = playback_state.get("current_item_content_index")
         elapsed_ms = playback_state.get("elapsed_ms")
         if current_item_id is None or elapsed_ms is None:
-            logger.info(
+            logger.debug(
                 "Skipping interrupt. "
                 f"Item id: {current_item_id}, "
                 f"elapsed ms: {elapsed_ms}, "
@@ -351,6 +351,13 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                 int(elapsed_ms),
             )
             await self._send_raw_message(converted)
+        else:
+            logger.debug(
+                "Didn't interrupt bc elapsed ms is < 0. "
+                f"Item id: {current_item_id}, "
+                f"elapsed ms: {elapsed_ms}, "
+                f"content index: {current_item_content_index}"
+            )

         automatic_response_cancellation_enabled = (
             self._created_session
@@ -569,6 +576,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                 or DEFAULT_MODEL_SETTINGS.get("model_name")
             ),
             voice=model_settings.get("voice", DEFAULT_MODEL_SETTINGS.get("voice")),
+            speed=model_settings.get("speed", None),
             modalities=model_settings.get("modalities", DEFAULT_MODEL_SETTINGS.get("modalities")),
             input_audio_format=model_settings.get(
                 "input_audio_format",
agents/realtime/session.py
CHANGED
@@ -180,6 +180,19 @@ class RealtimeSession(RealtimeModelListener):
         """Interrupt the model."""
         await self._model.send_event(RealtimeModelSendInterrupt())

+    async def update_agent(self, agent: RealtimeAgent) -> None:
+        """Update the active agent for this session and apply its settings to the model."""
+        self._current_agent = agent
+
+        updated_settings = await self._get_updated_model_settings_from_agent(
+            starting_settings=None,
+            agent=self._current_agent,
+        )
+
+        await self._model.send_event(
+            RealtimeModelSendSessionUpdate(session_settings=updated_settings)
+        )
+
     async def on_event(self, event: RealtimeModelEvent) -> None:
         await self._put_event(RealtimeRawModelEvent(data=event, info=self._event_info))

@@ -361,19 +374,20 @@ class RealtimeSession(RealtimeModelListener):
                     )
                 )

-                # …
+                # First, send the session update so the model receives the new instructions
+                await self._model.send_event(
+                    RealtimeModelSendSessionUpdate(session_settings=updated_settings)
+                )
+
+                # Then send tool output to complete the handoff (this triggers a new response)
+                transfer_message = handoff.get_transfer_message(result)
                 await self._model.send_event(
                     RealtimeModelSendToolOutput(
                         tool_call=event,
-                        output=handoff.get_transfer_message(result),
+                        output=transfer_message,
                         start_response=True,
                     )
                 )
-
-                # Send session update to model
-                await self._model.send_event(
-                    RealtimeModelSendSessionUpdate(session_settings=updated_settings)
-                )
             else:
                 raise ModelBehaviorError(f"Tool {event.name} not found")

@@ -430,7 +444,17 @@ class RealtimeSession(RealtimeModelListener):

     async def _run_output_guardrails(self, text: str) -> bool:
         """Run output guardrails on the given text. Returns True if any guardrail was triggered."""
-        output_guardrails = self._run_config.get("output_guardrails", [])
+        combined_guardrails = self._current_agent.output_guardrails + self._run_config.get(
+            "output_guardrails", []
+        )
+        seen_ids: set[int] = set()
+        output_guardrails = []
+        for guardrail in combined_guardrails:
+            guardrail_id = id(guardrail)
+            if guardrail_id not in seen_ids:
+                output_guardrails.append(guardrail)
+                seen_ids.add(guardrail_id)
+
        if not output_guardrails or self._interrupted_by_guardrail:
             return False
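`update_agent` is the user-facing half of these changes: it re-points the live session at a different agent and pushes the recomputed settings via a session update. A sketch (agent definitions are illustrative):

```python
from agents.realtime import RealtimeAgent, RealtimeRunner

async def switch_language() -> None:
    english = RealtimeAgent(name="English helper")
    spanish = RealtimeAgent(name="Spanish helper", instructions="Habla español.")
    session = await RealtimeRunner(english).run()
    async with session:
        # New in 0.2.6: swap the active agent mid-session.
        await session.update_agent(spanish)
```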
agents/run.py
CHANGED
@@ -904,10 +904,9 @@ class AgentRunner:
             raise ModelBehaviorError("Model did not produce a final response!")

         # 3. Now, we can process the turn as we do in the non-streaming case
-        single_step_result = await cls._get_single_step_result_from_response(
+        return await cls._get_single_step_result_from_streamed_response(
             agent=agent,
-            original_input=streamed_result.input,
-            pre_step_items=streamed_result.new_items,
+            streamed_result=streamed_result,
             new_response=final_response,
             output_schema=output_schema,
             all_tools=all_tools,
@@ -918,9 +917,6 @@ class AgentRunner:
             tool_use_tracker=tool_use_tracker,
         )

-        RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue)
-        return single_step_result
-
     @classmethod
     async def _run_single_turn(
         cls,
@@ -1023,6 +1019,57 @@ class AgentRunner:
             run_config=run_config,
         )

+    @classmethod
+    async def _get_single_step_result_from_streamed_response(
+        cls,
+        *,
+        agent: Agent[TContext],
+        all_tools: list[Tool],
+        streamed_result: RunResultStreaming,
+        new_response: ModelResponse,
+        output_schema: AgentOutputSchemaBase | None,
+        handoffs: list[Handoff],
+        hooks: RunHooks[TContext],
+        context_wrapper: RunContextWrapper[TContext],
+        run_config: RunConfig,
+        tool_use_tracker: AgentToolUseTracker,
+    ) -> SingleStepResult:
+
+        original_input = streamed_result.input
+        pre_step_items = streamed_result.new_items
+        event_queue = streamed_result._event_queue
+
+        processed_response = RunImpl.process_model_response(
+            agent=agent,
+            all_tools=all_tools,
+            response=new_response,
+            output_schema=output_schema,
+            handoffs=handoffs,
+        )
+        new_items_processed_response = processed_response.new_items
+        tool_use_tracker.add_tool_use(agent, processed_response.tools_used)
+        RunImpl.stream_step_items_to_queue(new_items_processed_response, event_queue)
+
+        single_step_result = await RunImpl.execute_tools_and_side_effects(
+            agent=agent,
+            original_input=original_input,
+            pre_step_items=pre_step_items,
+            new_response=new_response,
+            processed_response=processed_response,
+            output_schema=output_schema,
+            hooks=hooks,
+            context_wrapper=context_wrapper,
+            run_config=run_config,
+        )
+        new_step_items = [
+            item
+            for item in single_step_result.new_step_items
+            if item not in new_items_processed_response
+        ]
+        RunImpl.stream_step_items_to_queue(new_step_items, event_queue)
+
+        return single_step_result
+
     @classmethod
     async def _run_input_guardrails(
         cls,
agents/tracing/processors.py
CHANGED
@@ -69,9 +69,12 @@ class BackendSpanExporter(TracingExporter):
             api_key: The OpenAI API key to use. This is the same key used by the OpenAI Python
                 client.
         """
-        # …
+        # Clear the cached property if it exists
+        if 'api_key' in self.__dict__:
+            del self.__dict__['api_key']
+
+        # Update the private attribute
         self._api_key = api_key
-        self.api_key = api_key

     @cached_property
     def api_key(self):
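The fix matters because `api_key` is a `functools.cached_property`: once read, the cached value shadowed later updates, so `set_api_key` now evicts the cache entry before storing the new key. A sketch:

```python
from agents.tracing.processors import BackendSpanExporter

exporter = BackendSpanExporter()
_ = exporter.api_key            # primes the cached_property cache
exporter.set_api_key("sk-new")  # evicts the cache, updates _api_key
assert exporter.api_key == "sk-new"
```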
agents/tracing/provider.py
CHANGED
@@ -43,28 +43,40 @@ class SynchronousMultiTracingProcessor(TracingProcessor):
         Called when a trace is started.
         """
         for processor in self._processors:
-            processor.on_trace_start(trace)
+            try:
+                processor.on_trace_start(trace)
+            except Exception as e:
+                logger.error(f"Error in trace processor {processor} during on_trace_start: {e}")

     def on_trace_end(self, trace: Trace) -> None:
         """
         Called when a trace is finished.
         """
         for processor in self._processors:
-            processor.on_trace_end(trace)
+            try:
+                processor.on_trace_end(trace)
+            except Exception as e:
+                logger.error(f"Error in trace processor {processor} during on_trace_end: {e}")

     def on_span_start(self, span: Span[Any]) -> None:
         """
         Called when a span is started.
         """
         for processor in self._processors:
-            processor.on_span_start(span)
+            try:
+                processor.on_span_start(span)
+            except Exception as e:
+                logger.error(f"Error in trace processor {processor} during on_span_start: {e}")

     def on_span_end(self, span: Span[Any]) -> None:
         """
         Called when a span is finished.
         """
         for processor in self._processors:
-            processor.on_span_end(span)
+            try:
+                processor.on_span_end(span)
+            except Exception as e:
+                logger.error(f"Error in trace processor {processor} during on_span_end: {e}")

     def shutdown(self) -> None:
         """
@@ -72,14 +84,20 @@ class SynchronousMultiTracingProcessor(TracingProcessor):
         """
         for processor in self._processors:
             logger.debug(f"Shutting down trace processor {processor}")
-            processor.shutdown()
+            try:
+                processor.shutdown()
+            except Exception as e:
+                logger.error(f"Error shutting down trace processor {processor}: {e}")

     def force_flush(self):
         """
         Force the processors to flush their buffers.
         """
         for processor in self._processors:
-            processor.force_flush()
+            try:
+                processor.force_flush()
+            except Exception as e:
+                logger.error(f"Error flushing trace processor {processor}: {e}")


 class TraceProvider(ABC):
@@ -247,7 +265,7 @@ class DefaultTraceProvider(TraceProvider):
         current_trace = Scope.get_current_trace()
         if current_trace is None:
             logger.error(
-                "No active trace. Make sure to start a trace with `trace()` first"
+                "No active trace. Make sure to start a trace with `trace()` first "
                 "Returning NoOpSpan."
             )
             return NoOpSpan(span_data)
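Each processor callback is now isolated, so one misbehaving processor can no longer break tracing or starve its peers. A deliberately failing processor illustrates the contract:

```python
from agents import add_trace_processor
from agents.tracing.processor_interface import TracingProcessor

class FlakyProcessor(TracingProcessor):
    """Raises on trace start; the error is now logged rather than propagated."""

    def on_trace_start(self, trace) -> None:
        raise RuntimeError("boom")

    def on_trace_end(self, trace) -> None:
        pass

    def on_span_start(self, span) -> None:
        pass

    def on_span_end(self, span) -> None:
        pass

    def shutdown(self) -> None:
        pass

    def force_flush(self) -> None:
        pass

add_trace_processor(FlakyProcessor())  # other processors keep receiving events
```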
{openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.4
+Version: 0.2.6
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.…
+Requires-Dist: openai<2,>=1.99.6
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
@@ -196,6 +196,10 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r

 The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing), which also includes a larger list of [external tracing processors](http://openai.github.io/openai-agents-python/tracing/#external-tracing-processors-list).

+## Long running agents & human-in-the-loop
+
+You can use the Agents SDK [Temporal](https://temporal.io/) integration to run durable, long-running workflows, including human-in-the-loop tasks. View a demo of Temporal and the Agents SDK working in action to complete long-running tasks [in this video](https://www.youtube.com/watch?v=fFBZqzT4DD8), and [view docs here](https://github.com/temporalio/sdk-python/tree/main/temporalio/contrib/openai_agents).
+
 ## Sessions

 The Agents SDK provides built-in session memory to automatically maintain conversation history across multiple agent runs, eliminating the need to manually handle `.to_input_list()` between turns.
{openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/RECORD
CHANGED
@@ -1,23 +1,23 @@
-agents/__init__.py,sha256=…
+agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=…
-agents/agent.py,sha256=…
+agents/_run_impl.py,sha256=8Bc8YIHzv8Qf40tUAcHV5qqUkGSUxSraNkV0Y5xLFFQ,44894
+agents/agent.py,sha256=zBhC_bL5WuAmXAHJTj_ZgN5Nxj8jq8vZspdX8B0do38,12648
 agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
 agents/function_schema.py,sha256=yZ3PEOmfy836Me_W4QlItMeFq2j4BtpuI2FmQswbIcQ,13590
 agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
-agents/handoffs.py,sha256=…
-agents/items.py,sha256=…
-agents/lifecycle.py,sha256=…
+agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
+agents/items.py,sha256=ntrJ-HuqSMC8HtIwS9pcqHYXtiQ2TJB6lHR-bcvNn4c,9848
+agents/lifecycle.py,sha256=sJwESHBHbml7rSYH360-P6x1bLyENcQWm4bT4rQcbuo,3129
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=…
+agents/model_settings.py,sha256=SKCrfV5A7u0zaY8fh2PZRe08W5sEhArHC3YHpEfeip0,6357
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 agents/repl.py,sha256=FKZlkGfw6QxItTkjFkCAQwXuV_pn69DIamGd3PiKQFk,2361
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=…
+agents/run.py,sha256=Q0UcLVjlmWjpEvXpWm-0obDU5Gu5T9eJ7xW29wW-QEA,52453
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
@@ -26,49 +26,49 @@ agents/tool_context.py,sha256=lbnctijZeanXAThddkklF7vDrXK1Ie2_wx6JZPCOihI,1434
 agents/usage.py,sha256=Tb5udGd3DPgD0JBdRD8fDctTE4M-zKML5uRn8ZG1yBc,1675
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
 agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/handoff_filters.py,sha256=…
+agents/extensions/handoff_filters.py,sha256=Bzkjb1SmIHoibgO26oesNO2Qdx2avfDGkHrSTb-XAr0,2029
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
-agents/extensions/visualization.py,sha256=…
+agents/extensions/visualization.py,sha256=sf9D_C-HMwkbWdZccTZvvMPRy_NSiwbm48tRJlESQBI,5144
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=…
+agents/extensions/models/litellm_model.py,sha256=4m6MVYaa-pJzXuBNRZGv0vw2R73R32B0EAZ1kXanVVw,15692
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
 agents/mcp/server.py,sha256=mTXQL4om5oA2fYevk63SUlwDri-RcUleUH_4hFrA0QM,24266
-agents/mcp/util.py,sha256=…
+agents/mcp/util.py,sha256=YVdPst1wWkTwbeshs-FYbr_MtrYJwO_4NzhSwj5aE5c,8239
 agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
 agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=…
+agents/models/chatcmpl_converter.py,sha256=fdA-4_O7GabTCFZJOrtI6TdxFvjS4Bn4vf2RwVC9yNA,20012
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
 agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6NhbfHfpCY3SXJOM,24124
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=…
+agents/models/openai_chatcompletions.py,sha256=N_8U_rKRhB1pgMju29bOok1QFWF_EL7JoatlKzy7hLY,13102
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=…
+agents/models/openai_responses.py,sha256=pBAHIwz_kq561bmzqMwz6L4dFd_R4V5C7R21xLBM__o,17048
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
 agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
 agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
 agents/realtime/_util.py,sha256=uawurhWKi3_twNFcZ5Yn1mVvv0RKl4IoyCSag8hGxrE,313
-agents/realtime/agent.py,sha256=…
-agents/realtime/config.py,sha256=…
+agents/realtime/agent.py,sha256=yZDgycnLFtJcfl7UHak5GEyL2vdBGxegfqEiuuzGPEk,4027
+agents/realtime/config.py,sha256=FMLT2BdxjOCHmBnvd35sZk68U4jEXypngMRAPkm-irk,5828
 agents/realtime/events.py,sha256=YnyXmkc2rkIAcCDoW5yxylMYeXeaq_QTlyRR5u5VsaM,5534
 agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
 agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
 agents/realtime/model.py,sha256=RJBA8-Dkd2JTqGzbKacoX4dN_qTWn_p7npL73To3ymw,6143
 agents/realtime/model_events.py,sha256=X7UrUU_g4u5gWaf2mUesJJ-Ik1Z1QE0Z-ZP7kDmX1t0,4034
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=…
+agents/realtime/openai_realtime.py,sha256=vgzgklFcRpB9ZfsDda7DtXlBn3NF6bZdysta1DwQhrM,30120
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
-agents/realtime/session.py,sha256=…
+agents/realtime/session.py,sha256=yMHFhqhBKDHzlK-k6JTuqXKggMPW3dPt-aavqDoKsec,23375
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
 agents/tracing/processor_interface.py,sha256=e1mWcIAoQFHID1BapcrAZ6MxZg98bPVYgbOPclVoCXc,1660
-agents/tracing/processors.py,sha256=…
-agents/tracing/provider.py,sha256=…
+agents/tracing/processors.py,sha256=IKZ_dfQmcs8OaMqNbzWRtimY4nm1xfNRjVguWl6I8SY,11432
+agents/tracing/provider.py,sha256=a8bOZtBUih13Gjq8OtyIcx3AWJmCErc43gqPrccx_5k,10098
 agents/tracing/scope.py,sha256=u17_m8RPpGvbHrTkaO_kDi5ROBWhfOAIgBe7suiaRD4,1445
 agents/tracing/setup.py,sha256=2h9TH1GAKcXKM1U99dOKKR3XlHp8JKzh2JG3DQPKyhY,612
 agents/tracing/span_data.py,sha256=nI2Fbu1ORE8ybE6m6RuddTJF5E5xFmEj8Mq5bSFv4bE,9017
@@ -97,7 +97,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.4.dist-info/METADATA,sha256=…
-openai_agents-0.2.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.2.4.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.2.4.dist-info/RECORD,,
+openai_agents-0.2.6.dist-info/METADATA,sha256=E_Fnl2A-qaNEFT07zAH1lx7zIj-XVh7Wli5P5NhfjR0,12104
+openai_agents-0.2.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.6.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.6.dist-info/RECORD,,
{openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/WHEEL
File without changes
{openai_agents-0.2.4.dist-info → openai_agents-0.2.6.dist-info}/licenses/LICENSE
File without changes