openai-agents 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openai-agents might be problematic.

Files changed (53)
  1. agents/__init__.py +223 -0
  2. agents/_config.py +23 -0
  3. agents/_debug.py +17 -0
  4. agents/_run_impl.py +792 -0
  5. agents/_utils.py +61 -0
  6. agents/agent.py +159 -0
  7. agents/agent_output.py +144 -0
  8. agents/computer.py +107 -0
  9. agents/exceptions.py +63 -0
  10. agents/extensions/handoff_filters.py +67 -0
  11. agents/extensions/handoff_prompt.py +19 -0
  12. agents/function_schema.py +340 -0
  13. agents/guardrail.py +320 -0
  14. agents/handoffs.py +236 -0
  15. agents/items.py +246 -0
  16. agents/lifecycle.py +105 -0
  17. agents/logger.py +3 -0
  18. agents/model_settings.py +36 -0
  19. agents/models/__init__.py +0 -0
  20. agents/models/_openai_shared.py +34 -0
  21. agents/models/fake_id.py +5 -0
  22. agents/models/interface.py +107 -0
  23. agents/models/openai_chatcompletions.py +952 -0
  24. agents/models/openai_provider.py +65 -0
  25. agents/models/openai_responses.py +384 -0
  26. agents/result.py +220 -0
  27. agents/run.py +904 -0
  28. agents/run_context.py +26 -0
  29. agents/stream_events.py +58 -0
  30. agents/strict_schema.py +167 -0
  31. agents/tool.py +288 -0
  32. agents/tracing/__init__.py +97 -0
  33. agents/tracing/create.py +306 -0
  34. agents/tracing/logger.py +3 -0
  35. agents/tracing/processor_interface.py +69 -0
  36. agents/tracing/processors.py +261 -0
  37. agents/tracing/scope.py +45 -0
  38. agents/tracing/setup.py +211 -0
  39. agents/tracing/span_data.py +188 -0
  40. agents/tracing/spans.py +264 -0
  41. agents/tracing/traces.py +195 -0
  42. agents/tracing/util.py +17 -0
  43. agents/usage.py +22 -0
  44. agents/version.py +7 -0
  45. openai_agents-0.0.3.dist-info/METADATA +204 -0
  46. openai_agents-0.0.3.dist-info/RECORD +49 -0
  47. openai_agents-0.0.3.dist-info/licenses/LICENSE +21 -0
  48. openai-agents/example.py +0 -2
  49. openai_agents-0.0.1.dist-info/METADATA +0 -17
  50. openai_agents-0.0.1.dist-info/RECORD +0 -6
  51. openai_agents-0.0.1.dist-info/licenses/LICENSE +0 -20
  52. {openai-agents → agents/extensions}/__init__.py +0 -0
  53. {openai_agents-0.0.1.dist-info → openai_agents-0.0.3.dist-info}/WHEEL +0 -0
agents/handoffs.py ADDED
@@ -0,0 +1,236 @@
+ from __future__ import annotations
+
+ import inspect
+ from collections.abc import Awaitable
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Any, Callable, Generic, cast, overload
+
+ from pydantic import TypeAdapter
+ from typing_extensions import TypeAlias, TypeVar
+
+ from . import _utils
+ from .exceptions import ModelBehaviorError, UserError
+ from .items import RunItem, TResponseInputItem
+ from .run_context import RunContextWrapper, TContext
+ from .strict_schema import ensure_strict_json_schema
+ from .tracing.spans import SpanError
+
+ if TYPE_CHECKING:
+     from .agent import Agent
+
+
+ # The handoff input type is the type of data passed when the agent is called via a handoff.
+ THandoffInput = TypeVar("THandoffInput", default=Any)
+
+ OnHandoffWithInput = Callable[[RunContextWrapper[Any], THandoffInput], Any]
+ OnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any]
+
+
+ @dataclass(frozen=True)
+ class HandoffInputData:
+     input_history: str | tuple[TResponseInputItem, ...]
+     """
+     The input history before `Runner.run()` was called.
+     """
+
+     pre_handoff_items: tuple[RunItem, ...]
+     """
+     The items generated before the agent turn where the handoff was invoked.
+     """
+
+     new_items: tuple[RunItem, ...]
+     """
+     The new items generated during the current agent turn, including the item that triggered the
+     handoff and the tool output message representing the response from the handoff output.
+     """
+
+
+ HandoffInputFilter: TypeAlias = Callable[[HandoffInputData], HandoffInputData]
+ """A function that filters the input data passed to the next agent."""
+
+
+ @dataclass
+ class Handoff(Generic[TContext]):
+     """A handoff is when an agent delegates a task to another agent.
+     For example, in a customer support scenario you might have a "triage agent" that determines
+     which agent should handle the user's request, and sub-agents that specialize in different
+     areas like billing, account management, etc.
+     """
+
+     tool_name: str
+     """The name of the tool that represents the handoff."""
+
+     tool_description: str
+     """The description of the tool that represents the handoff."""
+
+     input_json_schema: dict[str, Any]
+     """The JSON schema for the handoff input. Can be empty if the handoff does not take an input.
+     """
+
+     on_invoke_handoff: Callable[[RunContextWrapper[Any], str], Awaitable[Agent[TContext]]]
+     """The function that invokes the handoff. The parameters passed are:
+     1. The handoff run context
+     2. The arguments from the LLM, as a JSON string. Empty string if input_json_schema is empty.
+
+     Must return an agent.
+     """
+
+     agent_name: str
+     """The name of the agent that is being handed off to."""
+
+     input_filter: HandoffInputFilter | None = None
+     """A function that filters the inputs that are passed to the next agent. By default, the new
+     agent sees the entire conversation history. In some cases, you may want to filter inputs e.g.
+     to remove older inputs, or remove tools from existing inputs.
+
+     The function will receive the entire conversation history so far, including the input item
+     that triggered the handoff and a tool call output item representing the handoff tool's output.
+
+     You are free to modify the input history or new items as you see fit. The next agent that
+     runs will receive `handoff_input_data.all_items`.
+
+     IMPORTANT: in streaming mode, we will not stream anything as a result of this function. The
+     items generated before will already have been streamed.
+     """
+
+     strict_json_schema: bool = True
+     """Whether the input JSON schema is in strict mode. We **strongly** recommend setting this to
+     True, as it increases the likelihood of correct JSON input.
+     """
+
+     def get_transfer_message(self, agent: Agent[Any]) -> str:
+         base = f"{{'assistant': '{agent.name}'}}"
+         return base
+
+     @classmethod
+     def default_tool_name(cls, agent: Agent[Any]) -> str:
+         return _utils.transform_string_function_style(f"transfer_to_{agent.name}")
+
+     @classmethod
+     def default_tool_description(cls, agent: Agent[Any]) -> str:
+         return (
+             f"Handoff to the {agent.name} agent to handle the request. "
+             f"{agent.handoff_description or ''}"
+         )
+
+
+ @overload
+ def handoff(
+     agent: Agent[TContext],
+     *,
+     tool_name_override: str | None = None,
+     tool_description_override: str | None = None,
+     input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,
+ ) -> Handoff[TContext]: ...
+
+
+ @overload
+ def handoff(
+     agent: Agent[TContext],
+     *,
+     on_handoff: OnHandoffWithInput[THandoffInput],
+     input_type: type[THandoffInput],
+     tool_description_override: str | None = None,
+     tool_name_override: str | None = None,
+     input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,
+ ) -> Handoff[TContext]: ...
+
+
+ @overload
+ def handoff(
+     agent: Agent[TContext],
+     *,
+     on_handoff: OnHandoffWithoutInput,
+     tool_description_override: str | None = None,
+     tool_name_override: str | None = None,
+     input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,
+ ) -> Handoff[TContext]: ...
+
+
+ def handoff(
+     agent: Agent[TContext],
+     tool_name_override: str | None = None,
+     tool_description_override: str | None = None,
+     on_handoff: OnHandoffWithInput[THandoffInput] | OnHandoffWithoutInput | None = None,
+     input_type: type[THandoffInput] | None = None,
+     input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,
+ ) -> Handoff[TContext]:
+     """Create a handoff from an agent.
+
+     Args:
+         agent: The agent to handoff to, or a function that returns an agent.
+         tool_name_override: Optional override for the name of the tool that represents the handoff.
+         tool_description_override: Optional override for the description of the tool that
+             represents the handoff.
+         on_handoff: A function that runs when the handoff is invoked.
+         input_type: the type of the input to the handoff. If provided, the input will be validated
+             against this type. Only relevant if you pass a function that takes an input.
+         input_filter: a function that filters the inputs that are passed to the next agent.
+     """
+     assert (on_handoff and input_type) or not (on_handoff and input_type), (
+         "You must provide either both on_handoff and input_type, or neither"
+     )
+     type_adapter: TypeAdapter[Any] | None
+     if input_type is not None:
+         assert callable(on_handoff), "on_handoff must be callable"
+         sig = inspect.signature(on_handoff)
+         if len(sig.parameters) != 2:
+             raise UserError("on_handoff must take two arguments: context and input")
+
+         type_adapter = TypeAdapter(input_type)
+         input_json_schema = type_adapter.json_schema()
+     else:
+         type_adapter = None
+         input_json_schema = {}
+         if on_handoff is not None:
+             sig = inspect.signature(on_handoff)
+             if len(sig.parameters) != 1:
+                 raise UserError("on_handoff must take one argument: context")
+
+     async def _invoke_handoff(
+         ctx: RunContextWrapper[Any], input_json: str | None = None
+     ) -> Agent[Any]:
+         if input_type is not None and type_adapter is not None:
+             if input_json is None:
+                 _utils.attach_error_to_current_span(
+                     SpanError(
+                         message="Handoff function expected non-null input, but got None",
+                         data={"details": "input_json is None"},
+                     )
+                 )
+                 raise ModelBehaviorError("Handoff function expected non-null input, but got None")
+
+             validated_input = _utils.validate_json(
+                 json_str=input_json,
+                 type_adapter=type_adapter,
+                 partial=False,
+             )
+             input_func = cast(OnHandoffWithInput[THandoffInput], on_handoff)
+             if inspect.iscoroutinefunction(input_func):
+                 await input_func(ctx, validated_input)
+             else:
+                 input_func(ctx, validated_input)
+         elif on_handoff is not None:
+             no_input_func = cast(OnHandoffWithoutInput, on_handoff)
+             if inspect.iscoroutinefunction(no_input_func):
+                 await no_input_func(ctx)
+             else:
+                 no_input_func(ctx)
+
+         return agent
+
+     tool_name = tool_name_override or Handoff.default_tool_name(agent)
+     tool_description = tool_description_override or Handoff.default_tool_description(agent)
+
+     # Always ensure the input JSON schema is in strict mode
+     # If there is a need, we can make this configurable in the future
+     input_json_schema = ensure_strict_json_schema(input_json_schema)
+
+     return Handoff(
+         tool_name=tool_name,
+         tool_description=tool_description,
+         input_json_schema=input_json_schema,
+         on_invoke_handoff=_invoke_handoff,
+         input_filter=input_filter,
+         agent_name=agent.name,
+     )
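
For context, a minimal usage sketch (not part of the diff) of how handoff() gets wired into an agent. It assumes the top-level exports (Agent, RunContextWrapper, handoff) from agents/__init__.py and the Agent(name=..., instructions=..., handoffs=[...]) constructor from agents/agent.py:

from pydantic import BaseModel

from agents import Agent, RunContextWrapper, handoff


class EscalationData(BaseModel):
    reason: str


async def on_billing_handoff(ctx: RunContextWrapper[None], data: EscalationData) -> None:
    # Runs when the LLM calls the generated handoff tool; by the time this runs,
    # `data` has already been validated against EscalationData via TypeAdapter.
    print(f"Escalating to billing: {data.reason}")


billing_agent = Agent(name="billing_agent", instructions="Handle billing questions.")

triage_agent = Agent(
    name="triage_agent",
    instructions="Route the user's request to the right specialist.",
    handoffs=[
        handoff(billing_agent, on_handoff=on_billing_handoff, input_type=EscalationData),
    ],
)

Since no overrides are passed, the LLM sees a tool named transfer_to_billing_agent (per Handoff.default_tool_name) with the default description built by Handoff.default_tool_description.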
agents/items.py ADDED
@@ -0,0 +1,246 @@
+ from __future__ import annotations
+
+ import abc
+ import copy
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, Union
+
+ from openai.types.responses import (
+     Response,
+     ResponseComputerToolCall,
+     ResponseFileSearchToolCall,
+     ResponseFunctionToolCall,
+     ResponseFunctionWebSearch,
+     ResponseInputItemParam,
+     ResponseOutputItem,
+     ResponseOutputMessage,
+     ResponseOutputRefusal,
+     ResponseOutputText,
+     ResponseStreamEvent,
+ )
+ from openai.types.responses.response_input_item_param import ComputerCallOutput, FunctionCallOutput
+ from openai.types.responses.response_reasoning_item import ResponseReasoningItem
+ from pydantic import BaseModel
+ from typing_extensions import TypeAlias
+
+ from .exceptions import AgentsException, ModelBehaviorError
+ from .usage import Usage
+
+ if TYPE_CHECKING:
+     from .agent import Agent
+
+ TResponse = Response
+ """A type alias for the Response type from the OpenAI SDK."""
+
+ TResponseInputItem = ResponseInputItemParam
+ """A type alias for the ResponseInputItemParam type from the OpenAI SDK."""
+
+ TResponseOutputItem = ResponseOutputItem
+ """A type alias for the ResponseOutputItem type from the OpenAI SDK."""
+
+ TResponseStreamEvent = ResponseStreamEvent
+ """A type alias for the ResponseStreamEvent type from the OpenAI SDK."""
+
+ T = TypeVar("T", bound=Union[TResponseOutputItem, TResponseInputItem])
+
+
+ @dataclass
+ class RunItemBase(Generic[T], abc.ABC):
+     agent: Agent[Any]
+     """The agent whose run caused this item to be generated."""
+
+     raw_item: T
+     """The raw Responses item from the run. This will always be either an output item (i.e.
+     `openai.types.responses.ResponseOutputItem`) or an input item
+     (i.e. `openai.types.responses.ResponseInputItemParam`).
+     """
+
+     def to_input_item(self) -> TResponseInputItem:
+         """Converts this item into an input item suitable for passing to the model."""
+         if isinstance(self.raw_item, dict):
+             # We know that input items are dicts, so we can ignore the type error
+             return self.raw_item  # type: ignore
+         elif isinstance(self.raw_item, BaseModel):
+             # All output items are Pydantic models that can be converted to input items.
+             return self.raw_item.model_dump(exclude_unset=True)  # type: ignore
+         else:
+             raise AgentsException(f"Unexpected raw item type: {type(self.raw_item)}")
+
+
+ @dataclass
+ class MessageOutputItem(RunItemBase[ResponseOutputMessage]):
+     """Represents a message from the LLM."""
+
+     raw_item: ResponseOutputMessage
+     """The raw response output message."""
+
+     type: Literal["message_output_item"] = "message_output_item"
+
+
+ @dataclass
+ class HandoffCallItem(RunItemBase[ResponseFunctionToolCall]):
+     """Represents a tool call for a handoff from one agent to another."""
+
+     raw_item: ResponseFunctionToolCall
+     """The raw response function tool call that represents the handoff."""
+
+     type: Literal["handoff_call_item"] = "handoff_call_item"
+
+
+ @dataclass
+ class HandoffOutputItem(RunItemBase[TResponseInputItem]):
+     """Represents the output of a handoff."""
+
+     raw_item: TResponseInputItem
+     """The raw input item that represents the handoff taking place."""
+
+     source_agent: Agent[Any]
+     """The agent that made the handoff."""
+
+     target_agent: Agent[Any]
+     """The agent that is being handed off to."""
+
+     type: Literal["handoff_output_item"] = "handoff_output_item"
+
+
+ ToolCallItemTypes: TypeAlias = Union[
+     ResponseFunctionToolCall,
+     ResponseComputerToolCall,
+     ResponseFileSearchToolCall,
+     ResponseFunctionWebSearch,
+ ]
+ """A type that represents a tool call item."""
+
+
+ @dataclass
+ class ToolCallItem(RunItemBase[ToolCallItemTypes]):
+     """Represents a tool call e.g. a function call or computer action call."""
+
+     raw_item: ToolCallItemTypes
+     """The raw tool call item."""
+
+     type: Literal["tool_call_item"] = "tool_call_item"
+
+
+ @dataclass
+ class ToolCallOutputItem(RunItemBase[Union[FunctionCallOutput, ComputerCallOutput]]):
+     """Represents the output of a tool call."""
+
+     raw_item: FunctionCallOutput | ComputerCallOutput
+     """The raw item from the model."""
+
+     output: str
+     """The output of the tool call."""
+
+     type: Literal["tool_call_output_item"] = "tool_call_output_item"
+
+
+ @dataclass
+ class ReasoningItem(RunItemBase[ResponseReasoningItem]):
+     """Represents a reasoning item."""
+
+     raw_item: ResponseReasoningItem
+     """The raw reasoning item."""
+
+     type: Literal["reasoning_item"] = "reasoning_item"
+
+
+ RunItem: TypeAlias = Union[
+     MessageOutputItem,
+     HandoffCallItem,
+     HandoffOutputItem,
+     ToolCallItem,
+     ToolCallOutputItem,
+     ReasoningItem,
+ ]
+ """An item generated by an agent."""
+
+
+ @dataclass
+ class ModelResponse:
+     output: list[TResponseOutputItem]
+     """A list of outputs (messages, tool calls, etc.) generated by the model."""
+
+     usage: Usage
+     """The usage information for the response."""
+
+     referenceable_id: str | None
+     """An ID for the response which can be used to refer to the response in subsequent calls to the
+     model. Not supported by all model providers.
+     """
+
+     def to_input_items(self) -> list[TResponseInputItem]:
+         """Convert the output into a list of input items suitable for passing to the model."""
+         # We happen to know that the shape of the Pydantic output items are the same as the
+         # equivalent TypedDict input items, so we can just convert each one.
+         # This is also tested via unit tests.
+         return [it.model_dump(exclude_unset=True) for it in self.output]  # type: ignore
+
+
+ class ItemHelpers:
+     @classmethod
+     def extract_last_content(cls, message: TResponseOutputItem) -> str:
+         """Extracts the last text content or refusal from a message."""
+         if not isinstance(message, ResponseOutputMessage):
+             return ""
+
+         last_content = message.content[-1]
+         if isinstance(last_content, ResponseOutputText):
+             return last_content.text
+         elif isinstance(last_content, ResponseOutputRefusal):
+             return last_content.refusal
+         else:
+             raise ModelBehaviorError(f"Unexpected content type: {type(last_content)}")
+
+     @classmethod
+     def extract_last_text(cls, message: TResponseOutputItem) -> str | None:
+         """Extracts the last text content from a message, if any. Ignores refusals."""
+         if isinstance(message, ResponseOutputMessage):
+             last_content = message.content[-1]
+             if isinstance(last_content, ResponseOutputText):
+                 return last_content.text
+
+         return None
+
+     @classmethod
+     def input_to_new_input_list(
+         cls, input: str | list[TResponseInputItem]
+     ) -> list[TResponseInputItem]:
+         """Converts a string or list of input items into a list of input items."""
+         if isinstance(input, str):
+             return [
+                 {
+                     "content": input,
+                     "role": "user",
+                 }
+             ]
+         return copy.deepcopy(input)
+
+     @classmethod
+     def text_message_outputs(cls, items: list[RunItem]) -> str:
+         """Concatenates all the text content from a list of message output items."""
+         text = ""
+         for item in items:
+             if isinstance(item, MessageOutputItem):
+                 text += cls.text_message_output(item)
+         return text
+
+     @classmethod
+     def text_message_output(cls, message: MessageOutputItem) -> str:
+         """Extracts all the text content from a single message output item."""
+         text = ""
+         for item in message.raw_item.content:
+             if isinstance(item, ResponseOutputText):
+                 text += item.text
+         return text
+
+     @classmethod
+     def tool_call_output_item(
+         cls, tool_call: ResponseFunctionToolCall, output: str
+     ) -> FunctionCallOutput:
+         """Creates a tool call output item from a tool call and its output."""
+         return {
+             "call_id": tool_call.call_id,
+             "output": output,
+             "type": "function_call_output",
+         }
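
A quick illustration (not part of the diff) of the ItemHelpers behavior above; the ResponseFunctionToolCall field values below are made up for the example, and its constructor fields are assumed from the openai SDK:

from openai.types.responses import ResponseFunctionToolCall

from agents.items import ItemHelpers

# A bare string becomes a single user message.
assert ItemHelpers.input_to_new_input_list("What's my balance?") == [
    {"content": "What's my balance?", "role": "user"}
]

# A tool call plus its string output becomes a function_call_output input item.
call = ResponseFunctionToolCall(
    id="__fake_id__",
    call_id="call_123",
    name="get_balance",
    arguments="{}",
    type="function_call",
)
output_item = ItemHelpers.tool_call_output_item(call, '{"balance": 42}')
assert output_item == {
    "call_id": "call_123",
    "output": '{"balance": 42}',
    "type": "function_call_output",
}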
agents/lifecycle.py ADDED
@@ -0,0 +1,105 @@
+ from typing import Any, Generic
+
+ from .agent import Agent
+ from .run_context import RunContextWrapper, TContext
+ from .tool import Tool
+
+
+ class RunHooks(Generic[TContext]):
+     """A class that receives callbacks on various lifecycle events in an agent run. Subclass and
+     override the methods you need.
+     """
+
+     async def on_agent_start(
+         self, context: RunContextWrapper[TContext], agent: Agent[TContext]
+     ) -> None:
+         """Called before the agent is invoked. Called each time the current agent changes."""
+         pass
+
+     async def on_agent_end(
+         self,
+         context: RunContextWrapper[TContext],
+         agent: Agent[TContext],
+         output: Any,
+     ) -> None:
+         """Called when the agent produces a final output."""
+         pass
+
+     async def on_handoff(
+         self,
+         context: RunContextWrapper[TContext],
+         from_agent: Agent[TContext],
+         to_agent: Agent[TContext],
+     ) -> None:
+         """Called when a handoff occurs."""
+         pass
+
+     async def on_tool_start(
+         self,
+         context: RunContextWrapper[TContext],
+         agent: Agent[TContext],
+         tool: Tool,
+     ) -> None:
+         """Called before a tool is invoked."""
+         pass
+
+     async def on_tool_end(
+         self,
+         context: RunContextWrapper[TContext],
+         agent: Agent[TContext],
+         tool: Tool,
+         result: str,
+     ) -> None:
+         """Called after a tool is invoked."""
+         pass
+
+
+ class AgentHooks(Generic[TContext]):
+     """A class that receives callbacks on various lifecycle events for a specific agent. You can
+     set this on `agent.hooks` to receive events for that specific agent.
+
+     Subclass and override the methods you need.
+     """
+
+     async def on_start(self, context: RunContextWrapper[TContext], agent: Agent[TContext]) -> None:
+         """Called before the agent is invoked. Called each time the running agent is changed to this
+         agent."""
+         pass
+
+     async def on_end(
+         self,
+         context: RunContextWrapper[TContext],
+         agent: Agent[TContext],
+         output: Any,
+     ) -> None:
+         """Called when the agent produces a final output."""
+         pass
+
+     async def on_handoff(
+         self,
+         context: RunContextWrapper[TContext],
+         agent: Agent[TContext],
+         source: Agent[TContext],
+     ) -> None:
+         """Called when the agent is being handed off to. The `source` is the agent that is handing
+         off to this agent."""
+         pass
+
+     async def on_tool_start(
+         self,
+         context: RunContextWrapper[TContext],
+         agent: Agent[TContext],
+         tool: Tool,
+     ) -> None:
+         """Called before a tool is invoked."""
+         pass
+
+     async def on_tool_end(
+         self,
+         context: RunContextWrapper[TContext],
+         agent: Agent[TContext],
+         tool: Tool,
+         result: str,
+     ) -> None:
+         """Called after a tool is invoked."""
+         pass
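
A minimal sketch (not part of the diff) of how these hooks are meant to be used: subclass RunHooks, override only the callbacks you need, and pass the instance to the run entry point (presumably Runner.run(..., hooks=...) in agents/run.py). That each Tool variant exposes a name attribute is an assumption from agents/tool.py:

from typing import Any

from agents.lifecycle import RunHooks


class LoggingRunHooks(RunHooks[Any]):
    async def on_agent_start(self, context, agent) -> None:
        # Fires each time the current agent changes during the run.
        print(f"agent starting: {agent.name}")

    async def on_tool_end(self, context, agent, tool, result) -> None:
        # Fires after every tool invocation, with the stringified result.
        print(f"{agent.name} ran {tool.name} -> {result[:80]}")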
agents/logger.py ADDED
@@ -0,0 +1,3 @@
+ import logging
+
+ logger = logging.getLogger("openai.agents")
agents/model_settings.py ADDED
@@ -0,0 +1,36 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Literal
+
+
+ @dataclass
+ class ModelSettings:
+     """Settings to use when calling an LLM.
+
+     This class holds optional model configuration parameters (e.g. temperature,
+     top_p, penalties, truncation, etc.).
+     """
+
+     temperature: float | None = None
+     top_p: float | None = None
+     frequency_penalty: float | None = None
+     presence_penalty: float | None = None
+     tool_choice: Literal["auto", "required", "none"] | str | None = None
+     parallel_tool_calls: bool | None = False
+     truncation: Literal["auto", "disabled"] | None = None
+
+     def resolve(self, override: ModelSettings | None) -> ModelSettings:
+         """Produce a new ModelSettings by overlaying any non-None values from the
+         override on top of this instance."""
+         if override is None:
+             return self
+         return ModelSettings(
+             temperature=override.temperature or self.temperature,
+             top_p=override.top_p or self.top_p,
+             frequency_penalty=override.frequency_penalty or self.frequency_penalty,
+             presence_penalty=override.presence_penalty or self.presence_penalty,
+             tool_choice=override.tool_choice or self.tool_choice,
+             parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
+             truncation=override.truncation or self.truncation,
+         )
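
Worth noting about resolve() above: because it combines values with `or`, a falsy override (e.g. temperature=0.0 or parallel_tool_calls=False) falls back to the base value rather than winning. A small sketch of the overlay, directly from the code shown:

from agents.model_settings import ModelSettings

base = ModelSettings(temperature=0.2, top_p=0.9)
per_run = ModelSettings(temperature=0.7)

resolved = base.resolve(per_run)
assert resolved.temperature == 0.7  # override wins where it is set
assert resolved.top_p == 0.9        # unset fields fall through to the base

# Caveat of the `or` logic: a 0.0 override is treated as unset.
assert base.resolve(ModelSettings(temperature=0.0)).temperature == 0.2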
agents/models/__init__.py ADDED
File without changes
agents/models/_openai_shared.py ADDED
@@ -0,0 +1,34 @@
+ from __future__ import annotations
+
+ from openai import AsyncOpenAI
+
+ _default_openai_key: str | None = None
+ _default_openai_client: AsyncOpenAI | None = None
+ _use_responses_by_default: bool = True
+
+
+ def set_default_openai_key(key: str) -> None:
+     global _default_openai_key
+     _default_openai_key = key
+
+
+ def get_default_openai_key() -> str | None:
+     return _default_openai_key
+
+
+ def set_default_openai_client(client: AsyncOpenAI) -> None:
+     global _default_openai_client
+     _default_openai_client = client
+
+
+ def get_default_openai_client() -> AsyncOpenAI | None:
+     return _default_openai_client
+
+
+ def set_use_responses_by_default(use_responses: bool) -> None:
+     global _use_responses_by_default
+     _use_responses_by_default = use_responses
+
+
+ def get_use_responses_by_default() -> bool:
+     return _use_responses_by_default
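
A hedged sketch (not part of the diff) of how these module-level defaults would be configured at startup; that the provider in agents/models/openai_provider.py reads them as fallbacks is an assumption from the naming, and the key/URL values are placeholders:

from openai import AsyncOpenAI

from agents.models import _openai_shared

_openai_shared.set_default_openai_key("sk-...")  # placeholder key
_openai_shared.set_default_openai_client(AsyncOpenAI(base_url="http://localhost:8000/v1"))
_openai_shared.set_use_responses_by_default(False)  # prefer Chat Completions over Responses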
agents/models/fake_id.py ADDED
@@ -0,0 +1,5 @@
+ FAKE_RESPONSES_ID = "__fake_id__"
+ """This is a placeholder ID used to fill in the `id` field in Responses API related objects. It's
+ useful when you're creating Responses objects from non-Responses APIs, e.g. the OpenAI Chat
+ Completions API or other LLM providers.
+ """