openai-agents 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents might be problematic. Click here for more details.
- agents/__init__.py +223 -0
- agents/_config.py +23 -0
- agents/_debug.py +17 -0
- agents/_run_impl.py +792 -0
- agents/_utils.py +61 -0
- agents/agent.py +159 -0
- agents/agent_output.py +144 -0
- agents/computer.py +107 -0
- agents/exceptions.py +63 -0
- agents/extensions/handoff_filters.py +67 -0
- agents/extensions/handoff_prompt.py +19 -0
- agents/function_schema.py +340 -0
- agents/guardrail.py +320 -0
- agents/handoffs.py +236 -0
- agents/items.py +246 -0
- agents/lifecycle.py +105 -0
- agents/logger.py +3 -0
- agents/model_settings.py +35 -0
- agents/models/__init__.py +0 -0
- agents/models/_openai_shared.py +34 -0
- agents/models/fake_id.py +5 -0
- agents/models/interface.py +107 -0
- agents/models/openai_chatcompletions.py +952 -0
- agents/models/openai_provider.py +65 -0
- agents/models/openai_responses.py +384 -0
- agents/result.py +220 -0
- agents/run.py +904 -0
- agents/run_context.py +26 -0
- agents/stream_events.py +58 -0
- agents/strict_schema.py +167 -0
- agents/tool.py +286 -0
- agents/tracing/__init__.py +97 -0
- agents/tracing/create.py +306 -0
- agents/tracing/logger.py +3 -0
- agents/tracing/processor_interface.py +69 -0
- agents/tracing/processors.py +261 -0
- agents/tracing/scope.py +45 -0
- agents/tracing/setup.py +211 -0
- agents/tracing/span_data.py +188 -0
- agents/tracing/spans.py +264 -0
- agents/tracing/traces.py +195 -0
- agents/tracing/util.py +17 -0
- agents/usage.py +22 -0
- agents/version.py +7 -0
- openai_agents-0.0.2.dist-info/METADATA +202 -0
- openai_agents-0.0.2.dist-info/RECORD +49 -0
- openai_agents-0.0.2.dist-info/licenses/LICENSE +21 -0
- openai-agents/example.py +0 -2
- openai_agents-0.0.1.dist-info/METADATA +0 -17
- openai_agents-0.0.1.dist-info/RECORD +0 -6
- openai_agents-0.0.1.dist-info/licenses/LICENSE +0 -20
- {openai-agents → agents/extensions}/__init__.py +0 -0
- {openai_agents-0.0.1.dist-info → openai_agents-0.0.2.dist-info}/WHEEL +0 -0
agents/__init__.py
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import sys
|
|
3
|
+
from typing import Literal
|
|
4
|
+
|
|
5
|
+
from openai import AsyncOpenAI
|
|
6
|
+
|
|
7
|
+
from . import _config
|
|
8
|
+
from .agent import Agent
|
|
9
|
+
from .agent_output import AgentOutputSchema
|
|
10
|
+
from .computer import AsyncComputer, Button, Computer, Environment
|
|
11
|
+
from .exceptions import (
|
|
12
|
+
AgentsException,
|
|
13
|
+
InputGuardrailTripwireTriggered,
|
|
14
|
+
MaxTurnsExceeded,
|
|
15
|
+
ModelBehaviorError,
|
|
16
|
+
OutputGuardrailTripwireTriggered,
|
|
17
|
+
UserError,
|
|
18
|
+
)
|
|
19
|
+
from .guardrail import (
|
|
20
|
+
GuardrailFunctionOutput,
|
|
21
|
+
InputGuardrail,
|
|
22
|
+
InputGuardrailResult,
|
|
23
|
+
OutputGuardrail,
|
|
24
|
+
OutputGuardrailResult,
|
|
25
|
+
input_guardrail,
|
|
26
|
+
output_guardrail,
|
|
27
|
+
)
|
|
28
|
+
from .handoffs import Handoff, HandoffInputData, HandoffInputFilter, handoff
|
|
29
|
+
from .items import (
|
|
30
|
+
HandoffCallItem,
|
|
31
|
+
HandoffOutputItem,
|
|
32
|
+
ItemHelpers,
|
|
33
|
+
MessageOutputItem,
|
|
34
|
+
ModelResponse,
|
|
35
|
+
ReasoningItem,
|
|
36
|
+
RunItem,
|
|
37
|
+
ToolCallItem,
|
|
38
|
+
ToolCallOutputItem,
|
|
39
|
+
TResponseInputItem,
|
|
40
|
+
)
|
|
41
|
+
from .lifecycle import AgentHooks, RunHooks
|
|
42
|
+
from .model_settings import ModelSettings
|
|
43
|
+
from .models.interface import Model, ModelProvider, ModelTracing
|
|
44
|
+
from .models.openai_chatcompletions import OpenAIChatCompletionsModel
|
|
45
|
+
from .models.openai_provider import OpenAIProvider
|
|
46
|
+
from .models.openai_responses import OpenAIResponsesModel
|
|
47
|
+
from .result import RunResult, RunResultStreaming
|
|
48
|
+
from .run import RunConfig, Runner
|
|
49
|
+
from .run_context import RunContextWrapper, TContext
|
|
50
|
+
from .stream_events import (
|
|
51
|
+
AgentUpdatedStreamEvent,
|
|
52
|
+
RawResponsesStreamEvent,
|
|
53
|
+
RunItemStreamEvent,
|
|
54
|
+
StreamEvent,
|
|
55
|
+
)
|
|
56
|
+
from .tool import (
|
|
57
|
+
ComputerTool,
|
|
58
|
+
FileSearchTool,
|
|
59
|
+
FunctionTool,
|
|
60
|
+
Tool,
|
|
61
|
+
WebSearchTool,
|
|
62
|
+
default_tool_error_function,
|
|
63
|
+
function_tool,
|
|
64
|
+
)
|
|
65
|
+
from .tracing import (
|
|
66
|
+
AgentSpanData,
|
|
67
|
+
CustomSpanData,
|
|
68
|
+
FunctionSpanData,
|
|
69
|
+
GenerationSpanData,
|
|
70
|
+
GuardrailSpanData,
|
|
71
|
+
HandoffSpanData,
|
|
72
|
+
Span,
|
|
73
|
+
SpanData,
|
|
74
|
+
SpanError,
|
|
75
|
+
Trace,
|
|
76
|
+
add_trace_processor,
|
|
77
|
+
agent_span,
|
|
78
|
+
custom_span,
|
|
79
|
+
function_span,
|
|
80
|
+
gen_span_id,
|
|
81
|
+
gen_trace_id,
|
|
82
|
+
generation_span,
|
|
83
|
+
get_current_span,
|
|
84
|
+
get_current_trace,
|
|
85
|
+
guardrail_span,
|
|
86
|
+
handoff_span,
|
|
87
|
+
set_trace_processors,
|
|
88
|
+
set_tracing_disabled,
|
|
89
|
+
set_tracing_export_api_key,
|
|
90
|
+
trace,
|
|
91
|
+
)
|
|
92
|
+
from .usage import Usage
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def set_default_openai_key(key: str) -> None:
    """Register *key* as the default OpenAI API key for LLM requests and tracing.

    Calling this is only needed when the OPENAI_API_KEY environment variable is
    not set; when given, this key takes precedence over the environment variable.
    """
    _config.set_default_openai_key(key)
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool = True) -> None:
    """Install *client* as the default OpenAI client for LLM requests and/or tracing.

    When given, this client replaces the SDK's default OpenAI client.

    Args:
        client: The OpenAI client to install as the default.
        use_for_tracing: If True, the client's API key is also used to upload
            traces. If False, set the OPENAI_API_KEY environment variable or
            call set_tracing_export_api_key() to configure the tracing key.
    """
    _config.set_default_openai_client(client, use_for_tracing)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None:
    """Choose which OpenAI API the SDK uses for LLM requests.

    The responses API is the default; pass "chat_completions" to switch to the
    chat completions API instead.
    """
    _config.set_default_openai_api(api)
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def enable_verbose_stdout_logging():
    """Enable DEBUG-level logging to stdout for the SDK's loggers.

    Useful for debugging. Safe to call more than once: a stdout handler is only
    attached when the logger does not already have one, so repeated calls do
    not stack duplicate handlers (which would emit every log line N times).
    """
    for name in ["openai.agents", "openai.agents.tracing"]:
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        # Guard against duplicate handlers when this is called repeatedly.
        has_stdout_handler = any(
            isinstance(handler, logging.StreamHandler)
            and getattr(handler, "stream", None) is sys.stdout
            for handler in logger.handlers
        )
        if not has_stdout_handler:
            logger.addHandler(logging.StreamHandler(sys.stdout))
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
# Public API of the package. Fix: "ModelResponse" was listed twice; the
# duplicate entry has been removed (membership semantics are unchanged).
__all__ = [
    "Agent",
    "Runner",
    "Model",
    "ModelProvider",
    "ModelTracing",
    "ModelSettings",
    "OpenAIChatCompletionsModel",
    "OpenAIProvider",
    "OpenAIResponsesModel",
    "AgentOutputSchema",
    "Computer",
    "AsyncComputer",
    "Environment",
    "Button",
    "AgentsException",
    "InputGuardrailTripwireTriggered",
    "OutputGuardrailTripwireTriggered",
    "MaxTurnsExceeded",
    "ModelBehaviorError",
    "UserError",
    "InputGuardrail",
    "InputGuardrailResult",
    "OutputGuardrail",
    "OutputGuardrailResult",
    "GuardrailFunctionOutput",
    "input_guardrail",
    "output_guardrail",
    "handoff",
    "Handoff",
    "HandoffInputData",
    "HandoffInputFilter",
    "TResponseInputItem",
    "MessageOutputItem",
    "ModelResponse",
    "RunItem",
    "HandoffCallItem",
    "HandoffOutputItem",
    "ToolCallItem",
    "ToolCallOutputItem",
    "ReasoningItem",
    "ItemHelpers",
    "RunHooks",
    "AgentHooks",
    "RunContextWrapper",
    "TContext",
    "RunResult",
    "RunResultStreaming",
    "RunConfig",
    "RawResponsesStreamEvent",
    "RunItemStreamEvent",
    "AgentUpdatedStreamEvent",
    "StreamEvent",
    "FunctionTool",
    "ComputerTool",
    "FileSearchTool",
    "Tool",
    "WebSearchTool",
    "function_tool",
    "Usage",
    "add_trace_processor",
    "agent_span",
    "custom_span",
    "function_span",
    "generation_span",
    "get_current_span",
    "get_current_trace",
    "guardrail_span",
    "handoff_span",
    "set_trace_processors",
    "set_tracing_disabled",
    "trace",
    "Trace",
    "SpanError",
    "Span",
    "SpanData",
    "AgentSpanData",
    "CustomSpanData",
    "FunctionSpanData",
    "GenerationSpanData",
    "GuardrailSpanData",
    "HandoffSpanData",
    "set_default_openai_key",
    "set_default_openai_client",
    "set_default_openai_api",
    "set_tracing_export_api_key",
    "enable_verbose_stdout_logging",
    "gen_trace_id",
    "gen_span_id",
    "default_tool_error_function",
]
|
agents/_config.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from openai import AsyncOpenAI
|
|
2
|
+
from typing_extensions import Literal
|
|
3
|
+
|
|
4
|
+
from .models import _openai_shared
|
|
5
|
+
from .tracing import set_tracing_export_api_key
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def set_default_openai_key(key: str) -> None:
    """Propagate *key* as the default API key for both trace export and LLM requests."""
    set_tracing_export_api_key(key)
    _openai_shared.set_default_openai_key(key)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:
    """Install *client* as the shared default OpenAI client.

    When *use_for_tracing* is True, the client's API key is also registered as
    the key used for uploading traces.
    """
    if use_for_tracing:
        set_tracing_export_api_key(client.api_key)
    _openai_shared.set_default_openai_client(client)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None:
    """Record which OpenAI API should be used by default for model calls."""
    # The shared flag is "use the responses API by default", so anything other
    # than "chat_completions" maps to True.
    _openai_shared.set_use_responses_by_default(api != "chat_completions")
|
agents/_debug.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def _debug_flag_enabled(flag: str) -> bool:
    """Return True when env var *flag* is set to "1" or "true" (case-insensitive)."""
    value = os.getenv(flag)
    if value is None:
        return False
    return value.lower() in ("1", "true")
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
DONT_LOG_MODEL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_MODEL_DATA")
"""By default we log LLM inputs/outputs. Set the OPENAI_AGENTS_DONT_LOG_MODEL_DATA environment
variable to disable logging them, e.g. to avoid exposing sensitive information.
"""

DONT_LOG_TOOL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_TOOL_DATA")
"""By default we log tool call inputs/outputs. Set the OPENAI_AGENTS_DONT_LOG_TOOL_DATA
environment variable to disable logging them, e.g. to avoid exposing sensitive information.
"""
|