openai-agents 0.0.18__py3-none-any.whl → 0.0.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents might be problematic. Click here for more details.
- agents/__init__.py +2 -0
- agents/run.py +206 -95
- agents/tracing/__init__.py +11 -5
- agents/tracing/create.py +16 -16
- agents/tracing/provider.py +294 -0
- agents/tracing/setup.py +13 -206
- agents/tracing/util.py +9 -10
- {openai_agents-0.0.18.dist-info → openai_agents-0.0.19.dist-info}/METADATA +1 -1
- {openai_agents-0.0.18.dist-info → openai_agents-0.0.19.dist-info}/RECORD +11 -10
- {openai_agents-0.0.18.dist-info → openai_agents-0.0.19.dist-info}/WHEEL +0 -0
- {openai_agents-0.0.18.dist-info → openai_agents-0.0.19.dist-info}/licenses/LICENSE +0 -0
agents/__init__.py
CHANGED
|
@@ -104,6 +104,7 @@ from .tracing import (
|
|
|
104
104
|
handoff_span,
|
|
105
105
|
mcp_tools_span,
|
|
106
106
|
set_trace_processors,
|
|
107
|
+
set_trace_provider,
|
|
107
108
|
set_tracing_disabled,
|
|
108
109
|
set_tracing_export_api_key,
|
|
109
110
|
speech_group_span,
|
|
@@ -246,6 +247,7 @@ __all__ = [
|
|
|
246
247
|
"guardrail_span",
|
|
247
248
|
"handoff_span",
|
|
248
249
|
"set_trace_processors",
|
|
250
|
+
"set_trace_provider",
|
|
249
251
|
"set_tracing_disabled",
|
|
250
252
|
"speech_group_span",
|
|
251
253
|
"transcription_span",
|
agents/run.py
CHANGED
|
@@ -3,12 +3,13 @@ from __future__ import annotations
|
|
|
3
3
|
import asyncio
|
|
4
4
|
import copy
|
|
5
5
|
from dataclasses import dataclass, field
|
|
6
|
-
from typing import Any, cast
|
|
6
|
+
from typing import Any, Generic, cast
|
|
7
7
|
|
|
8
8
|
from openai.types.responses import ResponseCompletedEvent
|
|
9
9
|
from openai.types.responses.response_prompt_param import (
|
|
10
10
|
ResponsePromptParam,
|
|
11
11
|
)
|
|
12
|
+
from typing_extensions import NotRequired, TypedDict, Unpack
|
|
12
13
|
|
|
13
14
|
from ._run_impl import (
|
|
14
15
|
AgentToolUseTracker,
|
|
@@ -31,7 +32,12 @@ from .exceptions import (
|
|
|
31
32
|
OutputGuardrailTripwireTriggered,
|
|
32
33
|
RunErrorDetails,
|
|
33
34
|
)
|
|
34
|
-
from .guardrail import
|
|
35
|
+
from .guardrail import (
|
|
36
|
+
InputGuardrail,
|
|
37
|
+
InputGuardrailResult,
|
|
38
|
+
OutputGuardrail,
|
|
39
|
+
OutputGuardrailResult,
|
|
40
|
+
)
|
|
35
41
|
from .handoffs import Handoff, HandoffInputFilter, handoff
|
|
36
42
|
from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
|
|
37
43
|
from .lifecycle import RunHooks
|
|
@@ -50,6 +56,27 @@ from .util import _coro, _error_tracing
|
|
|
50
56
|
|
|
51
57
|
DEFAULT_MAX_TURNS = 10
|
|
52
58
|
|
|
59
|
+
DEFAULT_AGENT_RUNNER: AgentRunner = None # type: ignore
|
|
60
|
+
# the value is set at the end of the module
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def set_default_agent_runner(runner: AgentRunner | None) -> None:
|
|
64
|
+
"""
|
|
65
|
+
WARNING: this class is experimental and not part of the public API
|
|
66
|
+
It should not be used directly.
|
|
67
|
+
"""
|
|
68
|
+
global DEFAULT_AGENT_RUNNER
|
|
69
|
+
DEFAULT_AGENT_RUNNER = runner or AgentRunner()
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def get_default_agent_runner() -> AgentRunner:
|
|
73
|
+
"""
|
|
74
|
+
WARNING: this class is experimental and not part of the public API
|
|
75
|
+
It should not be used directly.
|
|
76
|
+
"""
|
|
77
|
+
global DEFAULT_AGENT_RUNNER
|
|
78
|
+
return DEFAULT_AGENT_RUNNER
|
|
79
|
+
|
|
53
80
|
|
|
54
81
|
@dataclass
|
|
55
82
|
class RunConfig:
|
|
@@ -110,6 +137,25 @@ class RunConfig:
|
|
|
110
137
|
"""
|
|
111
138
|
|
|
112
139
|
|
|
140
|
+
class RunOptions(TypedDict, Generic[TContext]):
|
|
141
|
+
"""Arguments for ``AgentRunner`` methods."""
|
|
142
|
+
|
|
143
|
+
context: NotRequired[TContext | None]
|
|
144
|
+
"""The context for the run."""
|
|
145
|
+
|
|
146
|
+
max_turns: NotRequired[int]
|
|
147
|
+
"""The maximum number of turns to run for."""
|
|
148
|
+
|
|
149
|
+
hooks: NotRequired[RunHooks[TContext] | None]
|
|
150
|
+
"""Lifecycle hooks for the run."""
|
|
151
|
+
|
|
152
|
+
run_config: NotRequired[RunConfig | None]
|
|
153
|
+
"""Run configuration."""
|
|
154
|
+
|
|
155
|
+
previous_response_id: NotRequired[str | None]
|
|
156
|
+
"""The ID of the previous response, if any."""
|
|
157
|
+
|
|
158
|
+
|
|
113
159
|
class Runner:
|
|
114
160
|
@classmethod
|
|
115
161
|
async def run(
|
|
@@ -130,13 +176,10 @@ class Runner:
|
|
|
130
176
|
`agent.output_type`, the loop terminates.
|
|
131
177
|
3. If there's a handoff, we run the loop again, with the new agent.
|
|
132
178
|
4. Else, we run tool calls (if any), and re-run the loop.
|
|
133
|
-
|
|
134
179
|
In two cases, the agent may raise an exception:
|
|
135
180
|
1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
|
|
136
181
|
2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.
|
|
137
|
-
|
|
138
182
|
Note that only the first agent's input guardrails are run.
|
|
139
|
-
|
|
140
183
|
Args:
|
|
141
184
|
starting_agent: The starting agent to run.
|
|
142
185
|
input: The initial input to the agent. You can pass a single string for a user message,
|
|
@@ -148,11 +191,139 @@ class Runner:
|
|
|
148
191
|
run_config: Global settings for the entire agent run.
|
|
149
192
|
previous_response_id: The ID of the previous response, if using OpenAI models via the
|
|
150
193
|
Responses API, this allows you to skip passing in input from the previous turn.
|
|
194
|
+
Returns:
|
|
195
|
+
A run result containing all the inputs, guardrail results and the output of the last
|
|
196
|
+
agent. Agents may perform handoffs, so we don't know the specific type of the output.
|
|
197
|
+
"""
|
|
198
|
+
runner = DEFAULT_AGENT_RUNNER
|
|
199
|
+
return await runner.run(
|
|
200
|
+
starting_agent,
|
|
201
|
+
input,
|
|
202
|
+
context=context,
|
|
203
|
+
max_turns=max_turns,
|
|
204
|
+
hooks=hooks,
|
|
205
|
+
run_config=run_config,
|
|
206
|
+
previous_response_id=previous_response_id,
|
|
207
|
+
)
|
|
151
208
|
|
|
209
|
+
@classmethod
|
|
210
|
+
def run_sync(
|
|
211
|
+
cls,
|
|
212
|
+
starting_agent: Agent[TContext],
|
|
213
|
+
input: str | list[TResponseInputItem],
|
|
214
|
+
*,
|
|
215
|
+
context: TContext | None = None,
|
|
216
|
+
max_turns: int = DEFAULT_MAX_TURNS,
|
|
217
|
+
hooks: RunHooks[TContext] | None = None,
|
|
218
|
+
run_config: RunConfig | None = None,
|
|
219
|
+
previous_response_id: str | None = None,
|
|
220
|
+
) -> RunResult:
|
|
221
|
+
"""Run a workflow synchronously, starting at the given agent. Note that this just wraps the
|
|
222
|
+
`run` method, so it will not work if there's already an event loop (e.g. inside an async
|
|
223
|
+
function, or in a Jupyter notebook or async context like FastAPI). For those cases, use
|
|
224
|
+
the `run` method instead.
|
|
225
|
+
The agent will run in a loop until a final output is generated. The loop runs like so:
|
|
226
|
+
1. The agent is invoked with the given input.
|
|
227
|
+
2. If there is a final output (i.e. the agent produces something of type
|
|
228
|
+
`agent.output_type`, the loop terminates.
|
|
229
|
+
3. If there's a handoff, we run the loop again, with the new agent.
|
|
230
|
+
4. Else, we run tool calls (if any), and re-run the loop.
|
|
231
|
+
In two cases, the agent may raise an exception:
|
|
232
|
+
1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
|
|
233
|
+
2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.
|
|
234
|
+
Note that only the first agent's input guardrails are run.
|
|
235
|
+
Args:
|
|
236
|
+
starting_agent: The starting agent to run.
|
|
237
|
+
input: The initial input to the agent. You can pass a single string for a user message,
|
|
238
|
+
or a list of input items.
|
|
239
|
+
context: The context to run the agent with.
|
|
240
|
+
max_turns: The maximum number of turns to run the agent for. A turn is defined as one
|
|
241
|
+
AI invocation (including any tool calls that might occur).
|
|
242
|
+
hooks: An object that receives callbacks on various lifecycle events.
|
|
243
|
+
run_config: Global settings for the entire agent run.
|
|
244
|
+
previous_response_id: The ID of the previous response, if using OpenAI models via the
|
|
245
|
+
Responses API, this allows you to skip passing in input from the previous turn.
|
|
152
246
|
Returns:
|
|
153
247
|
A run result containing all the inputs, guardrail results and the output of the last
|
|
154
248
|
agent. Agents may perform handoffs, so we don't know the specific type of the output.
|
|
155
249
|
"""
|
|
250
|
+
runner = DEFAULT_AGENT_RUNNER
|
|
251
|
+
return runner.run_sync(
|
|
252
|
+
starting_agent,
|
|
253
|
+
input,
|
|
254
|
+
context=context,
|
|
255
|
+
max_turns=max_turns,
|
|
256
|
+
hooks=hooks,
|
|
257
|
+
run_config=run_config,
|
|
258
|
+
previous_response_id=previous_response_id,
|
|
259
|
+
)
|
|
260
|
+
|
|
261
|
+
@classmethod
|
|
262
|
+
def run_streamed(
|
|
263
|
+
cls,
|
|
264
|
+
starting_agent: Agent[TContext],
|
|
265
|
+
input: str | list[TResponseInputItem],
|
|
266
|
+
context: TContext | None = None,
|
|
267
|
+
max_turns: int = DEFAULT_MAX_TURNS,
|
|
268
|
+
hooks: RunHooks[TContext] | None = None,
|
|
269
|
+
run_config: RunConfig | None = None,
|
|
270
|
+
previous_response_id: str | None = None,
|
|
271
|
+
) -> RunResultStreaming:
|
|
272
|
+
"""Run a workflow starting at the given agent in streaming mode. The returned result object
|
|
273
|
+
contains a method you can use to stream semantic events as they are generated.
|
|
274
|
+
The agent will run in a loop until a final output is generated. The loop runs like so:
|
|
275
|
+
1. The agent is invoked with the given input.
|
|
276
|
+
2. If there is a final output (i.e. the agent produces something of type
|
|
277
|
+
`agent.output_type`, the loop terminates.
|
|
278
|
+
3. If there's a handoff, we run the loop again, with the new agent.
|
|
279
|
+
4. Else, we run tool calls (if any), and re-run the loop.
|
|
280
|
+
In two cases, the agent may raise an exception:
|
|
281
|
+
1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
|
|
282
|
+
2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.
|
|
283
|
+
Note that only the first agent's input guardrails are run.
|
|
284
|
+
Args:
|
|
285
|
+
starting_agent: The starting agent to run.
|
|
286
|
+
input: The initial input to the agent. You can pass a single string for a user message,
|
|
287
|
+
or a list of input items.
|
|
288
|
+
context: The context to run the agent with.
|
|
289
|
+
max_turns: The maximum number of turns to run the agent for. A turn is defined as one
|
|
290
|
+
AI invocation (including any tool calls that might occur).
|
|
291
|
+
hooks: An object that receives callbacks on various lifecycle events.
|
|
292
|
+
run_config: Global settings for the entire agent run.
|
|
293
|
+
previous_response_id: The ID of the previous response, if using OpenAI models via the
|
|
294
|
+
Responses API, this allows you to skip passing in input from the previous turn.
|
|
295
|
+
Returns:
|
|
296
|
+
A result object that contains data about the run, as well as a method to stream events.
|
|
297
|
+
"""
|
|
298
|
+
runner = DEFAULT_AGENT_RUNNER
|
|
299
|
+
return runner.run_streamed(
|
|
300
|
+
starting_agent,
|
|
301
|
+
input,
|
|
302
|
+
context=context,
|
|
303
|
+
max_turns=max_turns,
|
|
304
|
+
hooks=hooks,
|
|
305
|
+
run_config=run_config,
|
|
306
|
+
previous_response_id=previous_response_id,
|
|
307
|
+
)
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
class AgentRunner:
|
|
311
|
+
"""
|
|
312
|
+
WARNING: this class is experimental and not part of the public API
|
|
313
|
+
It should not be used directly or subclassed.
|
|
314
|
+
"""
|
|
315
|
+
|
|
316
|
+
async def run(
|
|
317
|
+
self,
|
|
318
|
+
starting_agent: Agent[TContext],
|
|
319
|
+
input: str | list[TResponseInputItem],
|
|
320
|
+
**kwargs: Unpack[RunOptions[TContext]],
|
|
321
|
+
) -> RunResult:
|
|
322
|
+
context = kwargs.get("context")
|
|
323
|
+
max_turns = kwargs.get("max_turns", DEFAULT_MAX_TURNS)
|
|
324
|
+
hooks = kwargs.get("hooks")
|
|
325
|
+
run_config = kwargs.get("run_config")
|
|
326
|
+
previous_response_id = kwargs.get("previous_response_id")
|
|
156
327
|
if hooks is None:
|
|
157
328
|
hooks = RunHooks[Any]()
|
|
158
329
|
if run_config is None:
|
|
@@ -184,13 +355,15 @@ class Runner:
|
|
|
184
355
|
|
|
185
356
|
try:
|
|
186
357
|
while True:
|
|
187
|
-
all_tools = await
|
|
358
|
+
all_tools = await AgentRunner._get_all_tools(current_agent, context_wrapper)
|
|
188
359
|
|
|
189
360
|
# Start an agent span if we don't have one. This span is ended if the current
|
|
190
361
|
# agent changes, or if the agent loop ends.
|
|
191
362
|
if current_span is None:
|
|
192
|
-
handoff_names = [
|
|
193
|
-
|
|
363
|
+
handoff_names = [
|
|
364
|
+
h.agent_name for h in AgentRunner._get_handoffs(current_agent)
|
|
365
|
+
]
|
|
366
|
+
if output_schema := AgentRunner._get_output_schema(current_agent):
|
|
194
367
|
output_type_name = output_schema.name()
|
|
195
368
|
else:
|
|
196
369
|
output_type_name = "str"
|
|
@@ -220,14 +393,14 @@ class Runner:
|
|
|
220
393
|
|
|
221
394
|
if current_turn == 1:
|
|
222
395
|
input_guardrail_results, turn_result = await asyncio.gather(
|
|
223
|
-
|
|
396
|
+
self._run_input_guardrails(
|
|
224
397
|
starting_agent,
|
|
225
398
|
starting_agent.input_guardrails
|
|
226
399
|
+ (run_config.input_guardrails or []),
|
|
227
400
|
copy.deepcopy(input),
|
|
228
401
|
context_wrapper,
|
|
229
402
|
),
|
|
230
|
-
|
|
403
|
+
self._run_single_turn(
|
|
231
404
|
agent=current_agent,
|
|
232
405
|
all_tools=all_tools,
|
|
233
406
|
original_input=original_input,
|
|
@@ -241,7 +414,7 @@ class Runner:
|
|
|
241
414
|
),
|
|
242
415
|
)
|
|
243
416
|
else:
|
|
244
|
-
turn_result = await
|
|
417
|
+
turn_result = await self._run_single_turn(
|
|
245
418
|
agent=current_agent,
|
|
246
419
|
all_tools=all_tools,
|
|
247
420
|
original_input=original_input,
|
|
@@ -260,7 +433,7 @@ class Runner:
|
|
|
260
433
|
generated_items = turn_result.generated_items
|
|
261
434
|
|
|
262
435
|
if isinstance(turn_result.next_step, NextStepFinalOutput):
|
|
263
|
-
output_guardrail_results = await
|
|
436
|
+
output_guardrail_results = await self._run_output_guardrails(
|
|
264
437
|
current_agent.output_guardrails + (run_config.output_guardrails or []),
|
|
265
438
|
current_agent,
|
|
266
439
|
turn_result.next_step.output,
|
|
@@ -302,54 +475,19 @@ class Runner:
|
|
|
302
475
|
if current_span:
|
|
303
476
|
current_span.finish(reset_current=True)
|
|
304
477
|
|
|
305
|
-
@classmethod
|
|
306
478
|
def run_sync(
|
|
307
|
-
|
|
479
|
+
self,
|
|
308
480
|
starting_agent: Agent[TContext],
|
|
309
481
|
input: str | list[TResponseInputItem],
|
|
310
|
-
|
|
311
|
-
context: TContext | None = None,
|
|
312
|
-
max_turns: int = DEFAULT_MAX_TURNS,
|
|
313
|
-
hooks: RunHooks[TContext] | None = None,
|
|
314
|
-
run_config: RunConfig | None = None,
|
|
315
|
-
previous_response_id: str | None = None,
|
|
482
|
+
**kwargs: Unpack[RunOptions[TContext]],
|
|
316
483
|
) -> RunResult:
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
The agent will run in a loop until a final output is generated. The loop runs like so:
|
|
323
|
-
1. The agent is invoked with the given input.
|
|
324
|
-
2. If there is a final output (i.e. the agent produces something of type
|
|
325
|
-
`agent.output_type`, the loop terminates.
|
|
326
|
-
3. If there's a handoff, we run the loop again, with the new agent.
|
|
327
|
-
4. Else, we run tool calls (if any), and re-run the loop.
|
|
328
|
-
|
|
329
|
-
In two cases, the agent may raise an exception:
|
|
330
|
-
1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
|
|
331
|
-
2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.
|
|
332
|
-
|
|
333
|
-
Note that only the first agent's input guardrails are run.
|
|
334
|
-
|
|
335
|
-
Args:
|
|
336
|
-
starting_agent: The starting agent to run.
|
|
337
|
-
input: The initial input to the agent. You can pass a single string for a user message,
|
|
338
|
-
or a list of input items.
|
|
339
|
-
context: The context to run the agent with.
|
|
340
|
-
max_turns: The maximum number of turns to run the agent for. A turn is defined as one
|
|
341
|
-
AI invocation (including any tool calls that might occur).
|
|
342
|
-
hooks: An object that receives callbacks on various lifecycle events.
|
|
343
|
-
run_config: Global settings for the entire agent run.
|
|
344
|
-
previous_response_id: The ID of the previous response, if using OpenAI models via the
|
|
345
|
-
Responses API, this allows you to skip passing in input from the previous turn.
|
|
346
|
-
|
|
347
|
-
Returns:
|
|
348
|
-
A run result containing all the inputs, guardrail results and the output of the last
|
|
349
|
-
agent. Agents may perform handoffs, so we don't know the specific type of the output.
|
|
350
|
-
"""
|
|
484
|
+
context = kwargs.get("context")
|
|
485
|
+
max_turns = kwargs.get("max_turns", DEFAULT_MAX_TURNS)
|
|
486
|
+
hooks = kwargs.get("hooks")
|
|
487
|
+
run_config = kwargs.get("run_config")
|
|
488
|
+
previous_response_id = kwargs.get("previous_response_id")
|
|
351
489
|
return asyncio.get_event_loop().run_until_complete(
|
|
352
|
-
|
|
490
|
+
self.run(
|
|
353
491
|
starting_agent,
|
|
354
492
|
input,
|
|
355
493
|
context=context,
|
|
@@ -360,47 +498,17 @@ class Runner:
|
|
|
360
498
|
)
|
|
361
499
|
)
|
|
362
500
|
|
|
363
|
-
@classmethod
|
|
364
501
|
def run_streamed(
|
|
365
|
-
|
|
502
|
+
self,
|
|
366
503
|
starting_agent: Agent[TContext],
|
|
367
504
|
input: str | list[TResponseInputItem],
|
|
368
|
-
|
|
369
|
-
max_turns: int = DEFAULT_MAX_TURNS,
|
|
370
|
-
hooks: RunHooks[TContext] | None = None,
|
|
371
|
-
run_config: RunConfig | None = None,
|
|
372
|
-
previous_response_id: str | None = None,
|
|
505
|
+
**kwargs: Unpack[RunOptions[TContext]],
|
|
373
506
|
) -> RunResultStreaming:
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
2. If there is a final output (i.e. the agent produces something of type
|
|
380
|
-
`agent.output_type`, the loop terminates.
|
|
381
|
-
3. If there's a handoff, we run the loop again, with the new agent.
|
|
382
|
-
4. Else, we run tool calls (if any), and re-run the loop.
|
|
383
|
-
|
|
384
|
-
In two cases, the agent may raise an exception:
|
|
385
|
-
1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised.
|
|
386
|
-
2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.
|
|
387
|
-
|
|
388
|
-
Note that only the first agent's input guardrails are run.
|
|
389
|
-
|
|
390
|
-
Args:
|
|
391
|
-
starting_agent: The starting agent to run.
|
|
392
|
-
input: The initial input to the agent. You can pass a single string for a user message,
|
|
393
|
-
or a list of input items.
|
|
394
|
-
context: The context to run the agent with.
|
|
395
|
-
max_turns: The maximum number of turns to run the agent for. A turn is defined as one
|
|
396
|
-
AI invocation (including any tool calls that might occur).
|
|
397
|
-
hooks: An object that receives callbacks on various lifecycle events.
|
|
398
|
-
run_config: Global settings for the entire agent run.
|
|
399
|
-
previous_response_id: The ID of the previous response, if using OpenAI models via the
|
|
400
|
-
Responses API, this allows you to skip passing in input from the previous turn.
|
|
401
|
-
Returns:
|
|
402
|
-
A result object that contains data about the run, as well as a method to stream events.
|
|
403
|
-
"""
|
|
507
|
+
context = kwargs.get("context")
|
|
508
|
+
max_turns = kwargs.get("max_turns", DEFAULT_MAX_TURNS)
|
|
509
|
+
hooks = kwargs.get("hooks")
|
|
510
|
+
run_config = kwargs.get("run_config")
|
|
511
|
+
previous_response_id = kwargs.get("previous_response_id")
|
|
404
512
|
if hooks is None:
|
|
405
513
|
hooks = RunHooks[Any]()
|
|
406
514
|
if run_config is None:
|
|
@@ -421,7 +529,7 @@ class Runner:
|
|
|
421
529
|
)
|
|
422
530
|
)
|
|
423
531
|
|
|
424
|
-
output_schema =
|
|
532
|
+
output_schema = AgentRunner._get_output_schema(starting_agent)
|
|
425
533
|
context_wrapper: RunContextWrapper[TContext] = RunContextWrapper(
|
|
426
534
|
context=context # type: ignore
|
|
427
535
|
)
|
|
@@ -444,7 +552,7 @@ class Runner:
|
|
|
444
552
|
|
|
445
553
|
# Kick off the actual agent loop in the background and return the streamed result object.
|
|
446
554
|
streamed_result._run_impl_task = asyncio.create_task(
|
|
447
|
-
|
|
555
|
+
self._start_streaming(
|
|
448
556
|
starting_input=input,
|
|
449
557
|
streamed_result=streamed_result,
|
|
450
558
|
starting_agent=starting_agent,
|
|
@@ -501,7 +609,7 @@ class Runner:
|
|
|
501
609
|
streamed_result.input_guardrail_results = guardrail_results
|
|
502
610
|
|
|
503
611
|
@classmethod
|
|
504
|
-
async def
|
|
612
|
+
async def _start_streaming(
|
|
505
613
|
cls,
|
|
506
614
|
starting_input: str | list[TResponseInputItem],
|
|
507
615
|
streamed_result: RunResultStreaming,
|
|
@@ -1008,3 +1116,6 @@ class Runner:
|
|
|
1008
1116
|
return agent.model
|
|
1009
1117
|
|
|
1010
1118
|
return run_config.model_provider.get_model(agent.model)
|
|
1119
|
+
|
|
1120
|
+
|
|
1121
|
+
DEFAULT_AGENT_RUNNER = AgentRunner()
|
agents/tracing/__init__.py
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
import atexit
|
|
2
2
|
|
|
3
|
+
from agents.tracing.provider import DefaultTraceProvider, TraceProvider
|
|
4
|
+
|
|
3
5
|
from .create import (
|
|
4
6
|
agent_span,
|
|
5
7
|
custom_span,
|
|
@@ -18,7 +20,7 @@ from .create import (
|
|
|
18
20
|
)
|
|
19
21
|
from .processor_interface import TracingProcessor
|
|
20
22
|
from .processors import default_exporter, default_processor
|
|
21
|
-
from .setup import
|
|
23
|
+
from .setup import get_trace_provider, set_trace_provider
|
|
22
24
|
from .span_data import (
|
|
23
25
|
AgentSpanData,
|
|
24
26
|
CustomSpanData,
|
|
@@ -45,10 +47,12 @@ __all__ = [
|
|
|
45
47
|
"generation_span",
|
|
46
48
|
"get_current_span",
|
|
47
49
|
"get_current_trace",
|
|
50
|
+
"get_trace_provider",
|
|
48
51
|
"guardrail_span",
|
|
49
52
|
"handoff_span",
|
|
50
53
|
"response_span",
|
|
51
54
|
"set_trace_processors",
|
|
55
|
+
"set_trace_provider",
|
|
52
56
|
"set_tracing_disabled",
|
|
53
57
|
"trace",
|
|
54
58
|
"Trace",
|
|
@@ -67,6 +71,7 @@ __all__ = [
|
|
|
67
71
|
"SpeechSpanData",
|
|
68
72
|
"TranscriptionSpanData",
|
|
69
73
|
"TracingProcessor",
|
|
74
|
+
"TraceProvider",
|
|
70
75
|
"gen_trace_id",
|
|
71
76
|
"gen_span_id",
|
|
72
77
|
"speech_group_span",
|
|
@@ -80,21 +85,21 @@ def add_trace_processor(span_processor: TracingProcessor) -> None:
|
|
|
80
85
|
"""
|
|
81
86
|
Adds a new trace processor. This processor will receive all traces/spans.
|
|
82
87
|
"""
|
|
83
|
-
|
|
88
|
+
get_trace_provider().register_processor(span_processor)
|
|
84
89
|
|
|
85
90
|
|
|
86
91
|
def set_trace_processors(processors: list[TracingProcessor]) -> None:
|
|
87
92
|
"""
|
|
88
93
|
Set the list of trace processors. This will replace the current list of processors.
|
|
89
94
|
"""
|
|
90
|
-
|
|
95
|
+
get_trace_provider().set_processors(processors)
|
|
91
96
|
|
|
92
97
|
|
|
93
98
|
def set_tracing_disabled(disabled: bool) -> None:
|
|
94
99
|
"""
|
|
95
100
|
Set whether tracing is globally disabled.
|
|
96
101
|
"""
|
|
97
|
-
|
|
102
|
+
get_trace_provider().set_disabled(disabled)
|
|
98
103
|
|
|
99
104
|
|
|
100
105
|
def set_tracing_export_api_key(api_key: str) -> None:
|
|
@@ -104,10 +109,11 @@ def set_tracing_export_api_key(api_key: str) -> None:
|
|
|
104
109
|
default_exporter().set_api_key(api_key)
|
|
105
110
|
|
|
106
111
|
|
|
112
|
+
set_trace_provider(DefaultTraceProvider())
|
|
107
113
|
# Add the default processor, which exports traces and spans to the backend in batches. You can
|
|
108
114
|
# change the default behavior by either:
|
|
109
115
|
# 1. calling add_trace_processor(), which adds additional processors, or
|
|
110
116
|
# 2. calling set_trace_processors(), which replaces the default processor.
|
|
111
117
|
add_trace_processor(default_processor())
|
|
112
118
|
|
|
113
|
-
atexit.register(
|
|
119
|
+
atexit.register(get_trace_provider().shutdown)
|
agents/tracing/create.py
CHANGED
|
@@ -4,7 +4,7 @@ from collections.abc import Mapping, Sequence
|
|
|
4
4
|
from typing import TYPE_CHECKING, Any
|
|
5
5
|
|
|
6
6
|
from ..logger import logger
|
|
7
|
-
from .setup import
|
|
7
|
+
from .setup import get_trace_provider
|
|
8
8
|
from .span_data import (
|
|
9
9
|
AgentSpanData,
|
|
10
10
|
CustomSpanData,
|
|
@@ -56,13 +56,13 @@ def trace(
|
|
|
56
56
|
Returns:
|
|
57
57
|
The newly created trace object.
|
|
58
58
|
"""
|
|
59
|
-
current_trace =
|
|
59
|
+
current_trace = get_trace_provider().get_current_trace()
|
|
60
60
|
if current_trace:
|
|
61
61
|
logger.warning(
|
|
62
62
|
"Trace already exists. Creating a new trace, but this is probably a mistake."
|
|
63
63
|
)
|
|
64
64
|
|
|
65
|
-
return
|
|
65
|
+
return get_trace_provider().create_trace(
|
|
66
66
|
name=workflow_name,
|
|
67
67
|
trace_id=trace_id,
|
|
68
68
|
group_id=group_id,
|
|
@@ -73,12 +73,12 @@ def trace(
|
|
|
73
73
|
|
|
74
74
|
def get_current_trace() -> Trace | None:
|
|
75
75
|
"""Returns the currently active trace, if present."""
|
|
76
|
-
return
|
|
76
|
+
return get_trace_provider().get_current_trace()
|
|
77
77
|
|
|
78
78
|
|
|
79
79
|
def get_current_span() -> Span[Any] | None:
|
|
80
80
|
"""Returns the currently active span, if present."""
|
|
81
|
-
return
|
|
81
|
+
return get_trace_provider().get_current_span()
|
|
82
82
|
|
|
83
83
|
|
|
84
84
|
def agent_span(
|
|
@@ -108,7 +108,7 @@ def agent_span(
|
|
|
108
108
|
Returns:
|
|
109
109
|
The newly created agent span.
|
|
110
110
|
"""
|
|
111
|
-
return
|
|
111
|
+
return get_trace_provider().create_span(
|
|
112
112
|
span_data=AgentSpanData(name=name, handoffs=handoffs, tools=tools, output_type=output_type),
|
|
113
113
|
span_id=span_id,
|
|
114
114
|
parent=parent,
|
|
@@ -141,7 +141,7 @@ def function_span(
|
|
|
141
141
|
Returns:
|
|
142
142
|
The newly created function span.
|
|
143
143
|
"""
|
|
144
|
-
return
|
|
144
|
+
return get_trace_provider().create_span(
|
|
145
145
|
span_data=FunctionSpanData(name=name, input=input, output=output),
|
|
146
146
|
span_id=span_id,
|
|
147
147
|
parent=parent,
|
|
@@ -183,7 +183,7 @@ def generation_span(
|
|
|
183
183
|
Returns:
|
|
184
184
|
The newly created generation span.
|
|
185
185
|
"""
|
|
186
|
-
return
|
|
186
|
+
return get_trace_provider().create_span(
|
|
187
187
|
span_data=GenerationSpanData(
|
|
188
188
|
input=input,
|
|
189
189
|
output=output,
|
|
@@ -215,7 +215,7 @@ def response_span(
|
|
|
215
215
|
trace/span as the parent.
|
|
216
216
|
disabled: If True, we will return a Span but the Span will not be recorded.
|
|
217
217
|
"""
|
|
218
|
-
return
|
|
218
|
+
return get_trace_provider().create_span(
|
|
219
219
|
span_data=ResponseSpanData(response=response),
|
|
220
220
|
span_id=span_id,
|
|
221
221
|
parent=parent,
|
|
@@ -246,7 +246,7 @@ def handoff_span(
|
|
|
246
246
|
Returns:
|
|
247
247
|
The newly created handoff span.
|
|
248
248
|
"""
|
|
249
|
-
return
|
|
249
|
+
return get_trace_provider().create_span(
|
|
250
250
|
span_data=HandoffSpanData(from_agent=from_agent, to_agent=to_agent),
|
|
251
251
|
span_id=span_id,
|
|
252
252
|
parent=parent,
|
|
@@ -278,7 +278,7 @@ def custom_span(
|
|
|
278
278
|
Returns:
|
|
279
279
|
The newly created custom span.
|
|
280
280
|
"""
|
|
281
|
-
return
|
|
281
|
+
return get_trace_provider().create_span(
|
|
282
282
|
span_data=CustomSpanData(name=name, data=data or {}),
|
|
283
283
|
span_id=span_id,
|
|
284
284
|
parent=parent,
|
|
@@ -306,7 +306,7 @@ def guardrail_span(
|
|
|
306
306
|
trace/span as the parent.
|
|
307
307
|
disabled: If True, we will return a Span but the Span will not be recorded.
|
|
308
308
|
"""
|
|
309
|
-
return
|
|
309
|
+
return get_trace_provider().create_span(
|
|
310
310
|
span_data=GuardrailSpanData(name=name, triggered=triggered),
|
|
311
311
|
span_id=span_id,
|
|
312
312
|
parent=parent,
|
|
@@ -344,7 +344,7 @@ def transcription_span(
|
|
|
344
344
|
Returns:
|
|
345
345
|
The newly created speech-to-text span.
|
|
346
346
|
"""
|
|
347
|
-
return
|
|
347
|
+
return get_trace_provider().create_span(
|
|
348
348
|
span_data=TranscriptionSpanData(
|
|
349
349
|
input=input,
|
|
350
350
|
input_format=input_format,
|
|
@@ -386,7 +386,7 @@ def speech_span(
|
|
|
386
386
|
trace/span as the parent.
|
|
387
387
|
disabled: If True, we will return a Span but the Span will not be recorded.
|
|
388
388
|
"""
|
|
389
|
-
return
|
|
389
|
+
return get_trace_provider().create_span(
|
|
390
390
|
span_data=SpeechSpanData(
|
|
391
391
|
model=model,
|
|
392
392
|
input=input,
|
|
@@ -419,7 +419,7 @@ def speech_group_span(
|
|
|
419
419
|
trace/span as the parent.
|
|
420
420
|
disabled: If True, we will return a Span but the Span will not be recorded.
|
|
421
421
|
"""
|
|
422
|
-
return
|
|
422
|
+
return get_trace_provider().create_span(
|
|
423
423
|
span_data=SpeechGroupSpanData(input=input),
|
|
424
424
|
span_id=span_id,
|
|
425
425
|
parent=parent,
|
|
@@ -447,7 +447,7 @@ def mcp_tools_span(
|
|
|
447
447
|
trace/span as the parent.
|
|
448
448
|
disabled: If True, we will return a Span but the Span will not be recorded.
|
|
449
449
|
"""
|
|
450
|
-
return
|
|
450
|
+
return get_trace_provider().create_span(
|
|
451
451
|
span_data=MCPListToolsSpanData(server=server, result=result),
|
|
452
452
|
span_id=span_id,
|
|
453
453
|
parent=parent,
|
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import threading
|
|
5
|
+
import uuid
|
|
6
|
+
from abc import ABC, abstractmethod
|
|
7
|
+
from datetime import datetime, timezone
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from ..logger import logger
|
|
11
|
+
from .processor_interface import TracingProcessor
|
|
12
|
+
from .scope import Scope
|
|
13
|
+
from .spans import NoOpSpan, Span, SpanImpl, TSpanData
|
|
14
|
+
from .traces import NoOpTrace, Trace, TraceImpl
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class SynchronousMultiTracingProcessor(TracingProcessor):
    """
    Forwards all calls to a list of TracingProcessors, in order of registration.
    """

    def __init__(self):
        # An immutable tuple snapshot means callbacks can iterate safely while
        # another thread registers or replaces processors.
        self._processors: tuple[TracingProcessor, ...] = ()
        self._lock = threading.Lock()

    def add_tracing_processor(self, tracing_processor: TracingProcessor):
        """
        Add a processor to the list of processors. Each processor will receive all traces/spans.
        """
        with self._lock:
            self._processors = (*self._processors, tracing_processor)

    def set_processors(self, processors: list[TracingProcessor]):
        """
        Set the list of processors. This will replace the current list of processors.
        """
        with self._lock:
            self._processors = tuple(processors)

    def on_trace_start(self, trace: Trace) -> None:
        """
        Called when a trace is started.
        """
        for proc in self._processors:
            proc.on_trace_start(trace)

    def on_trace_end(self, trace: Trace) -> None:
        """
        Called when a trace is finished.
        """
        for proc in self._processors:
            proc.on_trace_end(trace)

    def on_span_start(self, span: Span[Any]) -> None:
        """
        Called when a span is started.
        """
        for proc in self._processors:
            proc.on_span_start(span)

    def on_span_end(self, span: Span[Any]) -> None:
        """
        Called when a span is finished.
        """
        for proc in self._processors:
            proc.on_span_end(span)

    def shutdown(self) -> None:
        """
        Called when the application stops.
        """
        for processor in self._processors:
            logger.debug(f"Shutting down trace processor {processor}")
            processor.shutdown()

    def force_flush(self):
        """
        Force the processors to flush their buffers.
        """
        for proc in self._processors:
            proc.force_flush()
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class TraceProvider(ABC):
    """Interface for creating traces and spans."""

    @abstractmethod
    def register_processor(self, processor: TracingProcessor) -> None:
        """Register a processor; it will be notified of every trace and span."""

    @abstractmethod
    def set_processors(self, processors: list[TracingProcessor]) -> None:
        """Replace the list of processors with ``processors``."""

    @abstractmethod
    def get_current_trace(self) -> Trace | None:
        """Return the trace that is active in the current scope, or ``None``."""

    @abstractmethod
    def get_current_span(self) -> Span[Any] | None:
        """Return the span that is active in the current scope, or ``None``."""

    @abstractmethod
    def set_disabled(self, disabled: bool) -> None:
        """Turn tracing on or off globally."""

    @abstractmethod
    def time_iso(self) -> str:
        """Return the current time in ISO 8601 format."""

    @abstractmethod
    def gen_trace_id(self) -> str:
        """Generate a new trace identifier."""

    @abstractmethod
    def gen_span_id(self) -> str:
        """Generate a new span identifier."""

    @abstractmethod
    def gen_group_id(self) -> str:
        """Generate a new group identifier."""

    @abstractmethod
    def create_trace(
        self,
        name: str,
        trace_id: str | None = None,
        group_id: str | None = None,
        metadata: dict[str, Any] | None = None,
        disabled: bool = False,
    ) -> Trace:
        """Create a new trace."""

    @abstractmethod
    def create_span(
        self,
        span_data: TSpanData,
        span_id: str | None = None,
        parent: Trace | Span[Any] | None = None,
        disabled: bool = False,
    ) -> Span[TSpanData]:
        """Create a new span."""

    @abstractmethod
    def shutdown(self) -> None:
        """Clean up any resources used by the provider."""
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
class DefaultTraceProvider(TraceProvider):
    """Default ``TraceProvider`` that fans traces/spans out to registered processors.

    Tracing may be disabled at construction time via the
    ``OPENAI_AGENTS_DISABLE_TRACING`` environment variable (``"true"`` or ``"1"``),
    or at runtime with :meth:`set_disabled`. When disabled, no-op traces/spans are
    returned so calling code never has to branch on the tracing state.
    """

    def __init__(self) -> None:
        self._multi_processor = SynchronousMultiTracingProcessor()
        # Honor the environment kill-switch once, at construction time.
        self._disabled = os.environ.get("OPENAI_AGENTS_DISABLE_TRACING", "false").lower() in (
            "true",
            "1",
        )

    def register_processor(self, processor: TracingProcessor):
        """
        Add a processor to the list of processors. Each processor will receive all traces/spans.
        """
        self._multi_processor.add_tracing_processor(processor)

    def set_processors(self, processors: list[TracingProcessor]):
        """
        Set the list of processors. This will replace the current list of processors.
        """
        self._multi_processor.set_processors(processors)

    def get_current_trace(self) -> Trace | None:
        """
        Returns the currently active trace, if any.
        """
        return Scope.get_current_trace()

    def get_current_span(self) -> Span[Any] | None:
        """
        Returns the currently active span, if any.
        """
        return Scope.get_current_span()

    def set_disabled(self, disabled: bool) -> None:
        """
        Set whether tracing is disabled.
        """
        self._disabled = disabled

    def time_iso(self) -> str:
        """Return the current UTC time in ISO 8601 format."""
        return datetime.now(timezone.utc).isoformat()

    def gen_trace_id(self) -> str:
        """Generate a new trace ID."""
        return f"trace_{uuid.uuid4().hex}"

    def gen_span_id(self) -> str:
        """Generate a new span ID."""
        return f"span_{uuid.uuid4().hex[:24]}"

    def gen_group_id(self) -> str:
        """Generate a new group ID."""
        return f"group_{uuid.uuid4().hex[:24]}"

    def create_trace(
        self,
        name: str,
        trace_id: str | None = None,
        group_id: str | None = None,
        metadata: dict[str, Any] | None = None,
        disabled: bool = False,
    ) -> Trace:
        """
        Create a new trace. Returns a ``NoOpTrace`` when tracing is disabled
        (globally or via ``disabled=True``).
        """
        if self._disabled or disabled:
            logger.debug(f"Tracing is disabled. Not creating trace {name}")
            return NoOpTrace()

        trace_id = trace_id or self.gen_trace_id()

        logger.debug(f"Creating trace {name} with id {trace_id}")

        return TraceImpl(
            name=name,
            trace_id=trace_id,
            group_id=group_id,
            metadata=metadata,
            processor=self._multi_processor,
        )

    def create_span(
        self,
        span_data: TSpanData,
        span_id: str | None = None,
        parent: Trace | Span[Any] | None = None,
        disabled: bool = False,
    ) -> Span[TSpanData]:
        """
        Create a new span. When ``parent`` is omitted, the currently scoped
        trace/span are used; a ``NoOpSpan`` is returned when tracing is disabled,
        there is no active trace, or the parent is itself a no-op.
        """
        if self._disabled or disabled:
            logger.debug(f"Tracing is disabled. Not creating span {span_data}")
            return NoOpSpan(span_data)

        if not parent:
            current_span = Scope.get_current_span()
            current_trace = Scope.get_current_trace()
            if current_trace is None:
                # Fix: the two adjacent literals previously concatenated without a
                # separator ("...firstReturning NoOpSpan.").
                logger.error(
                    "No active trace. Make sure to start a trace with `trace()` first. "
                    "Returning NoOpSpan."
                )
                return NoOpSpan(span_data)
            elif isinstance(current_trace, NoOpTrace) or isinstance(current_span, NoOpSpan):
                logger.debug(
                    f"Parent {current_span} or {current_trace} is no-op, returning NoOpSpan"
                )
                return NoOpSpan(span_data)

            parent_id = current_span.span_id if current_span else None
            trace_id = current_trace.trace_id

        elif isinstance(parent, Trace):
            if isinstance(parent, NoOpTrace):
                logger.debug(f"Parent {parent} is no-op, returning NoOpSpan")
                return NoOpSpan(span_data)
            trace_id = parent.trace_id
            parent_id = None
        elif isinstance(parent, Span):
            if isinstance(parent, NoOpSpan):
                logger.debug(f"Parent {parent} is no-op, returning NoOpSpan")
                return NoOpSpan(span_data)
            parent_id = parent.span_id
            trace_id = parent.trace_id

        # Resolve the id before logging so the log line never reports `None`.
        span_id = span_id or self.gen_span_id()
        logger.debug(f"Creating span {span_data} with id {span_id}")

        return SpanImpl(
            trace_id=trace_id,
            span_id=span_id,
            parent_id=parent_id,
            processor=self._multi_processor,
            span_data=span_data,
        )

    def shutdown(self) -> None:
        """Shut down all registered processors; a no-op when tracing is disabled."""
        if self._disabled:
            return

        try:
            logger.debug("Shutting down trace provider")
            self._multi_processor.shutdown()
        except Exception as e:
            logger.error(f"Error shutting down trace provider: {e}")
|
agents/tracing/setup.py
CHANGED
|
@@ -1,214 +1,21 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
import
|
|
4
|
-
import threading
|
|
5
|
-
from typing import Any
|
|
3
|
+
from typing import TYPE_CHECKING
|
|
6
4
|
|
|
7
|
-
|
|
8
|
-
from . import
|
|
9
|
-
from .processor_interface import TracingProcessor
|
|
10
|
-
from .scope import Scope
|
|
11
|
-
from .spans import NoOpSpan, Span, SpanImpl, TSpanData
|
|
12
|
-
from .traces import NoOpTrace, Trace, TraceImpl
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
from .provider import TraceProvider
|
|
13
7
|
|
|
8
|
+
GLOBAL_TRACE_PROVIDER: TraceProvider | None = None
|
|
14
9
|
|
|
15
|
-
class SynchronousMultiTracingProcessor(TracingProcessor):
|
|
16
|
-
"""
|
|
17
|
-
Forwards all calls to a list of TracingProcessors, in order of registration.
|
|
18
|
-
"""
|
|
19
10
|
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
11
|
+
def set_trace_provider(provider: TraceProvider) -> None:
    """Install ``provider`` as the process-wide trace provider."""
    global GLOBAL_TRACE_PROVIDER
    GLOBAL_TRACE_PROVIDER = provider
|
|
24
15
|
|
|
25
|
-
def add_tracing_processor(self, tracing_processor: TracingProcessor):
|
|
26
|
-
"""
|
|
27
|
-
Add a processor to the list of processors. Each processor will receive all traces/spans.
|
|
28
|
-
"""
|
|
29
|
-
with self._lock:
|
|
30
|
-
self._processors += (tracing_processor,)
|
|
31
16
|
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
""
|
|
36
|
-
|
|
37
|
-
self._processors = tuple(processors)
|
|
38
|
-
|
|
39
|
-
def on_trace_start(self, trace: Trace) -> None:
|
|
40
|
-
"""
|
|
41
|
-
Called when a trace is started.
|
|
42
|
-
"""
|
|
43
|
-
for processor in self._processors:
|
|
44
|
-
processor.on_trace_start(trace)
|
|
45
|
-
|
|
46
|
-
def on_trace_end(self, trace: Trace) -> None:
|
|
47
|
-
"""
|
|
48
|
-
Called when a trace is finished.
|
|
49
|
-
"""
|
|
50
|
-
for processor in self._processors:
|
|
51
|
-
processor.on_trace_end(trace)
|
|
52
|
-
|
|
53
|
-
def on_span_start(self, span: Span[Any]) -> None:
|
|
54
|
-
"""
|
|
55
|
-
Called when a span is started.
|
|
56
|
-
"""
|
|
57
|
-
for processor in self._processors:
|
|
58
|
-
processor.on_span_start(span)
|
|
59
|
-
|
|
60
|
-
def on_span_end(self, span: Span[Any]) -> None:
|
|
61
|
-
"""
|
|
62
|
-
Called when a span is finished.
|
|
63
|
-
"""
|
|
64
|
-
for processor in self._processors:
|
|
65
|
-
processor.on_span_end(span)
|
|
66
|
-
|
|
67
|
-
def shutdown(self) -> None:
|
|
68
|
-
"""
|
|
69
|
-
Called when the application stops.
|
|
70
|
-
"""
|
|
71
|
-
for processor in self._processors:
|
|
72
|
-
logger.debug(f"Shutting down trace processor {processor}")
|
|
73
|
-
processor.shutdown()
|
|
74
|
-
|
|
75
|
-
def force_flush(self):
|
|
76
|
-
"""
|
|
77
|
-
Force the processors to flush their buffers.
|
|
78
|
-
"""
|
|
79
|
-
for processor in self._processors:
|
|
80
|
-
processor.force_flush()
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
class TraceProvider:
|
|
84
|
-
def __init__(self):
|
|
85
|
-
self._multi_processor = SynchronousMultiTracingProcessor()
|
|
86
|
-
self._disabled = os.environ.get("OPENAI_AGENTS_DISABLE_TRACING", "false").lower() in (
|
|
87
|
-
"true",
|
|
88
|
-
"1",
|
|
89
|
-
)
|
|
90
|
-
|
|
91
|
-
def register_processor(self, processor: TracingProcessor):
|
|
92
|
-
"""
|
|
93
|
-
Add a processor to the list of processors. Each processor will receive all traces/spans.
|
|
94
|
-
"""
|
|
95
|
-
self._multi_processor.add_tracing_processor(processor)
|
|
96
|
-
|
|
97
|
-
def set_processors(self, processors: list[TracingProcessor]):
|
|
98
|
-
"""
|
|
99
|
-
Set the list of processors. This will replace the current list of processors.
|
|
100
|
-
"""
|
|
101
|
-
self._multi_processor.set_processors(processors)
|
|
102
|
-
|
|
103
|
-
def get_current_trace(self) -> Trace | None:
|
|
104
|
-
"""
|
|
105
|
-
Returns the currently active trace, if any.
|
|
106
|
-
"""
|
|
107
|
-
return Scope.get_current_trace()
|
|
108
|
-
|
|
109
|
-
def get_current_span(self) -> Span[Any] | None:
|
|
110
|
-
"""
|
|
111
|
-
Returns the currently active span, if any.
|
|
112
|
-
"""
|
|
113
|
-
return Scope.get_current_span()
|
|
114
|
-
|
|
115
|
-
def set_disabled(self, disabled: bool) -> None:
|
|
116
|
-
"""
|
|
117
|
-
Set whether tracing is disabled.
|
|
118
|
-
"""
|
|
119
|
-
self._disabled = disabled
|
|
120
|
-
|
|
121
|
-
def create_trace(
|
|
122
|
-
self,
|
|
123
|
-
name: str,
|
|
124
|
-
trace_id: str | None = None,
|
|
125
|
-
group_id: str | None = None,
|
|
126
|
-
metadata: dict[str, Any] | None = None,
|
|
127
|
-
disabled: bool = False,
|
|
128
|
-
) -> Trace:
|
|
129
|
-
"""
|
|
130
|
-
Create a new trace.
|
|
131
|
-
"""
|
|
132
|
-
if self._disabled or disabled:
|
|
133
|
-
logger.debug(f"Tracing is disabled. Not creating trace {name}")
|
|
134
|
-
return NoOpTrace()
|
|
135
|
-
|
|
136
|
-
trace_id = trace_id or util.gen_trace_id()
|
|
137
|
-
|
|
138
|
-
logger.debug(f"Creating trace {name} with id {trace_id}")
|
|
139
|
-
|
|
140
|
-
return TraceImpl(
|
|
141
|
-
name=name,
|
|
142
|
-
trace_id=trace_id,
|
|
143
|
-
group_id=group_id,
|
|
144
|
-
metadata=metadata,
|
|
145
|
-
processor=self._multi_processor,
|
|
146
|
-
)
|
|
147
|
-
|
|
148
|
-
def create_span(
|
|
149
|
-
self,
|
|
150
|
-
span_data: TSpanData,
|
|
151
|
-
span_id: str | None = None,
|
|
152
|
-
parent: Trace | Span[Any] | None = None,
|
|
153
|
-
disabled: bool = False,
|
|
154
|
-
) -> Span[TSpanData]:
|
|
155
|
-
"""
|
|
156
|
-
Create a new span.
|
|
157
|
-
"""
|
|
158
|
-
if self._disabled or disabled:
|
|
159
|
-
logger.debug(f"Tracing is disabled. Not creating span {span_data}")
|
|
160
|
-
return NoOpSpan(span_data)
|
|
161
|
-
|
|
162
|
-
if not parent:
|
|
163
|
-
current_span = Scope.get_current_span()
|
|
164
|
-
current_trace = Scope.get_current_trace()
|
|
165
|
-
if current_trace is None:
|
|
166
|
-
logger.error(
|
|
167
|
-
"No active trace. Make sure to start a trace with `trace()` first"
|
|
168
|
-
"Returning NoOpSpan."
|
|
169
|
-
)
|
|
170
|
-
return NoOpSpan(span_data)
|
|
171
|
-
elif isinstance(current_trace, NoOpTrace) or isinstance(current_span, NoOpSpan):
|
|
172
|
-
logger.debug(
|
|
173
|
-
f"Parent {current_span} or {current_trace} is no-op, returning NoOpSpan"
|
|
174
|
-
)
|
|
175
|
-
return NoOpSpan(span_data)
|
|
176
|
-
|
|
177
|
-
parent_id = current_span.span_id if current_span else None
|
|
178
|
-
trace_id = current_trace.trace_id
|
|
179
|
-
|
|
180
|
-
elif isinstance(parent, Trace):
|
|
181
|
-
if isinstance(parent, NoOpTrace):
|
|
182
|
-
logger.debug(f"Parent {parent} is no-op, returning NoOpSpan")
|
|
183
|
-
return NoOpSpan(span_data)
|
|
184
|
-
trace_id = parent.trace_id
|
|
185
|
-
parent_id = None
|
|
186
|
-
elif isinstance(parent, Span):
|
|
187
|
-
if isinstance(parent, NoOpSpan):
|
|
188
|
-
logger.debug(f"Parent {parent} is no-op, returning NoOpSpan")
|
|
189
|
-
return NoOpSpan(span_data)
|
|
190
|
-
parent_id = parent.span_id
|
|
191
|
-
trace_id = parent.trace_id
|
|
192
|
-
|
|
193
|
-
logger.debug(f"Creating span {span_data} with id {span_id}")
|
|
194
|
-
|
|
195
|
-
return SpanImpl(
|
|
196
|
-
trace_id=trace_id,
|
|
197
|
-
span_id=span_id,
|
|
198
|
-
parent_id=parent_id,
|
|
199
|
-
processor=self._multi_processor,
|
|
200
|
-
span_data=span_data,
|
|
201
|
-
)
|
|
202
|
-
|
|
203
|
-
def shutdown(self) -> None:
|
|
204
|
-
if self._disabled:
|
|
205
|
-
return
|
|
206
|
-
|
|
207
|
-
try:
|
|
208
|
-
logger.debug("Shutting down trace provider")
|
|
209
|
-
self._multi_processor.shutdown()
|
|
210
|
-
except Exception as e:
|
|
211
|
-
logger.error(f"Error shutting down trace provider: {e}")
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
GLOBAL_TRACE_PROVIDER = TraceProvider()
|
|
17
|
+
def get_trace_provider() -> TraceProvider:
    """Get the global trace provider used by tracing utilities."""
    provider = GLOBAL_TRACE_PROVIDER
    if provider is None:
        raise RuntimeError("Trace provider not set")
    return provider
|
agents/tracing/util.py
CHANGED
|
@@ -1,22 +1,21 @@
|
|
|
1
|
-
import
|
|
2
|
-
from datetime import datetime, timezone
|
|
1
|
+
from .setup import get_trace_provider
|
|
3
2
|
|
|
4
3
|
|
|
5
4
|
def time_iso() -> str:
    """Current time as an ISO 8601 string, as produced by the active trace provider."""
    provider = get_trace_provider()
    return provider.time_iso()
|
|
8
7
|
|
|
9
8
|
|
|
10
9
|
def gen_trace_id() -> str:
    """Return a fresh trace ID from the active trace provider."""
    provider = get_trace_provider()
    return provider.gen_trace_id()
|
|
13
12
|
|
|
14
13
|
|
|
15
14
|
def gen_span_id() -> str:
    """Return a fresh span ID from the active trace provider."""
    provider = get_trace_provider()
    return provider.gen_span_id()
|
|
18
17
|
|
|
19
18
|
|
|
20
19
|
def gen_group_id() -> str:
    """Return a fresh group ID from the active trace provider."""
    provider = get_trace_provider()
    return provider.gen_group_id()
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
agents/__init__.py,sha256=
|
|
1
|
+
agents/__init__.py,sha256=r-WDSAsm-x9AMTjBdcUqFxxRB1KxqzDeF9s2lddTLBY,7695
|
|
2
2
|
agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
|
|
3
3
|
agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
|
|
4
4
|
agents/_run_impl.py,sha256=_3XbxIKNLXJHvjCQgnQTosT0CWCS9F7qFtW_wOdDeNQ,42863
|
|
@@ -17,7 +17,7 @@ agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
|
|
|
17
17
|
agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
|
|
18
18
|
agents/repl.py,sha256=v06JyiZnHfqWZHpHEUj9CSH4RTfIVKQ9NJYwN_YiwT0,2578
|
|
19
19
|
agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
|
|
20
|
-
agents/run.py,sha256=
|
|
20
|
+
agents/run.py,sha256=VPtvY9Ch22A2W84aMqI-mlfgEDBNGaEgkgZUJy4iHYo,46004
|
|
21
21
|
agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
|
|
22
22
|
agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
|
|
23
23
|
agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
|
|
@@ -46,17 +46,18 @@ agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qc
|
|
|
46
46
|
agents/models/openai_chatcompletions.py,sha256=brGc48JXbNJYKFD6Fz6HmC_-p8FfwvpFd8_cQtJdEAk,11877
|
|
47
47
|
agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
|
|
48
48
|
agents/models/openai_responses.py,sha256=9XtVlZbzch0g96E8lT4wbvTHN_12W1re-U4r4h4VPSY,15875
|
|
49
|
-
agents/tracing/__init__.py,sha256
|
|
50
|
-
agents/tracing/create.py,sha256=
|
|
49
|
+
agents/tracing/__init__.py,sha256=kyuNg3ieMca2m_EG0EqnwIh-SiR7h2ubnd410k49hJU,3006
|
|
50
|
+
agents/tracing/create.py,sha256=Gm9N5O2DeBy6UU86tRN0wnmzWyXb-qAUBbTj9oxIHao,18106
|
|
51
51
|
agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
|
|
52
52
|
agents/tracing/processor_interface.py,sha256=wNyZCwNJko5CrUIWD_lMou5ppQ67CFYwvWRsJRM3up8,1659
|
|
53
53
|
agents/tracing/processors.py,sha256=lOdZHwo0rQAflVkKWOZinnWyLtS0stALyydiFOC0gss,11389
|
|
54
|
+
agents/tracing/provider.py,sha256=hiMTAiVnmnZ2RW6HYvL1hckXE-GQEqTSRvZCVcBY7pI,9212
|
|
54
55
|
agents/tracing/scope.py,sha256=u17_m8RPpGvbHrTkaO_kDi5ROBWhfOAIgBe7suiaRD4,1445
|
|
55
|
-
agents/tracing/setup.py,sha256=
|
|
56
|
+
agents/tracing/setup.py,sha256=2h9TH1GAKcXKM1U99dOKKR3XlHp8JKzh2JG3DQPKyhY,612
|
|
56
57
|
agents/tracing/span_data.py,sha256=nI2Fbu1ORE8ybE6m6RuddTJF5E5xFmEj8Mq5bSFv4bE,9017
|
|
57
58
|
agents/tracing/spans.py,sha256=6vVzocGMsdgIma1ksqkBZmhar91xj4RpgcpUC3iibqg,6606
|
|
58
59
|
agents/tracing/traces.py,sha256=G5LlECSK-DBRFP-bjT8maZjBQulz6SaHILYauUVlfq8,4775
|
|
59
|
-
agents/tracing/util.py,sha256=
|
|
60
|
+
agents/tracing/util.py,sha256=J7IZgVDmeW0aZDw8LBSjBKrlQbcOmaqZE7XQjolPwi8,490
|
|
60
61
|
agents/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
61
62
|
agents/util/_coro.py,sha256=S38XUYFC7bqTELSgMUBsAX1GoRlIrV7coupcUAWH__4,45
|
|
62
63
|
agents/util/_error_tracing.py,sha256=hdkYNx180b18lP0PSB1toE5atNHsMg_Bm9Osw812vLo,421
|
|
@@ -79,7 +80,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
|
|
|
79
80
|
agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
|
|
80
81
|
agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
|
|
81
82
|
agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
|
|
82
|
-
openai_agents-0.0.
|
|
83
|
-
openai_agents-0.0.
|
|
84
|
-
openai_agents-0.0.
|
|
85
|
-
openai_agents-0.0.
|
|
83
|
+
openai_agents-0.0.19.dist-info/METADATA,sha256=as2IOufjz7sJh4QGTo-or6vpcrFkRVZ2Ts-hK9uyRN8,8297
|
|
84
|
+
openai_agents-0.0.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
85
|
+
openai_agents-0.0.19.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
|
|
86
|
+
openai_agents-0.0.19.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|