openai-agents 0.2.8__py3-none-any.whl → 0.6.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +105 -4
- agents/_debug.py +15 -4
- agents/_run_impl.py +1203 -96
- agents/agent.py +164 -19
- agents/apply_diff.py +329 -0
- agents/editor.py +47 -0
- agents/exceptions.py +35 -0
- agents/extensions/experimental/__init__.py +6 -0
- agents/extensions/experimental/codex/__init__.py +92 -0
- agents/extensions/experimental/codex/codex.py +89 -0
- agents/extensions/experimental/codex/codex_options.py +35 -0
- agents/extensions/experimental/codex/codex_tool.py +1142 -0
- agents/extensions/experimental/codex/events.py +162 -0
- agents/extensions/experimental/codex/exec.py +263 -0
- agents/extensions/experimental/codex/items.py +245 -0
- agents/extensions/experimental/codex/output_schema_file.py +50 -0
- agents/extensions/experimental/codex/payloads.py +31 -0
- agents/extensions/experimental/codex/thread.py +214 -0
- agents/extensions/experimental/codex/thread_options.py +54 -0
- agents/extensions/experimental/codex/turn_options.py +36 -0
- agents/extensions/handoff_filters.py +13 -1
- agents/extensions/memory/__init__.py +120 -0
- agents/extensions/memory/advanced_sqlite_session.py +1285 -0
- agents/extensions/memory/async_sqlite_session.py +239 -0
- agents/extensions/memory/dapr_session.py +423 -0
- agents/extensions/memory/encrypt_session.py +185 -0
- agents/extensions/memory/redis_session.py +261 -0
- agents/extensions/memory/sqlalchemy_session.py +334 -0
- agents/extensions/models/litellm_model.py +449 -36
- agents/extensions/models/litellm_provider.py +3 -1
- agents/function_schema.py +47 -5
- agents/guardrail.py +16 -2
- agents/{handoffs.py → handoffs/__init__.py} +89 -47
- agents/handoffs/history.py +268 -0
- agents/items.py +237 -11
- agents/lifecycle.py +75 -14
- agents/mcp/server.py +280 -37
- agents/mcp/util.py +24 -3
- agents/memory/__init__.py +22 -2
- agents/memory/openai_conversations_session.py +91 -0
- agents/memory/openai_responses_compaction_session.py +249 -0
- agents/memory/session.py +19 -261
- agents/memory/sqlite_session.py +275 -0
- agents/memory/util.py +20 -0
- agents/model_settings.py +14 -3
- agents/models/__init__.py +13 -0
- agents/models/chatcmpl_converter.py +303 -50
- agents/models/chatcmpl_helpers.py +63 -0
- agents/models/chatcmpl_stream_handler.py +290 -68
- agents/models/default_models.py +58 -0
- agents/models/interface.py +4 -0
- agents/models/openai_chatcompletions.py +103 -49
- agents/models/openai_provider.py +10 -4
- agents/models/openai_responses.py +162 -46
- agents/realtime/__init__.py +4 -0
- agents/realtime/_util.py +14 -3
- agents/realtime/agent.py +7 -0
- agents/realtime/audio_formats.py +53 -0
- agents/realtime/config.py +78 -10
- agents/realtime/events.py +18 -0
- agents/realtime/handoffs.py +2 -2
- agents/realtime/items.py +17 -1
- agents/realtime/model.py +13 -0
- agents/realtime/model_events.py +12 -0
- agents/realtime/model_inputs.py +18 -1
- agents/realtime/openai_realtime.py +696 -150
- agents/realtime/session.py +243 -23
- agents/repl.py +7 -3
- agents/result.py +197 -38
- agents/run.py +949 -168
- agents/run_context.py +13 -2
- agents/stream_events.py +1 -0
- agents/strict_schema.py +14 -0
- agents/tool.py +413 -15
- agents/tool_context.py +22 -1
- agents/tool_guardrails.py +279 -0
- agents/tracing/__init__.py +2 -0
- agents/tracing/config.py +9 -0
- agents/tracing/create.py +4 -0
- agents/tracing/processor_interface.py +84 -11
- agents/tracing/processors.py +65 -54
- agents/tracing/provider.py +64 -7
- agents/tracing/spans.py +105 -0
- agents/tracing/traces.py +116 -16
- agents/usage.py +134 -12
- agents/util/_json.py +19 -1
- agents/util/_transforms.py +12 -2
- agents/voice/input.py +5 -4
- agents/voice/models/openai_stt.py +17 -9
- agents/voice/pipeline.py +2 -0
- agents/voice/pipeline_config.py +4 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/METADATA +44 -19
- openai_agents-0.6.8.dist-info/RECORD +134 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/WHEEL +1 -1
- openai_agents-0.2.8.dist-info/RECORD +0 -103
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import inspect
|
|
4
|
+
from collections.abc import Awaitable
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, overload
|
|
7
|
+
|
|
8
|
+
from typing_extensions import TypedDict, TypeVar
|
|
9
|
+
|
|
10
|
+
from .exceptions import UserError
|
|
11
|
+
from .tool_context import ToolContext
|
|
12
|
+
from .util._types import MaybeAwaitable
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from .agent import Agent
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class ToolInputGuardrailResult:
    """Outcome of executing a single tool input guardrail."""

    guardrail: ToolInputGuardrail[Any]
    """The guardrail instance that produced this result."""

    output: ToolGuardrailFunctionOutput
    """The value returned by the guardrail function."""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class ToolOutputGuardrailResult:
    """Outcome of executing a single tool output guardrail."""

    guardrail: ToolOutputGuardrail[Any]
    """The guardrail instance that produced this result."""

    output: ToolGuardrailFunctionOutput
    """The value returned by the guardrail function."""
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class RejectContentBehavior(TypedDict):
    """Reject the tool call/output, but keep the run going with a message to the model."""

    type: Literal["reject_content"]
    message: str
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class RaiseExceptionBehavior(TypedDict):
    """Halt execution by raising an exception."""

    type: Literal["raise_exception"]
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class AllowBehavior(TypedDict):
    """Let normal tool execution proceed without interference."""

    type: Literal["allow"]
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
@dataclass
class ToolGuardrailFunctionOutput:
    """What a tool guardrail function returns to the runner."""

    output_info: Any
    """
    Optional details about the checks that were performed, e.g. granular
    per-check results the guardrail wants to surface.
    """

    behavior: RejectContentBehavior | RaiseExceptionBehavior | AllowBehavior = field(
        default_factory=lambda: AllowBehavior(type="allow")
    )
    """
    How the system reacts when this guardrail result is processed:
    - allow: continue normal tool execution without interference (default)
    - reject_content: reject the tool call/output but keep going, sending a message to the model
    - raise_exception: halt execution by raising a ToolGuardrailTripwireTriggered exception
    """

    @classmethod
    def allow(cls, output_info: Any = None) -> ToolGuardrailFunctionOutput:
        """Build an output that lets the tool execution continue normally.

        Args:
            output_info: Optional data about checks performed.

        Returns:
            ToolGuardrailFunctionOutput configured to allow normal execution.
        """
        return cls(output_info=output_info, behavior=AllowBehavior(type="allow"))

    @classmethod
    def reject_content(cls, message: str, output_info: Any = None) -> ToolGuardrailFunctionOutput:
        """Build an output that rejects the tool call/output but continues the run.

        Args:
            message: Message to send to the model instead of the tool result.
            output_info: Optional data about checks performed.

        Returns:
            ToolGuardrailFunctionOutput configured to reject the content.
        """
        behavior = RejectContentBehavior(type="reject_content", message=message)
        return cls(output_info=output_info, behavior=behavior)

    @classmethod
    def raise_exception(cls, output_info: Any = None) -> ToolGuardrailFunctionOutput:
        """Build an output that halts execution by raising an exception.

        Args:
            output_info: Optional data about checks performed.

        Returns:
            ToolGuardrailFunctionOutput configured to raise an exception.
        """
        behavior = RaiseExceptionBehavior(type="raise_exception")
        return cls(output_info=output_info, behavior=behavior)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
@dataclass
class ToolInputGuardrailData:
    """Data handed to a tool input guardrail function."""

    context: ToolContext[Any]
    """Tool context describing the current tool invocation."""

    agent: Agent[Any]
    """The agent that is running the tool."""
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
@dataclass
class ToolOutputGuardrailData(ToolInputGuardrailData):
    """Data handed to a tool output guardrail function.

    Same as the input-guardrail data, plus the output the tool produced.
    """

    output: Any
    """The value the tool function returned."""
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
# Covariant context type parameter shared by the tool guardrail classes.
TContext_co = TypeVar("TContext_co", bound=Any, covariant=True)


@dataclass
class ToolInputGuardrail(Generic[TContext_co]):
    """A guardrail executed before a function tool is invoked."""

    guardrail_function: Callable[
        [ToolInputGuardrailData], MaybeAwaitable[ToolGuardrailFunctionOutput]
    ]
    """The callable implementing the guardrail check."""

    name: str | None = None
    """Optional display name; when omitted, the function's __name__ is used."""

    def get_name(self) -> str:
        """Return the guardrail's name, falling back to the function name."""
        if self.name:
            return self.name
        return self.guardrail_function.__name__

    async def run(self, data: ToolInputGuardrailData) -> ToolGuardrailFunctionOutput:
        """Execute the guardrail function, awaiting the result when it is async."""
        fn = self.guardrail_function
        if not callable(fn):
            raise UserError(f"Guardrail function must be callable, got {self.guardrail_function}")

        outcome = fn(data)
        return await outcome if inspect.isawaitable(outcome) else outcome
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
@dataclass
class ToolOutputGuardrail(Generic[TContext_co]):
    """A guardrail executed after a function tool has produced its output."""

    guardrail_function: Callable[
        [ToolOutputGuardrailData], MaybeAwaitable[ToolGuardrailFunctionOutput]
    ]
    """The callable implementing the guardrail check."""

    name: str | None = None
    """Optional display name; when omitted, the function's __name__ is used."""

    def get_name(self) -> str:
        """Return the guardrail's name, falling back to the function name."""
        if self.name:
            return self.name
        return self.guardrail_function.__name__

    async def run(self, data: ToolOutputGuardrailData) -> ToolGuardrailFunctionOutput:
        """Execute the guardrail function, awaiting the result when it is async."""
        fn = self.guardrail_function
        if not callable(fn):
            raise UserError(f"Guardrail function must be callable, got {self.guardrail_function}")

        outcome = fn(data)
        return await outcome if inspect.isawaitable(outcome) else outcome
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
# Decorators
_ToolInputFuncSync = Callable[[ToolInputGuardrailData], ToolGuardrailFunctionOutput]
_ToolInputFuncAsync = Callable[[ToolInputGuardrailData], Awaitable[ToolGuardrailFunctionOutput]]


@overload
def tool_input_guardrail(func: _ToolInputFuncSync): ...


@overload
def tool_input_guardrail(func: _ToolInputFuncAsync): ...


@overload
def tool_input_guardrail(
    *, name: str | None = None
) -> Callable[[_ToolInputFuncSync | _ToolInputFuncAsync], ToolInputGuardrail[Any]]: ...


def tool_input_guardrail(
    func: _ToolInputFuncSync | _ToolInputFuncAsync | None = None,
    *,
    name: str | None = None,
) -> (
    ToolInputGuardrail[Any]
    | Callable[[_ToolInputFuncSync | _ToolInputFuncAsync], ToolInputGuardrail[Any]]
):
    """Decorator that wraps a function in a ToolInputGuardrail.

    Works both bare (@tool_input_guardrail) and with keyword arguments
    (@tool_input_guardrail(name=...)).
    """

    def wrap(target: _ToolInputFuncSync | _ToolInputFuncAsync) -> ToolInputGuardrail[Any]:
        return ToolInputGuardrail(guardrail_function=target, name=name or target.__name__)

    return wrap if func is None else wrap(func)
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
_ToolOutputFuncSync = Callable[[ToolOutputGuardrailData], ToolGuardrailFunctionOutput]
_ToolOutputFuncAsync = Callable[[ToolOutputGuardrailData], Awaitable[ToolGuardrailFunctionOutput]]


@overload
def tool_output_guardrail(func: _ToolOutputFuncSync): ...


@overload
def tool_output_guardrail(func: _ToolOutputFuncAsync): ...


@overload
def tool_output_guardrail(
    *, name: str | None = None
) -> Callable[[_ToolOutputFuncSync | _ToolOutputFuncAsync], ToolOutputGuardrail[Any]]: ...


def tool_output_guardrail(
    func: _ToolOutputFuncSync | _ToolOutputFuncAsync | None = None,
    *,
    name: str | None = None,
) -> (
    ToolOutputGuardrail[Any]
    | Callable[[_ToolOutputFuncSync | _ToolOutputFuncAsync], ToolOutputGuardrail[Any]]
):
    """Decorator that wraps a function in a ToolOutputGuardrail.

    Works both bare (@tool_output_guardrail) and with keyword arguments
    (@tool_output_guardrail(name=...)).
    """

    def wrap(target: _ToolOutputFuncSync | _ToolOutputFuncAsync) -> ToolOutputGuardrail[Any]:
        return ToolOutputGuardrail(guardrail_function=target, name=name or target.__name__)

    return wrap if func is None else wrap(func)
|
agents/tracing/__init__.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import atexit
|
|
2
2
|
|
|
3
|
+
from .config import TracingConfig
|
|
3
4
|
from .create import (
|
|
4
5
|
agent_span,
|
|
5
6
|
custom_span,
|
|
@@ -53,6 +54,7 @@ __all__ = [
|
|
|
53
54
|
"set_trace_processors",
|
|
54
55
|
"set_trace_provider",
|
|
55
56
|
"set_tracing_disabled",
|
|
57
|
+
"TracingConfig",
|
|
56
58
|
"trace",
|
|
57
59
|
"Trace",
|
|
58
60
|
"SpanError",
|
agents/tracing/config.py
ADDED
agents/tracing/create.py
CHANGED
|
@@ -4,6 +4,7 @@ from collections.abc import Mapping, Sequence
|
|
|
4
4
|
from typing import TYPE_CHECKING, Any
|
|
5
5
|
|
|
6
6
|
from ..logger import logger
|
|
7
|
+
from .config import TracingConfig
|
|
7
8
|
from .setup import get_trace_provider
|
|
8
9
|
from .span_data import (
|
|
9
10
|
AgentSpanData,
|
|
@@ -30,6 +31,7 @@ def trace(
|
|
|
30
31
|
trace_id: str | None = None,
|
|
31
32
|
group_id: str | None = None,
|
|
32
33
|
metadata: dict[str, Any] | None = None,
|
|
34
|
+
tracing: TracingConfig | None = None,
|
|
33
35
|
disabled: bool = False,
|
|
34
36
|
) -> Trace:
|
|
35
37
|
"""
|
|
@@ -50,6 +52,7 @@ def trace(
|
|
|
50
52
|
group_id: Optional grouping identifier to link multiple traces from the same conversation
|
|
51
53
|
or process. For instance, you might use a chat thread ID.
|
|
52
54
|
metadata: Optional dictionary of additional metadata to attach to the trace.
|
|
55
|
+
tracing: Optional tracing configuration for exporting this trace.
|
|
53
56
|
disabled: If True, we will return a Trace but the Trace will not be recorded.
|
|
54
57
|
|
|
55
58
|
Returns:
|
|
@@ -66,6 +69,7 @@ def trace(
|
|
|
66
69
|
trace_id=trace_id,
|
|
67
70
|
group_id=group_id,
|
|
68
71
|
metadata=metadata,
|
|
72
|
+
tracing=tracing,
|
|
69
73
|
disabled=disabled,
|
|
70
74
|
)
|
|
71
75
|
|
|
@@ -7,52 +7,125 @@ if TYPE_CHECKING:
|
|
|
7
7
|
|
|
8
8
|
|
|
9
9
|
class TracingProcessor(abc.ABC):
|
|
10
|
-
"""Interface for processing spans.
|
|
10
|
+
"""Interface for processing and monitoring traces and spans in the OpenAI Agents system.
|
|
11
|
+
|
|
12
|
+
This abstract class defines the interface that all tracing processors must implement.
|
|
13
|
+
Processors receive notifications when traces and spans start and end, allowing them
|
|
14
|
+
to collect, process, and export tracing data.
|
|
15
|
+
|
|
16
|
+
Example:
|
|
17
|
+
```python
|
|
18
|
+
class CustomProcessor(TracingProcessor):
|
|
19
|
+
def __init__(self):
|
|
20
|
+
self.active_traces = {}
|
|
21
|
+
self.active_spans = {}
|
|
22
|
+
|
|
23
|
+
def on_trace_start(self, trace):
|
|
24
|
+
self.active_traces[trace.trace_id] = trace
|
|
25
|
+
|
|
26
|
+
def on_trace_end(self, trace):
|
|
27
|
+
# Process completed trace
|
|
28
|
+
del self.active_traces[trace.trace_id]
|
|
29
|
+
|
|
30
|
+
def on_span_start(self, span):
|
|
31
|
+
self.active_spans[span.span_id] = span
|
|
32
|
+
|
|
33
|
+
def on_span_end(self, span):
|
|
34
|
+
# Process completed span
|
|
35
|
+
del self.active_spans[span.span_id]
|
|
36
|
+
|
|
37
|
+
def shutdown(self):
|
|
38
|
+
# Clean up resources
|
|
39
|
+
self.active_traces.clear()
|
|
40
|
+
self.active_spans.clear()
|
|
41
|
+
|
|
42
|
+
def force_flush(self):
|
|
43
|
+
# Force processing of any queued items
|
|
44
|
+
pass
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
Notes:
|
|
48
|
+
- All methods should be thread-safe
|
|
49
|
+
- Methods should not block for long periods
|
|
50
|
+
- Handle errors gracefully to prevent disrupting agent execution
|
|
51
|
+
"""
|
|
11
52
|
|
|
12
53
|
@abc.abstractmethod
|
|
13
54
|
def on_trace_start(self, trace: "Trace") -> None:
|
|
14
|
-
"""Called when a trace
|
|
55
|
+
"""Called when a new trace begins execution.
|
|
15
56
|
|
|
16
57
|
Args:
|
|
17
|
-
trace: The trace that started.
|
|
58
|
+
trace: The trace that started. Contains workflow name and metadata.
|
|
59
|
+
|
|
60
|
+
Notes:
|
|
61
|
+
- Called synchronously on trace start
|
|
62
|
+
- Should return quickly to avoid blocking execution
|
|
63
|
+
- Any errors should be caught and handled internally
|
|
18
64
|
"""
|
|
19
65
|
pass
|
|
20
66
|
|
|
21
67
|
@abc.abstractmethod
|
|
22
68
|
def on_trace_end(self, trace: "Trace") -> None:
|
|
23
|
-
"""Called when a trace
|
|
69
|
+
"""Called when a trace completes execution.
|
|
24
70
|
|
|
25
71
|
Args:
|
|
26
|
-
trace: The trace
|
|
72
|
+
trace: The completed trace containing all spans and results.
|
|
73
|
+
|
|
74
|
+
Notes:
|
|
75
|
+
- Called synchronously when trace finishes
|
|
76
|
+
- Good time to export/process the complete trace
|
|
77
|
+
- Should handle cleanup of any trace-specific resources
|
|
27
78
|
"""
|
|
28
79
|
pass
|
|
29
80
|
|
|
30
81
|
@abc.abstractmethod
|
|
31
82
|
def on_span_start(self, span: "Span[Any]") -> None:
|
|
32
|
-
"""Called when a span
|
|
83
|
+
"""Called when a new span begins execution.
|
|
33
84
|
|
|
34
85
|
Args:
|
|
35
|
-
span: The span that started.
|
|
86
|
+
span: The span that started. Contains operation details and context.
|
|
87
|
+
|
|
88
|
+
Notes:
|
|
89
|
+
- Called synchronously on span start
|
|
90
|
+
- Should return quickly to avoid blocking execution
|
|
91
|
+
- Spans are automatically nested under current trace/span
|
|
36
92
|
"""
|
|
37
93
|
pass
|
|
38
94
|
|
|
39
95
|
@abc.abstractmethod
|
|
40
96
|
def on_span_end(self, span: "Span[Any]") -> None:
|
|
41
|
-
"""Called when a span
|
|
97
|
+
"""Called when a span completes execution.
|
|
42
98
|
|
|
43
99
|
Args:
|
|
44
|
-
span: The span
|
|
100
|
+
span: The completed span containing execution results.
|
|
101
|
+
|
|
102
|
+
Notes:
|
|
103
|
+
- Called synchronously when span finishes
|
|
104
|
+
- Should not block or raise exceptions
|
|
105
|
+
- Good time to export/process the individual span
|
|
45
106
|
"""
|
|
46
107
|
pass
|
|
47
108
|
|
|
48
109
|
@abc.abstractmethod
|
|
49
110
|
def shutdown(self) -> None:
|
|
50
|
-
"""Called when the application stops.
|
|
111
|
+
"""Called when the application stops to clean up resources.
|
|
112
|
+
|
|
113
|
+
Should perform any necessary cleanup like:
|
|
114
|
+
- Flushing queued traces/spans
|
|
115
|
+
- Closing connections
|
|
116
|
+
- Releasing resources
|
|
117
|
+
"""
|
|
51
118
|
pass
|
|
52
119
|
|
|
53
120
|
@abc.abstractmethod
|
|
54
121
|
def force_flush(self) -> None:
|
|
55
|
-
"""Forces
|
|
122
|
+
"""Forces immediate processing of any queued traces/spans.
|
|
123
|
+
|
|
124
|
+
Notes:
|
|
125
|
+
- Should process all queued items before returning
|
|
126
|
+
- Useful before shutdown or when immediate processing is needed
|
|
127
|
+
- May block while processing completes
|
|
128
|
+
"""
|
|
56
129
|
pass
|
|
57
130
|
|
|
58
131
|
|
agents/tracing/processors.py
CHANGED
|
@@ -70,8 +70,8 @@ class BackendSpanExporter(TracingExporter):
|
|
|
70
70
|
client.
|
|
71
71
|
"""
|
|
72
72
|
# Clear the cached property if it exists
|
|
73
|
-
if
|
|
74
|
-
del self.__dict__[
|
|
73
|
+
if "api_key" in self.__dict__:
|
|
74
|
+
del self.__dict__["api_key"]
|
|
75
75
|
|
|
76
76
|
# Update the private attribute
|
|
77
77
|
self._api_key = api_key
|
|
@@ -92,62 +92,73 @@ class BackendSpanExporter(TracingExporter):
|
|
|
92
92
|
if not items:
|
|
93
93
|
return
|
|
94
94
|
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
headers
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
95
|
+
grouped_items: dict[str | None, list[Trace | Span[Any]]] = {}
|
|
96
|
+
for item in items:
|
|
97
|
+
key = item.tracing_api_key
|
|
98
|
+
grouped_items.setdefault(key, []).append(item)
|
|
99
|
+
|
|
100
|
+
for item_key, grouped in grouped_items.items():
|
|
101
|
+
api_key = item_key or self.api_key
|
|
102
|
+
if not api_key:
|
|
103
|
+
logger.warning("OPENAI_API_KEY is not set, skipping trace export")
|
|
104
|
+
continue
|
|
105
|
+
|
|
106
|
+
data = [item.export() for item in grouped if item.export()]
|
|
107
|
+
payload = {"data": data}
|
|
108
|
+
|
|
109
|
+
headers = {
|
|
110
|
+
"Authorization": f"Bearer {api_key}",
|
|
111
|
+
"Content-Type": "application/json",
|
|
112
|
+
"OpenAI-Beta": "traces=v1",
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
if self.organization:
|
|
116
|
+
headers["OpenAI-Organization"] = self.organization
|
|
117
|
+
|
|
118
|
+
if self.project:
|
|
119
|
+
headers["OpenAI-Project"] = self.project
|
|
120
|
+
|
|
121
|
+
# Exponential backoff loop
|
|
122
|
+
attempt = 0
|
|
123
|
+
delay = self.base_delay
|
|
124
|
+
while True:
|
|
125
|
+
attempt += 1
|
|
126
|
+
try:
|
|
127
|
+
response = self._client.post(url=self.endpoint, headers=headers, json=payload)
|
|
128
|
+
|
|
129
|
+
# If the response is successful, break out of the loop
|
|
130
|
+
if response.status_code < 300:
|
|
131
|
+
logger.debug(f"Exported {len(grouped)} items")
|
|
132
|
+
break
|
|
133
|
+
|
|
134
|
+
# If the response is a client error (4xx), we won't retry
|
|
135
|
+
if 400 <= response.status_code < 500:
|
|
136
|
+
logger.error(
|
|
137
|
+
"[non-fatal] Tracing client error %s: %s",
|
|
138
|
+
response.status_code,
|
|
139
|
+
response.text,
|
|
140
|
+
)
|
|
141
|
+
break
|
|
142
|
+
|
|
143
|
+
# For 5xx or other unexpected codes, treat it as transient and retry
|
|
144
|
+
logger.warning(
|
|
145
|
+
f"[non-fatal] Tracing: server error {response.status_code}, retrying."
|
|
146
|
+
)
|
|
147
|
+
except httpx.RequestError as exc:
|
|
148
|
+
# Network or other I/O error, we'll retry
|
|
149
|
+
logger.warning(f"[non-fatal] Tracing: request failed: {exc}")
|
|
126
150
|
|
|
127
|
-
# If
|
|
128
|
-
if
|
|
151
|
+
# If we reach here, we need to retry or give up
|
|
152
|
+
if attempt >= self.max_retries:
|
|
129
153
|
logger.error(
|
|
130
|
-
|
|
154
|
+
"[non-fatal] Tracing: max retries reached, giving up on this batch."
|
|
131
155
|
)
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
# For 5xx or other unexpected codes, treat it as transient and retry
|
|
135
|
-
logger.warning(
|
|
136
|
-
f"[non-fatal] Tracing: server error {response.status_code}, retrying."
|
|
137
|
-
)
|
|
138
|
-
except httpx.RequestError as exc:
|
|
139
|
-
# Network or other I/O error, we'll retry
|
|
140
|
-
logger.warning(f"[non-fatal] Tracing: request failed: {exc}")
|
|
141
|
-
|
|
142
|
-
# If we reach here, we need to retry or give up
|
|
143
|
-
if attempt >= self.max_retries:
|
|
144
|
-
logger.error("[non-fatal] Tracing: max retries reached, giving up on this batch.")
|
|
145
|
-
return
|
|
156
|
+
break
|
|
146
157
|
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
158
|
+
# Exponential backoff + jitter
|
|
159
|
+
sleep_time = delay + random.uniform(0, 0.1 * delay) # 10% jitter
|
|
160
|
+
time.sleep(sleep_time)
|
|
161
|
+
delay = min(delay * 2, self.max_delay)
|
|
151
162
|
|
|
152
163
|
def close(self):
|
|
153
164
|
"""Close the underlying HTTP client."""
|