openai-agents 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Note: the registry has flagged this version of openai-agents as potentially problematic.
- agents/__init__.py +223 -0
- agents/_config.py +23 -0
- agents/_debug.py +17 -0
- agents/_run_impl.py +792 -0
- agents/_utils.py +61 -0
- agents/agent.py +159 -0
- agents/agent_output.py +144 -0
- agents/computer.py +107 -0
- agents/exceptions.py +63 -0
- agents/extensions/handoff_filters.py +67 -0
- agents/extensions/handoff_prompt.py +19 -0
- agents/function_schema.py +340 -0
- agents/guardrail.py +320 -0
- agents/handoffs.py +236 -0
- agents/items.py +246 -0
- agents/lifecycle.py +105 -0
- agents/logger.py +3 -0
- agents/model_settings.py +36 -0
- agents/models/__init__.py +0 -0
- agents/models/_openai_shared.py +34 -0
- agents/models/fake_id.py +5 -0
- agents/models/interface.py +107 -0
- agents/models/openai_chatcompletions.py +952 -0
- agents/models/openai_provider.py +65 -0
- agents/models/openai_responses.py +384 -0
- agents/result.py +220 -0
- agents/run.py +904 -0
- agents/run_context.py +26 -0
- agents/stream_events.py +58 -0
- agents/strict_schema.py +167 -0
- agents/tool.py +288 -0
- agents/tracing/__init__.py +97 -0
- agents/tracing/create.py +306 -0
- agents/tracing/logger.py +3 -0
- agents/tracing/processor_interface.py +69 -0
- agents/tracing/processors.py +261 -0
- agents/tracing/scope.py +45 -0
- agents/tracing/setup.py +211 -0
- agents/tracing/span_data.py +188 -0
- agents/tracing/spans.py +264 -0
- agents/tracing/traces.py +195 -0
- agents/tracing/util.py +17 -0
- agents/usage.py +22 -0
- agents/version.py +7 -0
- openai_agents-0.0.3.dist-info/METADATA +204 -0
- openai_agents-0.0.3.dist-info/RECORD +49 -0
- openai_agents-0.0.3.dist-info/licenses/LICENSE +21 -0
- openai-agents/example.py +0 -2
- openai_agents-0.0.1.dist-info/METADATA +0 -17
- openai_agents-0.0.1.dist-info/RECORD +0 -6
- openai_agents-0.0.1.dist-info/licenses/LICENSE +0 -20
- {openai-agents → agents/extensions}/__init__.py +0 -0
- {openai_agents-0.0.1.dist-info → openai_agents-0.0.3.dist-info}/WHEEL +0 -0
agents/run_context.py
ADDED
@@ -0,0 +1,26 @@
+from dataclasses import dataclass, field
+from typing import Any, Generic
+
+from typing_extensions import TypeVar
+
+from .usage import Usage
+
+TContext = TypeVar("TContext", default=Any)
+
+
+@dataclass
+class RunContextWrapper(Generic[TContext]):
+    """This wraps the context object that you passed to `Runner.run()`. It also contains
+    information about the usage of the agent run so far.
+
+    NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code
+    you implement, like tool functions, callbacks, hooks, etc.
+    """
+
+    context: TContext
+    """The context object (or None), passed by you to `Runner.run()`"""
+
+    usage: Usage = field(default_factory=Usage)
+    """The usage of the agent run so far. For streamed responses, the usage will be stale until the
+    last chunk of the stream is processed.
+    """
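
A minimal usage sketch (not part of the diff): a context object is handed to `Runner.run()` and surfaces in tool code through this wrapper. It assumes `Agent`, `Runner`, and `function_tool` from elsewhere in this package (`agents/agent.py`, `agents/run.py`, `agents/tool.py` in the file list above); `UserInfo` and `fetch_user_name` are illustrative names.

    import asyncio
    from dataclasses import dataclass

    from agents import Agent, Runner, RunContextWrapper, function_tool


    @dataclass
    class UserInfo:
        name: str


    @function_tool
    async def fetch_user_name(ctx: RunContextWrapper[UserInfo], greeting: str) -> str:
        """Return a greeting for the user stored in the run context."""
        # ctx.context is the object passed to Runner.run(); ctx.usage accumulates over the run.
        return f"{greeting}, {ctx.context.name}"


    async def main() -> None:
        agent = Agent[UserInfo](name="assistant", tools=[fetch_user_name])
        result = await Runner.run(agent, "Greet the user", context=UserInfo(name="Ada"))
        print(result.final_output)


    asyncio.run(main())
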
agents/stream_events.py
ADDED
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any, Literal, Union
+
+from typing_extensions import TypeAlias
+
+from .agent import Agent
+from .items import RunItem, TResponseStreamEvent
+
+
+@dataclass
+class RawResponsesStreamEvent:
+    """Streaming event from the LLM. These are 'raw' events, i.e. they are directly passed through
+    from the LLM.
+    """
+
+    data: TResponseStreamEvent
+    """The raw responses streaming event from the LLM."""
+
+    type: Literal["raw_response_event"] = "raw_response_event"
+    """The type of the event."""
+
+
+@dataclass
+class RunItemStreamEvent:
+    """Streaming events that wrap a `RunItem`. As the agent processes the LLM response, it will
+    generate these events for new messages, tool calls, tool outputs, handoffs, etc.
+    """
+
+    name: Literal[
+        "message_output_created",
+        "handoff_requested",
+        "handoff_occured",
+        "tool_called",
+        "tool_output",
+        "reasoning_item_created",
+    ]
+    """The name of the event."""
+
+    item: RunItem
+    """The item that was created."""
+
+    type: Literal["run_item_stream_event"] = "run_item_stream_event"
+
+
+@dataclass
+class AgentUpdatedStreamEvent:
+    """Event that notifies that there is a new agent running."""
+
+    new_agent: Agent[Any]
+    """The new agent."""
+
+    type: Literal["agent_updated_stream_event"] = "agent_updated_stream_event"
+
+
+StreamEvent: TypeAlias = Union[RawResponsesStreamEvent, RunItemStreamEvent, AgentUpdatedStreamEvent]
+"""A streaming event from an agent."""
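
A sketch of consuming these events, assuming the streaming entry point `Runner.run_streamed()` in `agents/run.py` and the `stream_events()` iterator on its result (`agents/result.py`), neither of which is shown in this excerpt. The branches follow the `type` discriminators defined above.

    import asyncio

    from agents import Agent, Runner


    async def main() -> None:
        agent = Agent(name="assistant", instructions="Reply briefly.")
        result = Runner.run_streamed(agent, "Hello")
        async for event in result.stream_events():
            if event.type == "raw_response_event":
                continue  # token-level deltas passed straight through from the LLM
            elif event.type == "run_item_stream_event":
                print(f"item event: {event.name}")  # e.g. "tool_called", "tool_output"
            elif event.type == "agent_updated_stream_event":
                print(f"now running agent: {event.new_agent.name}")


    asyncio.run(main())
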
agents/strict_schema.py
ADDED
@@ -0,0 +1,167 @@
+from __future__ import annotations
+
+from typing import Any
+
+from openai import NOT_GIVEN
+from typing_extensions import TypeGuard
+
+from .exceptions import UserError
+
+_EMPTY_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {},
+    "required": [],
+}
+
+
+def ensure_strict_json_schema(
+    schema: dict[str, Any],
+) -> dict[str, Any]:
+    """Mutates the given JSON schema to ensure it conforms to the `strict` standard
+    that the OpenAI API expects.
+    """
+    if schema == {}:
+        return _EMPTY_SCHEMA
+    return _ensure_strict_json_schema(schema, path=(), root=schema)
+
+
+# Adapted from https://github.com/openai/openai-python/blob/main/src/openai/lib/_pydantic.py
+def _ensure_strict_json_schema(
+    json_schema: object,
+    *,
+    path: tuple[str, ...],
+    root: dict[str, object],
+) -> dict[str, Any]:
+    if not is_dict(json_schema):
+        raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")
+
+    defs = json_schema.get("$defs")
+    if is_dict(defs):
+        for def_name, def_schema in defs.items():
+            _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name), root=root)
+
+    definitions = json_schema.get("definitions")
+    if is_dict(definitions):
+        for definition_name, definition_schema in definitions.items():
+            _ensure_strict_json_schema(
+                definition_schema, path=(*path, "definitions", definition_name), root=root
+            )
+
+    typ = json_schema.get("type")
+    if typ == "object" and "additionalProperties" not in json_schema:
+        json_schema["additionalProperties"] = False
+    elif (
+        typ == "object"
+        and "additionalProperties" in json_schema
+        and json_schema["additionalProperties"] is True
+    ):
+        raise UserError(
+            "additionalProperties should not be set for object types. This could be because "
+            "you're using an older version of Pydantic, or because you configured additional "
+            "properties to be allowed. If you really need this, update the function or output tool "
+            "to not use a strict schema."
+        )
+
+    # object types
+    # { 'type': 'object', 'properties': { 'a': {...} } }
+    properties = json_schema.get("properties")
+    if is_dict(properties):
+        json_schema["required"] = list(properties.keys())
+        json_schema["properties"] = {
+            key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key), root=root)
+            for key, prop_schema in properties.items()
+        }
+
+    # arrays
+    # { 'type': 'array', 'items': {...} }
+    items = json_schema.get("items")
+    if is_dict(items):
+        json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"), root=root)
+
+    # unions
+    any_of = json_schema.get("anyOf")
+    if is_list(any_of):
+        json_schema["anyOf"] = [
+            _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i)), root=root)
+            for i, variant in enumerate(any_of)
+        ]
+
+    # intersections
+    all_of = json_schema.get("allOf")
+    if is_list(all_of):
+        if len(all_of) == 1:
+            json_schema.update(
+                _ensure_strict_json_schema(all_of[0], path=(*path, "allOf", "0"), root=root)
+            )
+            json_schema.pop("allOf")
+        else:
+            json_schema["allOf"] = [
+                _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i)), root=root)
+                for i, entry in enumerate(all_of)
+            ]
+
+    # strip `None` defaults as there's no meaningful distinction here
+    # the schema will still be `nullable` and the model will default
+    # to using `None` anyway
+    if json_schema.get("default", NOT_GIVEN) is None:
+        json_schema.pop("default")
+
+    # we can't use `$ref`s if there are also other properties defined, e.g.
+    # `{"$ref": "...", "description": "my description"}`
+    #
+    # so we unravel the ref
+    # `{"type": "string", "description": "my description"}`
+    ref = json_schema.get("$ref")
+    if ref and has_more_than_n_keys(json_schema, 1):
+        assert isinstance(ref, str), f"Received non-string $ref - {ref}"
+
+        resolved = resolve_ref(root=root, ref=ref)
+        if not is_dict(resolved):
+            raise ValueError(
+                f"Expected `$ref: {ref}` to resolve to a dictionary but got {resolved}"
+            )
+
+        # properties from the json schema take priority over the ones on the `$ref`
+        json_schema.update({**resolved, **json_schema})
+        json_schema.pop("$ref")
+        # Since the schema expanded from `$ref` might not have `additionalProperties: false` applied
+        # we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid
+        return _ensure_strict_json_schema(json_schema, path=path, root=root)
+
+    return json_schema
+
+
+def resolve_ref(*, root: dict[str, object], ref: str) -> object:
+    if not ref.startswith("#/"):
+        raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/")
+
+    path = ref[2:].split("/")
+    resolved = root
+    for key in path:
+        value = resolved[key]
+        assert is_dict(value), (
+            f"encountered non-dictionary entry while resolving {ref} - {resolved}"
+        )
+        resolved = value
+
+    return resolved
+
+
+def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
+    # just pretend that we know there are only `str` keys
+    # as that check is not worth the performance cost
+    return isinstance(obj, dict)
+
+
+def is_list(obj: object) -> TypeGuard[list[object]]:
+    return isinstance(obj, list)
+
+
+def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:
+    i = 0
+    for _ in obj.keys():
+        i += 1
+        if i > n:
+            return True
+    return False
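
For intuition, a small self-contained example of the transformation this module performs: objects gain `additionalProperties: false`, every listed property becomes required, and nested schemas (properties, array items, `anyOf`/`allOf` variants) are visited recursively. Note that the input dict is mutated in place.

    from agents.strict_schema import ensure_strict_json_schema

    schema = {
        "type": "object",
        "properties": {
            "city": {"type": "string"},
            "units": {"anyOf": [{"type": "string"}, {"type": "null"}]},
        },
    }

    strict = ensure_strict_json_schema(schema)
    assert strict["additionalProperties"] is False
    assert strict["required"] == ["city", "units"]
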
agents/tool.py
ADDED
@@ -0,0 +1,288 @@
+from __future__ import annotations
+
+import inspect
+import json
+from collections.abc import Awaitable
+from dataclasses import dataclass
+from typing import Any, Callable, Literal, Union, overload
+
+from openai.types.responses.file_search_tool_param import Filters, RankingOptions
+from openai.types.responses.web_search_tool_param import UserLocation
+from pydantic import ValidationError
+from typing_extensions import Concatenate, ParamSpec
+
+from . import _debug, _utils
+from ._utils import MaybeAwaitable
+from .computer import AsyncComputer, Computer
+from .exceptions import ModelBehaviorError
+from .function_schema import DocstringStyle, function_schema
+from .logger import logger
+from .run_context import RunContextWrapper
+from .tracing import SpanError
+
+ToolParams = ParamSpec("ToolParams")
+
+ToolFunctionWithoutContext = Callable[ToolParams, Any]
+ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]
+
+ToolFunction = Union[ToolFunctionWithoutContext[ToolParams], ToolFunctionWithContext[ToolParams]]
+
+
+@dataclass
+class FunctionTool:
+    """A tool that wraps a function. In most cases, you should use the `function_tool` helpers to
+    create a FunctionTool, as they let you easily wrap a Python function.
+    """
+
+    name: str
+    """The name of the tool, as shown to the LLM. Generally the name of the function."""
+
+    description: str
+    """A description of the tool, as shown to the LLM."""
+
+    params_json_schema: dict[str, Any]
+    """The JSON schema for the tool's parameters."""
+
+    on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[str]]
+    """A function that invokes the tool with the given context and parameters. The params passed
+    are:
+    1. The tool run context.
+    2. The arguments from the LLM, as a JSON string.
+
+    You must return a string representation of the tool output. In case of errors, you can either
+    raise an Exception (which will cause the run to fail) or return a string error message (which
+    will be sent back to the LLM).
+    """
+
+    strict_json_schema: bool = True
+    """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True,
+    as it increases the likelihood of correct JSON input."""
+
+
+@dataclass
+class FileSearchTool:
+    """A hosted tool that lets the LLM search through a vector store. Currently only supported with
+    OpenAI models, using the Responses API.
+    """
+
+    vector_store_ids: list[str]
+    """The IDs of the vector stores to search."""
+
+    max_num_results: int | None = None
+    """The maximum number of results to return."""
+
+    include_search_results: bool = False
+    """Whether to include the search results in the output produced by the LLM."""
+
+    ranking_options: RankingOptions | None = None
+    """Ranking options for search."""
+
+    filters: Filters | None = None
+    """A filter to apply based on file attributes."""
+
+    @property
+    def name(self):
+        return "file_search"
+
+
+@dataclass
+class WebSearchTool:
+    """A hosted tool that lets the LLM search the web. Currently only supported with OpenAI models,
+    using the Responses API.
+    """
+
+    user_location: UserLocation | None = None
+    """Optional location for the search. Lets you customize results to be relevant to a location."""
+
+    search_context_size: Literal["low", "medium", "high"] = "medium"
+    """The amount of context to use for the search."""
+
+    @property
+    def name(self):
+        return "web_search_preview"
+
+
+@dataclass
+class ComputerTool:
+    """A hosted tool that lets the LLM control a computer."""
+
+    computer: Computer | AsyncComputer
+    """The computer implementation, which describes the environment and dimensions of the computer,
+    as well as implements the computer actions like click, screenshot, etc.
+    """
+
+    @property
+    def name(self):
+        return "computer_use_preview"
+
+
+Tool = Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool]
+"""A tool that can be used in an agent."""
+
+
+def default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:
+    """The default tool error function, which just returns a generic error message."""
+    return f"An error occurred while running the tool. Please try again. Error: {str(error)}"
+
+
+ToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]
+
+
+@overload
+def function_tool(
+    func: ToolFunction[...],
+    *,
+    name_override: str | None = None,
+    description_override: str | None = None,
+    docstring_style: DocstringStyle | None = None,
+    use_docstring_info: bool = True,
+    failure_error_function: ToolErrorFunction | None = None,
+) -> FunctionTool:
+    """Overload for usage as @function_tool (no parentheses)."""
+    ...
+
+
+@overload
+def function_tool(
+    *,
+    name_override: str | None = None,
+    description_override: str | None = None,
+    docstring_style: DocstringStyle | None = None,
+    use_docstring_info: bool = True,
+    failure_error_function: ToolErrorFunction | None = None,
+) -> Callable[[ToolFunction[...]], FunctionTool]:
+    """Overload for usage as @function_tool(...)."""
+    ...
+
+
+def function_tool(
+    func: ToolFunction[...] | None = None,
+    *,
+    name_override: str | None = None,
+    description_override: str | None = None,
+    docstring_style: DocstringStyle | None = None,
+    use_docstring_info: bool = True,
+    failure_error_function: ToolErrorFunction | None = default_tool_error_function,
+) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]:
+    """
+    Decorator to create a FunctionTool from a function. By default, we will:
+    1. Parse the function signature to create a JSON schema for the tool's parameters.
+    2. Use the function's docstring to populate the tool's description.
+    3. Use the function's docstring to populate argument descriptions.
+    The docstring style is detected automatically, but you can override it.
+
+    If the function takes a `RunContextWrapper` as the first argument, it *must* match the
+    context type of the agent that uses the tool.
+
+    Args:
+        func: The function to wrap.
+        name_override: If provided, use this name for the tool instead of the function's name.
+        description_override: If provided, use this description for the tool instead of the
+            function's docstring.
+        docstring_style: If provided, use this style for the tool's docstring. If not provided,
+            we will attempt to auto-detect the style.
+        use_docstring_info: If True, use the function's docstring to populate the tool's
+            description and argument descriptions.
+        failure_error_function: If provided, use this function to generate an error message when
+            the tool call fails. The error message is sent to the LLM. If you pass None, then no
+            error message will be sent and instead an Exception will be raised.
+    """
+
+    def _create_function_tool(the_func: ToolFunction[...]) -> FunctionTool:
+        schema = function_schema(
+            func=the_func,
+            name_override=name_override,
+            description_override=description_override,
+            docstring_style=docstring_style,
+            use_docstring_info=use_docstring_info,
+        )
+
+        async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> str:
+            try:
+                json_data: dict[str, Any] = json.loads(input) if input else {}
+            except Exception as e:
+                if _debug.DONT_LOG_TOOL_DATA:
+                    logger.debug(f"Invalid JSON input for tool {schema.name}")
+                else:
+                    logger.debug(f"Invalid JSON input for tool {schema.name}: {input}")
+                raise ModelBehaviorError(
+                    f"Invalid JSON input for tool {schema.name}: {input}"
+                ) from e
+
+            if _debug.DONT_LOG_TOOL_DATA:
+                logger.debug(f"Invoking tool {schema.name}")
+            else:
+                logger.debug(f"Invoking tool {schema.name} with input {input}")
+
+            try:
+                parsed = (
+                    schema.params_pydantic_model(**json_data)
+                    if json_data
+                    else schema.params_pydantic_model()
+                )
+            except ValidationError as e:
+                raise ModelBehaviorError(f"Invalid JSON input for tool {schema.name}: {e}") from e
+
+            args, kwargs_dict = schema.to_call_args(parsed)
+
+            if not _debug.DONT_LOG_TOOL_DATA:
+                logger.debug(f"Tool call args: {args}, kwargs: {kwargs_dict}")
+
+            if inspect.iscoroutinefunction(the_func):
+                if schema.takes_context:
+                    result = await the_func(ctx, *args, **kwargs_dict)
+                else:
+                    result = await the_func(*args, **kwargs_dict)
+            else:
+                if schema.takes_context:
+                    result = the_func(ctx, *args, **kwargs_dict)
+                else:
+                    result = the_func(*args, **kwargs_dict)
+
+            if _debug.DONT_LOG_TOOL_DATA:
+                logger.debug(f"Tool {schema.name} completed.")
+            else:
+                logger.debug(f"Tool {schema.name} returned {result}")
+
+            return str(result)
+
+        async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> str:
+            try:
+                return await _on_invoke_tool_impl(ctx, input)
+            except Exception as e:
+                if failure_error_function is None:
+                    raise
+
+                result = failure_error_function(ctx, e)
+                if inspect.isawaitable(result):
+                    return await result
+
+                _utils.attach_error_to_current_span(
+                    SpanError(
+                        message="Error running tool (non-fatal)",
+                        data={
+                            "tool_name": schema.name,
+                            "error": str(e),
+                        },
+                    )
+                )
+                return result
+
+        return FunctionTool(
+            name=schema.name,
+            description=schema.description or "",
+            params_json_schema=schema.params_json_schema,
+            on_invoke_tool=_on_invoke_tool,
+        )
+
+    # If func is actually a callable, we were used as @function_tool with no parentheses
+    if callable(func):
+        return _create_function_tool(func)
+
+    # Otherwise, we were used as @function_tool(...), so return a decorator
+    def decorator(real_func: ToolFunction[...]) -> FunctionTool:
+        return _create_function_tool(real_func)
+
+    return decorator
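
A short sketch of the two decorator forms handled above, plus a custom `failure_error_function` (the weather functions are illustrative):

    from typing import Any

    from agents.run_context import RunContextWrapper
    from agents.tool import function_tool


    @function_tool  # bare form: name, description and schema come from the signature and docstring
    def get_weather(city: str) -> str:
        """Fetch the current weather for a city.

        Args:
            city: The city to look up.
        """
        return f"It is always sunny in {city}."


    def on_tool_error(ctx: RunContextWrapper[Any], error: Exception) -> str:
        # The returned string is sent back to the LLM instead of failing the run.
        return f"get_forecast failed: {error}"


    @function_tool(name_override="get_forecast", failure_error_function=on_tool_error)
    def forecast(city: str, days: int = 3) -> str:
        """Fetch a multi-day forecast for a city."""
        raise RuntimeError("upstream weather service unavailable")
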
agents/tracing/__init__.py
ADDED
@@ -0,0 +1,97 @@
+import atexit
+
+from .create import (
+    agent_span,
+    custom_span,
+    function_span,
+    generation_span,
+    get_current_span,
+    get_current_trace,
+    guardrail_span,
+    handoff_span,
+    response_span,
+    trace,
+)
+from .processor_interface import TracingProcessor
+from .processors import default_exporter, default_processor
+from .setup import GLOBAL_TRACE_PROVIDER
+from .span_data import (
+    AgentSpanData,
+    CustomSpanData,
+    FunctionSpanData,
+    GenerationSpanData,
+    GuardrailSpanData,
+    HandoffSpanData,
+    ResponseSpanData,
+    SpanData,
+)
+from .spans import Span, SpanError
+from .traces import Trace
+from .util import gen_span_id, gen_trace_id
+
+__all__ = [
+    "add_trace_processor",
+    "agent_span",
+    "custom_span",
+    "function_span",
+    "generation_span",
+    "get_current_span",
+    "get_current_trace",
+    "guardrail_span",
+    "handoff_span",
+    "response_span",
+    "set_trace_processors",
+    "set_tracing_disabled",
+    "trace",
+    "Trace",
+    "SpanError",
+    "Span",
+    "SpanData",
+    "AgentSpanData",
+    "CustomSpanData",
+    "FunctionSpanData",
+    "GenerationSpanData",
+    "GuardrailSpanData",
+    "HandoffSpanData",
+    "ResponseSpanData",
+    "TracingProcessor",
+    "gen_trace_id",
+    "gen_span_id",
+]
+
+
+def add_trace_processor(span_processor: TracingProcessor) -> None:
+    """
+    Adds a new trace processor. This processor will receive all traces/spans.
+    """
+    GLOBAL_TRACE_PROVIDER.register_processor(span_processor)
+
+
+def set_trace_processors(processors: list[TracingProcessor]) -> None:
+    """
+    Set the list of trace processors. This will replace the current list of processors.
+    """
+    GLOBAL_TRACE_PROVIDER.set_processors(processors)
+
+
+def set_tracing_disabled(disabled: bool) -> None:
+    """
+    Set whether tracing is globally disabled.
+    """
+    GLOBAL_TRACE_PROVIDER.set_disabled(disabled)
+
+
+def set_tracing_export_api_key(api_key: str) -> None:
+    """
+    Set the OpenAI API key for the backend exporter.
+    """
+    default_exporter().set_api_key(api_key)
+
+
+# Add the default processor, which exports traces and spans to the backend in batches. You can
+# change the default behavior by either:
+# 1. calling add_trace_processor(), which adds additional processors, or
+# 2. calling set_trace_processors(), which replaces the default processor.
+add_trace_processor(default_processor())
+
+atexit.register(GLOBAL_TRACE_PROVIDER.shutdown)