hammad-python 0.0.30__py3-none-any.whl → 0.0.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ham/__init__.py +200 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.32.dist-info}/METADATA +6 -32
- hammad_python-0.0.32.dist-info/RECORD +6 -0
- hammad/__init__.py +0 -84
- hammad/_internal.py +0 -256
- hammad/_main.py +0 -226
- hammad/cache/__init__.py +0 -40
- hammad/cache/base_cache.py +0 -181
- hammad/cache/cache.py +0 -169
- hammad/cache/decorators.py +0 -261
- hammad/cache/file_cache.py +0 -80
- hammad/cache/ttl_cache.py +0 -74
- hammad/cli/__init__.py +0 -33
- hammad/cli/animations.py +0 -573
- hammad/cli/plugins.py +0 -867
- hammad/cli/styles/__init__.py +0 -55
- hammad/cli/styles/settings.py +0 -139
- hammad/cli/styles/types.py +0 -358
- hammad/cli/styles/utils.py +0 -634
- hammad/data/__init__.py +0 -90
- hammad/data/collections/__init__.py +0 -49
- hammad/data/collections/collection.py +0 -326
- hammad/data/collections/indexes/__init__.py +0 -37
- hammad/data/collections/indexes/qdrant/__init__.py +0 -1
- hammad/data/collections/indexes/qdrant/index.py +0 -723
- hammad/data/collections/indexes/qdrant/settings.py +0 -94
- hammad/data/collections/indexes/qdrant/utils.py +0 -210
- hammad/data/collections/indexes/tantivy/__init__.py +0 -1
- hammad/data/collections/indexes/tantivy/index.py +0 -426
- hammad/data/collections/indexes/tantivy/settings.py +0 -40
- hammad/data/collections/indexes/tantivy/utils.py +0 -176
- hammad/data/configurations/__init__.py +0 -35
- hammad/data/configurations/configuration.py +0 -564
- hammad/data/models/__init__.py +0 -50
- hammad/data/models/extensions/__init__.py +0 -4
- hammad/data/models/extensions/pydantic/__init__.py +0 -42
- hammad/data/models/extensions/pydantic/converters.py +0 -759
- hammad/data/models/fields.py +0 -546
- hammad/data/models/model.py +0 -1078
- hammad/data/models/utils.py +0 -280
- hammad/data/sql/__init__.py +0 -24
- hammad/data/sql/database.py +0 -576
- hammad/data/sql/types.py +0 -127
- hammad/data/types/__init__.py +0 -75
- hammad/data/types/file.py +0 -431
- hammad/data/types/multimodal/__init__.py +0 -36
- hammad/data/types/multimodal/audio.py +0 -200
- hammad/data/types/multimodal/image.py +0 -182
- hammad/data/types/text.py +0 -1308
- hammad/formatting/__init__.py +0 -33
- hammad/formatting/json/__init__.py +0 -27
- hammad/formatting/json/converters.py +0 -158
- hammad/formatting/text/__init__.py +0 -63
- hammad/formatting/text/converters.py +0 -723
- hammad/formatting/text/markdown.py +0 -131
- hammad/formatting/yaml/__init__.py +0 -26
- hammad/formatting/yaml/converters.py +0 -5
- hammad/genai/__init__.py +0 -217
- hammad/genai/a2a/__init__.py +0 -32
- hammad/genai/a2a/workers.py +0 -552
- hammad/genai/agents/__init__.py +0 -59
- hammad/genai/agents/agent.py +0 -1973
- hammad/genai/agents/run.py +0 -1024
- hammad/genai/agents/types/__init__.py +0 -42
- hammad/genai/agents/types/agent_context.py +0 -13
- hammad/genai/agents/types/agent_event.py +0 -128
- hammad/genai/agents/types/agent_hooks.py +0 -220
- hammad/genai/agents/types/agent_messages.py +0 -31
- hammad/genai/agents/types/agent_response.py +0 -125
- hammad/genai/agents/types/agent_stream.py +0 -327
- hammad/genai/graphs/__init__.py +0 -125
- hammad/genai/graphs/_utils.py +0 -190
- hammad/genai/graphs/base.py +0 -1828
- hammad/genai/graphs/plugins.py +0 -316
- hammad/genai/graphs/types.py +0 -638
- hammad/genai/models/__init__.py +0 -1
- hammad/genai/models/embeddings/__init__.py +0 -43
- hammad/genai/models/embeddings/model.py +0 -226
- hammad/genai/models/embeddings/run.py +0 -163
- hammad/genai/models/embeddings/types/__init__.py +0 -37
- hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
- hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
- hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
- hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
- hammad/genai/models/language/__init__.py +0 -57
- hammad/genai/models/language/model.py +0 -1098
- hammad/genai/models/language/run.py +0 -878
- hammad/genai/models/language/types/__init__.py +0 -40
- hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
- hammad/genai/models/language/types/language_model_messages.py +0 -28
- hammad/genai/models/language/types/language_model_name.py +0 -239
- hammad/genai/models/language/types/language_model_request.py +0 -127
- hammad/genai/models/language/types/language_model_response.py +0 -217
- hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
- hammad/genai/models/language/types/language_model_settings.py +0 -89
- hammad/genai/models/language/types/language_model_stream.py +0 -600
- hammad/genai/models/language/utils/__init__.py +0 -28
- hammad/genai/models/language/utils/requests.py +0 -421
- hammad/genai/models/language/utils/structured_outputs.py +0 -135
- hammad/genai/models/model_provider.py +0 -4
- hammad/genai/models/multimodal.py +0 -47
- hammad/genai/models/reranking.py +0 -26
- hammad/genai/types/__init__.py +0 -1
- hammad/genai/types/base.py +0 -215
- hammad/genai/types/history.py +0 -290
- hammad/genai/types/tools.py +0 -507
- hammad/logging/__init__.py +0 -35
- hammad/logging/decorators.py +0 -834
- hammad/logging/logger.py +0 -1018
- hammad/mcp/__init__.py +0 -53
- hammad/mcp/client/__init__.py +0 -35
- hammad/mcp/client/client.py +0 -624
- hammad/mcp/client/client_service.py +0 -400
- hammad/mcp/client/settings.py +0 -178
- hammad/mcp/servers/__init__.py +0 -26
- hammad/mcp/servers/launcher.py +0 -1161
- hammad/runtime/__init__.py +0 -32
- hammad/runtime/decorators.py +0 -142
- hammad/runtime/run.py +0 -299
- hammad/service/__init__.py +0 -49
- hammad/service/create.py +0 -527
- hammad/service/decorators.py +0 -283
- hammad/types.py +0 -288
- hammad/typing/__init__.py +0 -435
- hammad/web/__init__.py +0 -43
- hammad/web/http/__init__.py +0 -1
- hammad/web/http/client.py +0 -944
- hammad/web/models.py +0 -275
- hammad/web/openapi/__init__.py +0 -1
- hammad/web/openapi/client.py +0 -740
- hammad/web/search/__init__.py +0 -1
- hammad/web/search/client.py +0 -1023
- hammad/web/utils.py +0 -472
- hammad_python-0.0.30.dist-info/RECORD +0 -135
- {hammad → ham}/py.typed +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.32.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.32.dist-info}/licenses/LICENSE +0 -0
hammad/genai/agents/run.py
DELETED
@@ -1,1024 +0,0 @@
|
|
1
|
-
"""hammad.genai.agents.run
|
2
|
-
|
3
|
-
Standalone functions for running agents with full parameter typing.
|
4
|
-
"""
|
5
|
-
|
6
|
-
import functools
|
7
|
-
from typing import (
|
8
|
-
Any,
|
9
|
-
Callable,
|
10
|
-
List,
|
11
|
-
TypeVar,
|
12
|
-
Union,
|
13
|
-
Optional,
|
14
|
-
Type,
|
15
|
-
overload,
|
16
|
-
Dict,
|
17
|
-
TYPE_CHECKING,
|
18
|
-
)
|
19
|
-
from typing_extensions import Literal
|
20
|
-
|
21
|
-
|
22
|
-
if TYPE_CHECKING:
|
23
|
-
from ..models.language.model import LanguageModel
|
24
|
-
from ..models.language.types import (
|
25
|
-
LanguageModelName,
|
26
|
-
LanguageModelInstructorMode,
|
27
|
-
)
|
28
|
-
from .types.agent_response import AgentResponse
|
29
|
-
from .types.agent_stream import AgentStream
|
30
|
-
from .types.agent_context import AgentContext
|
31
|
-
from .types.agent_messages import AgentMessages
|
32
|
-
from ..types.tools import Tool
|
33
|
-
from httpx import Timeout
|
34
|
-
|
35
|
-
|
36
|
-
from .agent import Agent, AgentSettings
|
37
|
-
|
38
|
-
|
39
|
-
__all__ = [
|
40
|
-
"run_agent",
|
41
|
-
"async_run_agent",
|
42
|
-
"run_agent_iter",
|
43
|
-
"async_run_agent_iter",
|
44
|
-
"agent_decorator",
|
45
|
-
]
|
46
|
-
|
47
|
-
T = TypeVar("T")
|
48
|
-
|
49
|
-
|
50
|
-
# Overloads for run_agent - non-streaming
|
51
|
-
@overload
|
52
|
-
def run_agent(
|
53
|
-
messages: "AgentMessages",
|
54
|
-
*,
|
55
|
-
# Agent settings
|
56
|
-
name: str = "agent",
|
57
|
-
instructions: Optional[str] = None,
|
58
|
-
description: Optional[str] = None,
|
59
|
-
tools: Union[List["Tool"], Callable, None] = None,
|
60
|
-
settings: Optional[AgentSettings] = None,
|
61
|
-
# Context management
|
62
|
-
context: Optional["AgentContext"] = None,
|
63
|
-
context_updates: Optional[
|
64
|
-
Union[List[Literal["before", "after"]], Literal["before", "after"]]
|
65
|
-
] = None,
|
66
|
-
context_confirm: bool = False,
|
67
|
-
context_strategy: Literal["selective", "all"] = "all",
|
68
|
-
context_max_retries: int = 3,
|
69
|
-
context_confirm_instructions: Optional[str] = None,
|
70
|
-
context_selection_instructions: Optional[str] = None,
|
71
|
-
context_update_instructions: Optional[str] = None,
|
72
|
-
context_format: Literal["json", "python", "markdown"] = "json",
|
73
|
-
# Model settings
|
74
|
-
model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
|
75
|
-
max_steps: Optional[int] = None,
|
76
|
-
instructor_mode: Optional["LanguageModelInstructorMode"] = None,
|
77
|
-
# End strategy
|
78
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
79
|
-
end_tool: Optional[Callable] = None,
|
80
|
-
# LM settings
|
81
|
-
timeout: Optional[Union[float, str, "Timeout"]] = None,
|
82
|
-
temperature: Optional[float] = None,
|
83
|
-
top_p: Optional[float] = None,
|
84
|
-
max_tokens: Optional[int] = None,
|
85
|
-
presence_penalty: Optional[float] = None,
|
86
|
-
frequency_penalty: Optional[float] = None,
|
87
|
-
seed: Optional[int] = None,
|
88
|
-
user: Optional[str] = None,
|
89
|
-
verbose: bool = False,
|
90
|
-
debug: bool = False,
|
91
|
-
) -> "AgentResponse[str]": ...
|
92
|
-
|
93
|
-
|
94
|
-
@overload
|
95
|
-
def run_agent(
|
96
|
-
messages: "AgentMessages",
|
97
|
-
*,
|
98
|
-
output_type: Type[T],
|
99
|
-
# Agent settings
|
100
|
-
name: str = "agent",
|
101
|
-
instructions: Optional[str] = None,
|
102
|
-
description: Optional[str] = None,
|
103
|
-
tools: Union[List["Tool"], Callable, None] = None,
|
104
|
-
settings: Optional[AgentSettings] = None,
|
105
|
-
# Context management
|
106
|
-
context: Optional["AgentContext"] = None,
|
107
|
-
context_updates: Optional[
|
108
|
-
Union[List[Literal["before", "after"]], Literal["before", "after"]]
|
109
|
-
] = None,
|
110
|
-
context_confirm: bool = False,
|
111
|
-
context_strategy: Literal["selective", "all"] = "all",
|
112
|
-
context_max_retries: int = 3,
|
113
|
-
context_confirm_instructions: Optional[str] = None,
|
114
|
-
context_selection_instructions: Optional[str] = None,
|
115
|
-
context_update_instructions: Optional[str] = None,
|
116
|
-
context_format: Literal["json", "python", "markdown"] = "json",
|
117
|
-
# Model settings
|
118
|
-
model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
|
119
|
-
max_steps: Optional[int] = None,
|
120
|
-
instructor_mode: Optional["LanguageModelInstructorMode"] = None,
|
121
|
-
# End strategy
|
122
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
123
|
-
end_tool: Optional[Callable] = None,
|
124
|
-
# LM settings
|
125
|
-
timeout: Optional[Union[float, str, "Timeout"]] = None,
|
126
|
-
temperature: Optional[float] = None,
|
127
|
-
top_p: Optional[float] = None,
|
128
|
-
max_tokens: Optional[int] = None,
|
129
|
-
presence_penalty: Optional[float] = None,
|
130
|
-
frequency_penalty: Optional[float] = None,
|
131
|
-
seed: Optional[int] = None,
|
132
|
-
user: Optional[str] = None,
|
133
|
-
verbose: bool = False,
|
134
|
-
debug: bool = False,
|
135
|
-
) -> "AgentResponse[T]": ...
|
136
|
-
|
137
|
-
|
138
|
-
def run_agent(
|
139
|
-
messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
|
140
|
-
) -> "AgentResponse[Any]":
|
141
|
-
"""Runs this agent and returns a final agent response or stream.
|
142
|
-
|
143
|
-
You can override defaults assigned to this agent from this function directly.
|
144
|
-
|
145
|
-
Args:
|
146
|
-
messages: The messages to process. Can be:
|
147
|
-
- A single string: "What's the weather like?"
|
148
|
-
- A list of message dicts: [{"role": "user", "content": "Hello"}]
|
149
|
-
- A list of strings: ["Hello", "How are you?"]
|
150
|
-
model: The model to use for this run (overrides default).
|
151
|
-
- Can be a LanguageModel instance or model name string like "gpt-4"
|
152
|
-
max_steps: Maximum number of steps to execute (overrides default).
|
153
|
-
- Useful for limiting tool usage or preventing infinite loops
|
154
|
-
context: Context object for the agent (overrides default).
|
155
|
-
- Any object that provides additional context for the conversation
|
156
|
-
output_type: The expected output type (overrides default).
|
157
|
-
- Use for structured outputs: output_type=MyPydanticModel
|
158
|
-
- Defaults to str for unstructured text responses
|
159
|
-
stream: Whether to return a stream instead of a final response.
|
160
|
-
- If True, returns AgentStream for real-time processing
|
161
|
-
- If False, returns complete AgentResponse
|
162
|
-
verbose: If True, set logger to INFO level for detailed output
|
163
|
-
debug: If True, set logger to DEBUG level for maximum verbosity
|
164
|
-
**kwargs: Additional keyword arguments passed to the language model.
|
165
|
-
- Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1
|
166
|
-
|
167
|
-
Returns:
|
168
|
-
AgentResponse or AgentStream depending on stream parameter.
|
169
|
-
- AgentResponse: Contains final output, steps taken, and metadata
|
170
|
-
- AgentStream: Iterator yielding intermediate steps and final result
|
171
|
-
|
172
|
-
Examples:
|
173
|
-
Basic text conversation:
|
174
|
-
>>> agent = Agent()
|
175
|
-
>>> response = agent.run("Hello, how are you?")
|
176
|
-
>>> print(response.output)
|
177
|
-
"Hello! I'm doing well, thank you for asking."
|
178
|
-
|
179
|
-
With custom model and parameters:
|
180
|
-
>>> response = agent.run(
|
181
|
-
... messages="Explain quantum computing",
|
182
|
-
... model="gpt-4",
|
183
|
-
... max_steps=5,
|
184
|
-
... temperature=0.3
|
185
|
-
... )
|
186
|
-
|
187
|
-
Structured output with Pydantic model:
|
188
|
-
>>> from pydantic import BaseModel
|
189
|
-
>>> class Summary(BaseModel):
|
190
|
-
... title: str
|
191
|
-
... key_points: List[str]
|
192
|
-
>>> response = agent.run(
|
193
|
-
... "Summarize the benefits of renewable energy",
|
194
|
-
... output_type=Summary
|
195
|
-
... )
|
196
|
-
>>> print(response.output.title)
|
197
|
-
>>> print(response.output.key_points)
|
198
|
-
|
199
|
-
Streaming for real-time results:
|
200
|
-
>>> stream = agent.run(
|
201
|
-
... "Write a long story about space exploration",
|
202
|
-
... stream=True
|
203
|
-
... )
|
204
|
-
>>> for chunk in stream:
|
205
|
-
... print(chunk.output, end="", flush=True)
|
206
|
-
|
207
|
-
With context for additional information:
|
208
|
-
>>> context = {"user_preferences": "technical explanations"}
|
209
|
-
>>> response = agent.run(
|
210
|
-
... "How does machine learning work?",
|
211
|
-
... context=context
|
212
|
-
... )
|
213
|
-
"""
|
214
|
-
# Separate agent constructor parameters from run parameters
|
215
|
-
agent_constructor_params = {
|
216
|
-
k: v
|
217
|
-
for k, v in kwargs.items()
|
218
|
-
if k in ["name", "instructions", "description", "tools", "settings", "model"]
|
219
|
-
}
|
220
|
-
agent_run_params = {
|
221
|
-
k: v
|
222
|
-
for k, v in kwargs.items()
|
223
|
-
if k not in ["name", "instructions", "description", "tools", "settings"]
|
224
|
-
}
|
225
|
-
|
226
|
-
agent = Agent(verbose=verbose, debug=debug, **agent_constructor_params)
|
227
|
-
return agent.run(messages, verbose=verbose, debug=debug, **agent_run_params)
|
228
|
-
|
229
|
-
|
230
|
-
# Overloads for async_run_agent
|
231
|
-
@overload
|
232
|
-
async def async_run_agent(
|
233
|
-
messages: "AgentMessages",
|
234
|
-
*,
|
235
|
-
# Agent settings
|
236
|
-
name: str = "agent",
|
237
|
-
instructions: Optional[str] = None,
|
238
|
-
description: Optional[str] = None,
|
239
|
-
tools: Union[List["Tool"], Callable, None] = None,
|
240
|
-
settings: Optional[AgentSettings] = None,
|
241
|
-
# Context management
|
242
|
-
context: Optional["AgentContext"] = None,
|
243
|
-
context_updates: Optional[
|
244
|
-
Union[List[Literal["before", "after"]], Literal["before", "after"]]
|
245
|
-
] = None,
|
246
|
-
context_confirm: bool = False,
|
247
|
-
context_strategy: Literal["selective", "all"] = "all",
|
248
|
-
context_max_retries: int = 3,
|
249
|
-
context_confirm_instructions: Optional[str] = None,
|
250
|
-
context_selection_instructions: Optional[str] = None,
|
251
|
-
context_update_instructions: Optional[str] = None,
|
252
|
-
context_format: Literal["json", "python", "markdown"] = "json",
|
253
|
-
# Model settings
|
254
|
-
model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
|
255
|
-
max_steps: Optional[int] = None,
|
256
|
-
instructor_mode: Optional["LanguageModelInstructorMode"] = None,
|
257
|
-
# End strategy
|
258
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
259
|
-
end_tool: Optional[Callable] = None,
|
260
|
-
# LM settings
|
261
|
-
timeout: Optional[Union[float, str, "Timeout"]] = None,
|
262
|
-
temperature: Optional[float] = None,
|
263
|
-
top_p: Optional[float] = None,
|
264
|
-
max_tokens: Optional[int] = None,
|
265
|
-
presence_penalty: Optional[float] = None,
|
266
|
-
frequency_penalty: Optional[float] = None,
|
267
|
-
seed: Optional[int] = None,
|
268
|
-
user: Optional[str] = None,
|
269
|
-
verbose: bool = False,
|
270
|
-
debug: bool = False,
|
271
|
-
) -> "AgentResponse[str]": ...
|
272
|
-
|
273
|
-
|
274
|
-
@overload
|
275
|
-
async def async_run_agent(
|
276
|
-
messages: "AgentMessages",
|
277
|
-
*,
|
278
|
-
output_type: Type[T],
|
279
|
-
# Agent settings
|
280
|
-
name: str = "agent",
|
281
|
-
instructions: Optional[str] = None,
|
282
|
-
description: Optional[str] = None,
|
283
|
-
tools: Union[List["Tool"], Callable, None] = None,
|
284
|
-
settings: Optional[AgentSettings] = None,
|
285
|
-
# Context management
|
286
|
-
context: Optional["AgentContext"] = None,
|
287
|
-
context_updates: Optional[
|
288
|
-
Union[List[Literal["before", "after"]], Literal["before", "after"]]
|
289
|
-
] = None,
|
290
|
-
context_confirm: bool = False,
|
291
|
-
context_strategy: Literal["selective", "all"] = "all",
|
292
|
-
context_max_retries: int = 3,
|
293
|
-
context_confirm_instructions: Optional[str] = None,
|
294
|
-
context_selection_instructions: Optional[str] = None,
|
295
|
-
context_update_instructions: Optional[str] = None,
|
296
|
-
context_format: Literal["json", "python", "markdown"] = "json",
|
297
|
-
# Model settings
|
298
|
-
model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
|
299
|
-
max_steps: Optional[int] = None,
|
300
|
-
instructor_mode: Optional["LanguageModelInstructorMode"] = None,
|
301
|
-
# End strategy
|
302
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
303
|
-
end_tool: Optional[Callable] = None,
|
304
|
-
# LM settings
|
305
|
-
timeout: Optional[Union[float, str, "Timeout"]] = None,
|
306
|
-
temperature: Optional[float] = None,
|
307
|
-
top_p: Optional[float] = None,
|
308
|
-
max_tokens: Optional[int] = None,
|
309
|
-
presence_penalty: Optional[float] = None,
|
310
|
-
frequency_penalty: Optional[float] = None,
|
311
|
-
seed: Optional[int] = None,
|
312
|
-
user: Optional[str] = None,
|
313
|
-
verbose: bool = False,
|
314
|
-
debug: bool = False,
|
315
|
-
) -> "AgentResponse[T]": ...
|
316
|
-
|
317
|
-
|
318
|
-
async def async_run_agent(
|
319
|
-
messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
|
320
|
-
) -> "AgentResponse[Any]":
|
321
|
-
"""Runs this agent asynchronously and returns a final agent response.
|
322
|
-
|
323
|
-
You can override defaults assigned to this agent from this function directly.
|
324
|
-
This is the async version of run() for non-blocking execution.
|
325
|
-
|
326
|
-
Args:
|
327
|
-
messages: The messages to process. Can be:
|
328
|
-
- A single string: "What's the weather like?"
|
329
|
-
- A list of message dicts: [{"role": "user", "content": "Hello"}]
|
330
|
-
- A list of strings: ["Hello", "How are you?"]
|
331
|
-
model: The model to use for this run (overrides default).
|
332
|
-
- Can be a LanguageModel instance or model name string like "gpt-4"
|
333
|
-
max_steps: Maximum number of steps to execute (overrides default).
|
334
|
-
- Useful for limiting tool usage or preventing infinite loops
|
335
|
-
context: Context object for the agent (overrides default).
|
336
|
-
- Any object that provides additional context for the conversation
|
337
|
-
output_type: The expected output type (overrides default).
|
338
|
-
- Use for structured outputs: output_type=MyPydanticModel
|
339
|
-
- Defaults to str for unstructured text responses
|
340
|
-
**kwargs: Additional keyword arguments passed to the language model.
|
341
|
-
- Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1
|
342
|
-
|
343
|
-
Returns:
|
344
|
-
AgentResponse containing the final output, steps taken, and metadata.
|
345
|
-
|
346
|
-
Examples:
|
347
|
-
Basic async usage:
|
348
|
-
>>> import asyncio
|
349
|
-
>>> agent = Agent()
|
350
|
-
>>> async def main():
|
351
|
-
... response = await agent.async_run("Hello, how are you?")
|
352
|
-
... print(response.output)
|
353
|
-
>>> asyncio.run(main())
|
354
|
-
|
355
|
-
Multiple concurrent requests:
|
356
|
-
>>> async def process_multiple():
|
357
|
-
... tasks = [
|
358
|
-
... agent.async_run("What's 2+2?"),
|
359
|
-
... agent.async_run("What's the capital of France?"),
|
360
|
-
... agent.async_run("Explain photosynthesis")
|
361
|
-
... ]
|
362
|
-
... responses = await asyncio.gather(*tasks)
|
363
|
-
... return responses
|
364
|
-
|
365
|
-
With structured output:
|
366
|
-
>>> from pydantic import BaseModel
|
367
|
-
>>> class Analysis(BaseModel):
|
368
|
-
... sentiment: str
|
369
|
-
... confidence: float
|
370
|
-
>>> async def analyze_text():
|
371
|
-
... response = await agent.async_run(
|
372
|
-
... "Analyze the sentiment of: 'I love this product!'",
|
373
|
-
... output_type=Analysis
|
374
|
-
... )
|
375
|
-
... return response.output
|
376
|
-
|
377
|
-
With custom model and context:
|
378
|
-
>>> async def custom_run():
|
379
|
-
... context = {"domain": "medical", "expertise_level": "expert"}
|
380
|
-
... response = await agent.async_run(
|
381
|
-
... "Explain diabetes",
|
382
|
-
... model="gpt-4",
|
383
|
-
... context=context,
|
384
|
-
... temperature=0.2
|
385
|
-
... )
|
386
|
-
... return response.output
|
387
|
-
"""
|
388
|
-
# Separate agent constructor parameters from run parameters
|
389
|
-
agent_constructor_params = {
|
390
|
-
k: v
|
391
|
-
for k, v in kwargs.items()
|
392
|
-
if k in ["name", "instructions", "description", "tools", "settings", "model"]
|
393
|
-
}
|
394
|
-
agent_run_params = {
|
395
|
-
k: v
|
396
|
-
for k, v in kwargs.items()
|
397
|
-
if k not in ["name", "instructions", "description", "tools", "settings"]
|
398
|
-
}
|
399
|
-
|
400
|
-
agent = Agent(verbose=verbose, debug=debug, **agent_constructor_params)
|
401
|
-
return await agent.async_run(
|
402
|
-
messages, verbose=verbose, debug=debug, **agent_run_params
|
403
|
-
)
|
404
|
-
|
405
|
-
|
406
|
-
# Overloads for run_agent_iter
|
407
|
-
@overload
|
408
|
-
def run_agent_iter(
|
409
|
-
messages: "AgentMessages",
|
410
|
-
*,
|
411
|
-
# Agent settings
|
412
|
-
name: str = "agent",
|
413
|
-
instructions: Optional[str] = None,
|
414
|
-
description: Optional[str] = None,
|
415
|
-
tools: Union[List["Tool"], Callable, None] = None,
|
416
|
-
settings: Optional[AgentSettings] = None,
|
417
|
-
# Context management
|
418
|
-
context: Optional["AgentContext"] = None,
|
419
|
-
context_updates: Optional[
|
420
|
-
Union[List[Literal["before", "after"]], Literal["before", "after"]]
|
421
|
-
] = None,
|
422
|
-
context_confirm: bool = False,
|
423
|
-
context_strategy: Literal["selective", "all"] = "all",
|
424
|
-
context_max_retries: int = 3,
|
425
|
-
context_confirm_instructions: Optional[str] = None,
|
426
|
-
context_selection_instructions: Optional[str] = None,
|
427
|
-
context_update_instructions: Optional[str] = None,
|
428
|
-
context_format: Literal["json", "python", "markdown"] = "json",
|
429
|
-
# Model settings
|
430
|
-
model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
|
431
|
-
max_steps: Optional[int] = None,
|
432
|
-
instructor_mode: Optional["LanguageModelInstructorMode"] = None,
|
433
|
-
# End strategy
|
434
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
435
|
-
end_tool: Optional[Callable] = None,
|
436
|
-
# LM settings
|
437
|
-
timeout: Optional[Union[float, str, "Timeout"]] = None,
|
438
|
-
temperature: Optional[float] = None,
|
439
|
-
top_p: Optional[float] = None,
|
440
|
-
max_tokens: Optional[int] = None,
|
441
|
-
presence_penalty: Optional[float] = None,
|
442
|
-
frequency_penalty: Optional[float] = None,
|
443
|
-
seed: Optional[int] = None,
|
444
|
-
user: Optional[str] = None,
|
445
|
-
) -> "AgentStream[str]": ...
|
446
|
-
|
447
|
-
|
448
|
-
@overload
|
449
|
-
def run_agent_iter(
|
450
|
-
messages: "AgentMessages",
|
451
|
-
*,
|
452
|
-
output_type: Type[T],
|
453
|
-
# Agent settings
|
454
|
-
name: str = "agent",
|
455
|
-
instructions: Optional[str] = None,
|
456
|
-
description: Optional[str] = None,
|
457
|
-
tools: Union[List["Tool"], Callable, None] = None,
|
458
|
-
settings: Optional[AgentSettings] = None,
|
459
|
-
# Context management
|
460
|
-
context: Optional["AgentContext"] = None,
|
461
|
-
context_updates: Optional[
|
462
|
-
Union[List[Literal["before", "after"]], Literal["before", "after"]]
|
463
|
-
] = None,
|
464
|
-
context_confirm: bool = False,
|
465
|
-
context_strategy: Literal["selective", "all"] = "all",
|
466
|
-
context_max_retries: int = 3,
|
467
|
-
context_confirm_instructions: Optional[str] = None,
|
468
|
-
context_selection_instructions: Optional[str] = None,
|
469
|
-
context_update_instructions: Optional[str] = None,
|
470
|
-
context_format: Literal["json", "python", "markdown"] = "json",
|
471
|
-
# Model settings
|
472
|
-
model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
|
473
|
-
max_steps: Optional[int] = None,
|
474
|
-
instructor_mode: Optional["LanguageModelInstructorMode"] = None,
|
475
|
-
# End strategy
|
476
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
477
|
-
end_tool: Optional[Callable] = None,
|
478
|
-
# LM settings
|
479
|
-
timeout: Optional[Union[float, str, "Timeout"]] = None,
|
480
|
-
temperature: Optional[float] = None,
|
481
|
-
top_p: Optional[float] = None,
|
482
|
-
max_tokens: Optional[int] = None,
|
483
|
-
presence_penalty: Optional[float] = None,
|
484
|
-
frequency_penalty: Optional[float] = None,
|
485
|
-
seed: Optional[int] = None,
|
486
|
-
user: Optional[str] = None,
|
487
|
-
) -> "AgentStream[T]": ...
|
488
|
-
|
489
|
-
|
490
|
-
def run_agent_iter(
|
491
|
-
messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
|
492
|
-
) -> "AgentStream[Any]":
|
493
|
-
"""Iterate over agent steps, yielding each step response.
|
494
|
-
|
495
|
-
You can override defaults assigned to this agent from this function directly.
|
496
|
-
Returns an AgentStream that yields intermediate steps and the final result.
|
497
|
-
|
498
|
-
Args:
|
499
|
-
messages: The messages to process. Can be:
|
500
|
-
- A single string: "What's the weather like?"
|
501
|
-
- A list of message dicts: [{"role": "user", "content": "Hello"}]
|
502
|
-
- A list of strings: ["Hello", "How are you?"]
|
503
|
-
model: The model to use for this run (overrides default).
|
504
|
-
- Can be a LanguageModel instance or model name string like "gpt-4"
|
505
|
-
max_steps: Maximum number of steps to execute (overrides default).
|
506
|
-
- Useful for limiting tool usage or preventing infinite loops
|
507
|
-
context: Context object for the agent (overrides default).
|
508
|
-
- Any object that provides additional context for the conversation
|
509
|
-
output_type: The expected output type (overrides default).
|
510
|
-
- Use for structured outputs: output_type=MyPydanticModel
|
511
|
-
- Defaults to str for unstructured text responses
|
512
|
-
**kwargs: Additional keyword arguments passed to the language model.
|
513
|
-
- Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1
|
514
|
-
|
515
|
-
Returns:
|
516
|
-
AgentStream that can be iterated over to get each step response,
|
517
|
-
including tool calls and intermediate reasoning steps.
|
518
|
-
|
519
|
-
Examples:
|
520
|
-
Basic iteration over steps:
|
521
|
-
>>> agent = Agent(tools=[calculator_tool])
|
522
|
-
>>> stream = agent.iter("What's 25 * 47?")
|
523
|
-
>>> for step in stream:
|
524
|
-
... print(f"Step {step.step_number}: {step.output}")
|
525
|
-
... if step.tool_calls:
|
526
|
-
... print(f"Tool calls: {len(step.tool_calls)}")
|
527
|
-
|
528
|
-
Real-time processing with streaming:
|
529
|
-
>>> stream = agent.iter("Write a poem about nature")
|
530
|
-
>>> for chunk in stream:
|
531
|
-
... if chunk.output:
|
532
|
-
... print(chunk.output, end="", flush=True)
|
533
|
-
... if chunk.is_final:
|
534
|
-
... print("\n--- Final response ---")
|
535
|
-
|
536
|
-
With structured output iteration:
|
537
|
-
>>> from pydantic import BaseModel
|
538
|
-
>>> class StepAnalysis(BaseModel):
|
539
|
-
... reasoning: str
|
540
|
-
... confidence: float
|
541
|
-
>>> stream = agent.iter(
|
542
|
-
... "Analyze this step by step: Why is the sky blue?",
|
543
|
-
... output_type=StepAnalysis
|
544
|
-
... )
|
545
|
-
>>> for step in stream:
|
546
|
-
... if step.output:
|
547
|
-
... print(f"Reasoning: {step.output.reasoning}")
|
548
|
-
... print(f"Confidence: {step.output.confidence}")
|
549
|
-
|
550
|
-
Processing with custom model and context:
|
551
|
-
>>> context = {"domain": "science", "depth": "detailed"}
|
552
|
-
>>> stream = agent.iter(
|
553
|
-
... "Explain quantum entanglement",
|
554
|
-
... model="gpt-4",
|
555
|
-
... context=context,
|
556
|
-
... max_steps=3,
|
557
|
-
... temperature=0.1
|
558
|
-
... )
|
559
|
-
>>> results = []
|
560
|
-
>>> for step in stream:
|
561
|
-
... results.append(step.output)
|
562
|
-
... if step.is_final:
|
563
|
-
... break
|
564
|
-
|
565
|
-
Error handling during iteration:
|
566
|
-
>>> try:
|
567
|
-
... stream = agent.iter("Complex calculation task")
|
568
|
-
... for step in stream:
|
569
|
-
... if step.error:
|
570
|
-
... print(f"Error in step: {step.error}")
|
571
|
-
... else:
|
572
|
-
... print(f"Step result: {step.output}")
|
573
|
-
... except Exception as e:
|
574
|
-
... print(f"Stream error: {e}")
|
575
|
-
"""
|
576
|
-
# Separate agent constructor parameters from run parameters
|
577
|
-
agent_constructor_params = {
|
578
|
-
k: v
|
579
|
-
for k, v in kwargs.items()
|
580
|
-
if k in ["name", "instructions", "description", "tools", "settings", "model"]
|
581
|
-
}
|
582
|
-
agent_run_params = {
|
583
|
-
k: v
|
584
|
-
for k, v in kwargs.items()
|
585
|
-
if k not in ["name", "instructions", "description", "tools", "settings"]
|
586
|
-
}
|
587
|
-
|
588
|
-
agent = Agent(verbose=verbose, debug=debug, **agent_constructor_params)
|
589
|
-
return agent.run(
|
590
|
-
messages, stream=True, verbose=verbose, debug=debug, **agent_run_params
|
591
|
-
)
|
592
|
-
|
593
|
-
|
594
|
-
# Overloads for async_run_agent_iter
# Overload: no ``output_type`` supplied -> the stream's step outputs are plain ``str``.
@overload
def async_run_agent_iter(
    messages: "AgentMessages",
    *,
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # End strategy
    end_strategy: Optional[Literal["tool"]] = None,
    end_tool: Optional[Callable] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentStream[str]": ...
|
634
|
-
|
635
|
-
|
636
|
-
# Overload: ``output_type`` supplied -> the stream's step outputs are typed as ``T``.
@overload
def async_run_agent_iter(
    messages: "AgentMessages",
    *,
    output_type: Type[T],
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # End strategy
    end_strategy: Optional[Literal["tool"]] = None,
    end_tool: Optional[Callable] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentStream[T]": ...
|
676
|
-
|
677
|
-
|
678
|
-
def async_run_agent_iter(
    messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
) -> "AgentStream[Any]":
    """Iterate over agent steps asynchronously, yielding each step response.

    Args:
        messages: The input messages to process.
        model: Language model to use (overrides the agent's default).
        max_steps: Maximum number of steps to take.
        context: Context object used to maintain state.
        output_type: Type for structured output.
        **kwargs: Additional parameters for the language model.

    Returns:
        An AgentStream that can be iterated over asynchronously.
    """
    constructor_keys = ("name", "instructions", "description", "tools", "settings")

    # Split **kwargs into Agent(...) construction kwargs and agent.run(...) kwargs.
    # NOTE(review): "model" is forwarded to BOTH the constructor and run() here
    # (it is in the constructor set but not excluded from the run set) — this
    # mirrors the sync variant; confirm the double forwarding is intentional.
    init_kwargs = {}
    run_kwargs = {}
    for key, value in kwargs.items():
        if key in constructor_keys or key == "model":
            init_kwargs[key] = value
        if key not in constructor_keys:
            run_kwargs[key] = value

    agent = Agent(verbose=verbose, debug=debug, **init_kwargs)
    return agent.run(
        messages, stream=True, verbose=verbose, debug=debug, **run_kwargs
    )
|
710
|
-
|
711
|
-
|
712
|
-
def agent_decorator(
    fn: Union[str, Callable, None] = None,
    *,
    # Agent settings
    name: Optional[str] = None,
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    return_output: bool = True,
    # End strategy
    end_strategy: Optional[Literal["tool"]] = None,
    end_tool: Optional[Callable] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
    verbose: bool = False,
    debug: bool = False,
):
    """Decorator that converts a function into an agent.

    The function's parameters become the input to the LLM (converted to a
    string), the function's return type annotation becomes the agent's output
    type, and the function's docstring becomes the agent's instructions.

    Works with both sync and async functions.

    Can be used in multiple ways:

    1. As a decorator with parameters:
        @agent_decorator(name="steve", temperature=0.7)
        def my_agent():
            pass

    2. As a decorator without parameters:
        @agent_decorator
        def my_agent():
            pass

    3. As an inline function with name as first argument:
        agent = agent_decorator("steve")
        # Then use: decorated_func = agent(my_function)

    4. As an inline function with all parameters:
        agent = agent_decorator(name="steve", temperature=0.7)
        # Then use: decorated_func = agent(my_function)

    Args:
        fn: Either the decorated function (bare-decorator form), the agent
            name as a string, or None (parameterized-decorator form).
        name: Agent name; ignored when ``fn`` is a string.
        (Remaining keyword arguments are forwarded unchanged to the agent;
        see ``_create_agent_wrapper`` for their meaning.)

    Returns:
        The wrapped function (when used as ``@agent_decorator``) or a
        decorator that produces the wrapped function.
    """
    # Resolve the agent name from the calling pattern:
    #   agent_decorator("steve")      -> first positional arg is the name
    #   @agent_decorator / otherwise  -> explicit ``name`` or default "agent"
    actual_name = fn if isinstance(fn, str) else (name or "agent")

    def decorator(func: Callable) -> Callable:
        # Single shared call site — the original duplicated this 31-argument
        # call verbatim in two branches, which invited drift between them.
        return _create_agent_wrapper(
            func,
            actual_name,
            instructions,
            description,
            tools,
            settings,
            context,
            context_updates,
            context_confirm,
            context_strategy,
            context_max_retries,
            context_confirm_instructions,
            context_selection_instructions,
            context_update_instructions,
            context_format,
            model,
            max_steps,
            instructor_mode,
            return_output,
            end_strategy,
            end_tool,
            timeout,
            temperature,
            top_p,
            max_tokens,
            presence_penalty,
            frequency_penalty,
            seed,
            user,
            verbose,
            debug,
        )

    if callable(fn):
        # Case: @agent_decorator (no parentheses) — wrap immediately.
        return decorator(fn)
    # Case: agent_decorator(...) — return the decorator for later application.
    return decorator
|
862
|
-
|
863
|
-
|
864
|
-
def _create_agent_wrapper(
    func: Callable,
    name: str,
    instructions: Optional[str],
    description: Optional[str],
    tools: Union[List["Tool"], Callable, None],
    settings: Optional[AgentSettings],
    context: Optional["AgentContext"],
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ],
    context_confirm: bool,
    context_strategy: Literal["selective", "all"],
    context_max_retries: int,
    context_confirm_instructions: Optional[str],
    context_selection_instructions: Optional[str],
    context_update_instructions: Optional[str],
    context_format: Literal["json", "python", "markdown"],
    model: Optional[Union["LanguageModel", "LanguageModelName"]],
    max_steps: Optional[int],
    instructor_mode: Optional["LanguageModelInstructorMode"],
    return_output: bool,
    end_strategy: Optional[Literal["tool"]],
    end_tool: Optional[Callable],
    timeout: Optional[Union[float, str, "Timeout"]],
    temperature: Optional[float],
    top_p: Optional[float],
    max_tokens: Optional[int],
    presence_penalty: Optional[float],
    frequency_penalty: Optional[float],
    seed: Optional[int],
    user: Optional[str],
    verbose: bool,
    debug: bool,
) -> Callable:
    """Create the sync or async wrapper that runs ``func`` as an agent.

    The wrapper binds the call's arguments against ``func``'s signature,
    renders them as a ``"param: value"`` message string, and runs an agent
    with ``func``'s return annotation as the structured output type and its
    docstring (unless ``instructions`` is given) as the instructions.

    Args:
        func: The user function being converted into an agent.
        name: Agent name.
        instructions: Explicit instructions; falls back to ``func.__doc__``.
        return_output: If True (default), the wrapper returns
            ``response.output``; otherwise the full response object.
        (Remaining arguments are forwarded unchanged to ``run_agent`` /
        ``async_run_agent``.)

    Returns:
        An async wrapper when ``func`` is a coroutine function, otherwise a
        sync wrapper. ``functools.wraps`` preserves ``func``'s metadata.
    """
    import inspect
    import asyncio
    from typing import get_type_hints

    # Function metadata drives the agent configuration.
    sig = inspect.signature(func)
    type_hints = get_type_hints(func)
    return_type = type_hints.get("return", str)  # default to plain text output
    func_instructions = instructions or func.__doc__ or ""

    # Keyword arguments shared by the sync and async run calls. The original
    # duplicated this 31-keyword call verbatim in both wrappers; building it
    # once removes the risk of the two copies drifting apart.
    run_kwargs = dict(
        output_type=return_type,
        name=name,
        instructions=func_instructions,
        description=description,
        tools=tools,
        settings=settings,
        context=context,
        context_updates=context_updates,
        context_confirm=context_confirm,
        context_strategy=context_strategy,
        context_max_retries=context_max_retries,
        context_confirm_instructions=context_confirm_instructions,
        context_selection_instructions=context_selection_instructions,
        context_update_instructions=context_update_instructions,
        context_format=context_format,
        model=model or "openai/gpt-4o-mini",  # fallback default model
        max_steps=max_steps,
        instructor_mode=instructor_mode,
        end_strategy=end_strategy,
        end_tool=end_tool,
        timeout=timeout,
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_tokens,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        seed=seed,
        user=user,
        verbose=verbose,
        debug=debug,
    )

    def _build_message(args: tuple, kwargs: dict) -> str:
        # Render the bound call arguments as "param: value" lines — this
        # string is what the LLM receives as the user message.
        bound_args = sig.bind(*args, **kwargs)
        bound_args.apply_defaults()
        return "\n".join(
            f"{param_name}: {param_value}"
            for param_name, param_value in bound_args.arguments.items()
        )

    if asyncio.iscoroutinefunction(func):

        @functools.wraps(func)
        async def async_wrapper(*args, **kwargs):
            response = await async_run_agent(
                messages=_build_message(args, kwargs), **run_kwargs
            )
            # Return just the output by default; full response when requested.
            return response.output if return_output else response

        return async_wrapper

    @functools.wraps(func)
    def sync_wrapper(*args, **kwargs):
        response = run_agent(messages=_build_message(args, kwargs), **run_kwargs)
        # Return just the output by default; full response when requested.
        return response.output if return_output else response

    return sync_wrapper
|