hammad-python 0.0.19__py3-none-any.whl → 0.0.20__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- hammad/__init__.py +7 -137
- hammad/_internal.py +1 -0
- hammad/cli/_runner.py +8 -8
- hammad/cli/plugins.py +55 -26
- hammad/cli/styles/utils.py +16 -8
- hammad/data/__init__.py +1 -5
- hammad/data/collections/__init__.py +2 -3
- hammad/data/collections/collection.py +41 -22
- hammad/data/collections/indexes/__init__.py +1 -1
- hammad/data/collections/indexes/qdrant/__init__.py +1 -1
- hammad/data/collections/indexes/qdrant/index.py +106 -118
- hammad/data/collections/indexes/qdrant/settings.py +14 -14
- hammad/data/collections/indexes/qdrant/utils.py +28 -38
- hammad/data/collections/indexes/tantivy/__init__.py +1 -1
- hammad/data/collections/indexes/tantivy/index.py +57 -59
- hammad/data/collections/indexes/tantivy/settings.py +8 -19
- hammad/data/collections/indexes/tantivy/utils.py +28 -52
- hammad/data/models/__init__.py +2 -7
- hammad/data/sql/__init__.py +1 -1
- hammad/data/sql/database.py +71 -73
- hammad/data/sql/types.py +37 -51
- hammad/formatting/__init__.py +2 -1
- hammad/formatting/json/converters.py +2 -2
- hammad/genai/__init__.py +96 -36
- hammad/genai/agents/__init__.py +47 -1
- hammad/genai/agents/agent.py +1022 -0
- hammad/genai/agents/run.py +615 -0
- hammad/genai/agents/types/__init__.py +29 -22
- hammad/genai/agents/types/agent_context.py +13 -0
- hammad/genai/agents/types/agent_event.py +128 -0
- hammad/genai/agents/types/agent_hooks.py +220 -0
- hammad/genai/agents/types/agent_messages.py +31 -0
- hammad/genai/agents/types/agent_response.py +90 -0
- hammad/genai/agents/types/agent_stream.py +242 -0
- hammad/genai/models/__init__.py +1 -0
- hammad/genai/models/embeddings/__init__.py +39 -0
- hammad/genai/{embedding_models/embedding_model.py → models/embeddings/model.py} +45 -41
- hammad/genai/{embedding_models → models/embeddings}/run.py +10 -8
- hammad/genai/models/embeddings/types/__init__.py +37 -0
- hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_name.py +2 -4
- hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_response.py +11 -4
- hammad/genai/{embedding_models/embedding_model_request.py → models/embeddings/types/embedding_model_run_params.py} +4 -3
- hammad/genai/models/embeddings/types/embedding_model_settings.py +47 -0
- hammad/genai/models/language/__init__.py +48 -0
- hammad/genai/{language_models/language_model.py → models/language/model.py} +481 -204
- hammad/genai/{language_models → models/language}/run.py +80 -57
- hammad/genai/models/language/types/__init__.py +40 -0
- hammad/genai/models/language/types/language_model_instructor_mode.py +47 -0
- hammad/genai/models/language/types/language_model_messages.py +28 -0
- hammad/genai/{language_models/_types.py → models/language/types/language_model_name.py} +3 -40
- hammad/genai/{language_models → models/language/types}/language_model_request.py +17 -25
- hammad/genai/{language_models → models/language/types}/language_model_response.py +61 -68
- hammad/genai/{language_models → models/language/types}/language_model_response_chunk.py +8 -5
- hammad/genai/models/language/types/language_model_settings.py +89 -0
- hammad/genai/{language_models/_streaming.py → models/language/types/language_model_stream.py} +221 -243
- hammad/genai/{language_models/_utils → models/language/utils}/__init__.py +8 -11
- hammad/genai/models/language/utils/requests.py +421 -0
- hammad/genai/{language_models/_utils/_structured_outputs.py → models/language/utils/structured_outputs.py} +31 -20
- hammad/genai/models/model_provider.py +4 -0
- hammad/genai/{multimodal_models.py → models/multimodal.py} +4 -5
- hammad/genai/models/reranking.py +26 -0
- hammad/genai/types/__init__.py +1 -0
- hammad/genai/types/base.py +215 -0
- hammad/genai/{agents/types → types}/history.py +101 -88
- hammad/genai/{agents/types/tool.py → types/tools.py} +156 -141
- hammad/logging/logger.py +1 -1
- hammad/mcp/client/__init__.py +2 -3
- hammad/mcp/client/client.py +10 -10
- hammad/mcp/servers/__init__.py +2 -1
- hammad/service/decorators.py +1 -3
- hammad/web/models.py +1 -3
- hammad/web/search/client.py +10 -22
- {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/METADATA +10 -2
- hammad_python-0.0.20.dist-info/RECORD +127 -0
- hammad/genai/embedding_models/__init__.py +0 -41
- hammad/genai/language_models/__init__.py +0 -35
- hammad/genai/language_models/_utils/_completions.py +0 -131
- hammad/genai/language_models/_utils/_messages.py +0 -89
- hammad/genai/language_models/_utils/_requests.py +0 -202
- hammad/genai/rerank_models.py +0 -26
- hammad_python-0.0.19.dist-info/RECORD +0 -111
- {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1022 @@
"""hammad.genai.agents.agent"""

from typing import (
    Any,
    Callable,
    Generic,
    Literal,
    List,
    Type,
    TypeVar,
    Optional,
    Union,
    Dict,
    overload,
    TYPE_CHECKING,
)
from pydantic import BaseModel, Field, create_model
from dataclasses import dataclass, field
from enum import Enum
import json

from ..types.base import BaseGenAIModel, BaseGenAIModelSettings
from ..models.language.model import LanguageModel
from ..models.language.types import (
    LanguageModelResponse,
    LanguageModelName,
    LanguageModelInstructorMode,
)
from ..types.tools import (
    Tool,
    define_tool,
    execute_tools_from_language_model_response,
)
from ..models.language.utils.requests import (
    parse_messages_input as parse_messages,
    consolidate_system_messages,
)
from ...formatting.text.converters import convert_to_text
from .types.agent_response import (
    AgentResponse,
    _create_agent_response_from_language_model_response,
)
from .types.agent_stream import AgentStream
from .types.agent_context import AgentContext
from .types.agent_event import AgentEvent
from .types.agent_hooks import HookManager, HookDecorator
from .types.agent_messages import AgentMessages

if TYPE_CHECKING:
    pass


T = TypeVar("T")


@dataclass
class AgentSettings:
    """Settings object that controls the default behavior of an agent's run."""

    max_steps: int = field(default=10)
    """The maximum amount of steps the agent can take before stopping."""

    add_name_to_instructions: bool = field(default=True)
    """Whether to add the agent name to the instructions."""

    context_format: Literal["json", "python", "markdown"] = field(default="json")
    """Format for context in instructions."""

    # Context management settings
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = field(default=None)
    """When to update context ('before', 'after', or both)."""

    context_confirm: bool = field(default=False)
    """Whether to confirm context updates."""

    context_strategy: Literal["selective", "all"] = field(default="all")
    """Strategy for context updates."""

    context_max_retries: int = field(default=3)
    """Maximum retries for context updates."""

    context_confirm_instructions: Optional[str] = field(default=None)
    """Custom instructions for context confirmation."""

    context_selection_instructions: Optional[str] = field(default=None)
    """Custom instructions for context selection."""

    context_update_instructions: Optional[str] = field(default=None)
    """Custom instructions for context updates."""


class AgentModelSettings(BaseGenAIModelSettings):
    """Agent-specific model settings that extend the base model settings."""

    instructor_mode: Optional[LanguageModelInstructorMode] = None
    """Instructor mode for structured outputs."""

    max_steps: int = 10
    """Maximum number of steps the agent can take."""

    add_name_to_instructions: bool = True
    """Whether to add the agent name to the instructions."""

    context_format: Literal["json", "python", "markdown"] = "json"
    """Format for context in instructions."""

    # Context management settings
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None
    """When to update context ('before', 'after', or both)."""

    context_confirm: bool = False
    """Whether to confirm context updates."""

    context_strategy: Literal["selective", "all"] = "all"
    """Strategy for context updates."""

    context_max_retries: int = 3
    """Maximum retries for context updates."""

    context_confirm_instructions: Optional[str] = None
    """Custom instructions for context confirmation."""

    context_selection_instructions: Optional[str] = None
    """Custom instructions for context selection."""

    context_update_instructions: Optional[str] = None
    """Custom instructions for context updates."""


def _build_tools(tools: List[Tool] | Callable | None) -> List[Tool]:
    """Builds a list of tools from a list of tools or a callable that returns a list of tools."""
    if tools is None:
        return []
    if callable(tools):
        return [define_tool(tools)]

    processed_tools = []
    for tool in tools:
        if not isinstance(tool, Tool):
            tool = define_tool(tool)
        processed_tools.append(tool)

    return processed_tools


def _get_instructions(
    name: str,
    instructions: Optional[str],
    add_name_to_instructions: bool,
) -> Optional[str]:
    """Gets the instructions for an agent."""
    if add_name_to_instructions and name:
        base_instructions = instructions or ""
        return f"You are {name}.\n\n{base_instructions}".strip()
    return instructions


def _format_context_for_instructions(
    context: AgentContext | None,
    context_format: Literal["json", "python", "markdown"] = "json",
) -> str:
    """Format context object for inclusion in instructions."""
    if context is None:
        return ""

    if context_format == "json":
        if isinstance(context, BaseModel):
            return context.model_dump_json(indent=2)
        elif isinstance(context, dict):
            return json.dumps(context, indent=2)
        else:
            return json.dumps(str(context), indent=2)

    elif context_format == "python":
        if hasattr(context, "__repr__"):
            return repr(context)
        elif hasattr(context, "__str__"):
            return str(context)
        else:
            return str(context)

    elif context_format == "markdown":
        return convert_to_text(context)

    return str(context)


def _update_context_object(
    context: AgentContext, updates: Dict[str, Any]
) -> AgentContext:
    """Update a context object with new values."""
    if isinstance(context, BaseModel):
        # For Pydantic models, create a copy with updated values
        return context.model_copy(update=updates)
    elif isinstance(context, dict):
        # For dictionaries, update in place
        updated_context = context.copy()
        updated_context.update(updates)
        return updated_context
    else:
        raise ValueError(f"Cannot update context of type {type(context)}")


class Agent(BaseGenAIModel, Generic[T]):
    """A generative AI agent that can execute tools, generate structured outputs,
    and maintain context across multiple conversation steps.
    """

    model: LanguageModelName = "openai/gpt-4o-mini"
    """The language model to use for the agent."""

    name: str = "agent"
    """The name of the agent."""

    description: Optional[str] = None
    """A description of the agent."""

    instructions: Optional[str] = None
    """System instructions for the agent."""

    tools: List[Tool] = Field(default_factory=list)
    """List of tools available to the agent."""

    settings: AgentSettings = Field(default_factory=AgentSettings)
    """Agent-specific settings."""

    instructor_mode: Optional[LanguageModelInstructorMode] = None
    """Instructor mode for structured outputs."""

    def __init__(
        self,
        name: str = "agent",
        instructions: Optional[str] = None,
        model: Union[LanguageModel, LanguageModelName] = "openai/gpt-4o-mini",
        description: Optional[str] = None,
        tools: Union[List[Tool], Callable, None] = None,
        settings: Optional[AgentSettings] = None,
        instructor_mode: Optional[LanguageModelInstructorMode] = None,
        # Context management parameters
        context_updates: Optional[
            Union[List[Literal["before", "after"]], Literal["before", "after"]]
        ] = None,
        context_confirm: bool = False,
        context_strategy: Literal["selective", "all"] = "all",
        context_max_retries: int = 3,
        context_confirm_instructions: Optional[str] = None,
        context_selection_instructions: Optional[str] = None,
        context_update_instructions: Optional[str] = None,
        context_format: Literal["json", "python", "markdown"] = "json",
        **kwargs: Any,
    ):
        # Initialize BaseGenAIModel with basic parameters
        super().__init__(
            model=model if isinstance(model, str) else model.model, **kwargs
        )

        # Agent-specific initialization
        self.name = name
        self.description = description
        self.tools = _build_tools(tools)
        self.settings = settings or AgentSettings()
        self.instructor_mode = instructor_mode

        # Process instructions
        self.instructions = _get_instructions(
            name=name,
            instructions=instructions,
            add_name_to_instructions=self.settings.add_name_to_instructions,
        )

        # Initialize the language model
        if isinstance(model, LanguageModel):
            self._language_model = model
        else:
            self._language_model = LanguageModel(model=model, **kwargs)

        # Context management settings
        self.context_updates = context_updates
        self.context_confirm = context_confirm
        self.context_strategy = context_strategy
        self.context_max_retries = context_max_retries
        self.context_confirm_instructions = context_confirm_instructions
        self.context_selection_instructions = context_selection_instructions
        self.context_update_instructions = context_update_instructions
        self.context_format = context_format

        # Hook system
        self.hook_manager = HookManager()
        self.on = HookDecorator(self.hook_manager)

    @property
    def language_model(self) -> LanguageModel:
        """Get the underlying language model."""
        return self._language_model

    def _should_update_context(
        self, context: AgentContext, timing: Literal["before", "after"]
    ) -> bool:
        """Determine if context should be updated based on timing and configuration."""
        if not self.context_updates:
            return False

        if isinstance(self.context_updates, str):
            return self.context_updates == timing
        else:
            return timing in self.context_updates

    def _create_context_confirm_model(self):
        """Create IsUpdateRequired model for context confirmation."""
        return create_model("IsUpdateRequired", decision=(bool, ...))

    def _create_context_selection_model(self, context: AgentContext):
        """Create FieldsToUpdate model for selective context updates."""
        if isinstance(context, BaseModel):
            field_names = list(context.model_fields.keys())
        elif isinstance(context, dict):
            field_names = list(context.keys())
        else:
            raise ValueError(
                f"Cannot create selection model for context type {type(context)}"
            )

        FieldEnum = Enum("FieldEnum", {name: name for name in field_names})
        return create_model("FieldsToUpdate", fields=(List[FieldEnum], ...))

    def _create_context_update_model(
        self, context: AgentContext, field_name: str = None
    ):
        """Create update model for context updates."""
        if field_name:
            # Single field update
            if isinstance(context, BaseModel):
                field_type = context.model_fields[field_name].annotation
            elif isinstance(context, dict):
                field_type = type(context[field_name])
            else:
                field_type = Any

            return create_model(
                field_name.capitalize(), **{field_name: (field_type, ...)}
            )
        else:
            # All fields update
            return create_model("Update", updates=(Dict[str, Any], ...))

    def _perform_context_update(
        self,
        context: AgentContext,
        model: LanguageModel,
        current_messages: List[Dict[str, Any]],
        timing: Literal["before", "after"],
    ) -> AgentContext:
        """Perform context update with retries and error handling."""
        updated_context = context

        for attempt in range(self.context_max_retries):
            try:
                # Check if update is needed (if confirmation is enabled)
                if self.context_confirm:
                    confirm_model = self._create_context_confirm_model()
                    confirm_instructions = f"Based on the conversation, determine if the context should be updated {timing} processing."
                    if self.context_confirm_instructions:
                        confirm_instructions += (
                            f"\n\n{self.context_confirm_instructions}"
                        )

                    confirm_response = model.run(
                        messages=current_messages
                        + [{"role": "user", "content": confirm_instructions}],
                        type=confirm_model,
                        instructor_mode=self.instructor_mode,
                    )

                    if not confirm_response.output.decision:
                        return updated_context

                # Perform the update based on strategy
                if self.context_strategy == "selective":
                    # Get fields to update
                    selection_model = self._create_context_selection_model(
                        updated_context
                    )
                    selection_instructions = f"Select which fields in the context should be updated {timing} processing."
                    if self.context_selection_instructions:
                        selection_instructions += (
                            f"\n\n{self.context_selection_instructions}"
                        )

                    selection_response = model.run(
                        messages=current_messages
                        + [{"role": "user", "content": selection_instructions}],
                        type=selection_model,
                        instructor_mode=self.instructor_mode,
                    )

                    # Update each selected field
                    for field_enum in selection_response.output.fields:
                        field_name = field_enum.value
                        field_model = self._create_context_update_model(
                            updated_context, field_name
                        )
                        field_instructions = (
                            f"Update the {field_name} field in the context."
                        )
                        if self.context_update_instructions:
                            field_instructions += (
                                f"\n\n{self.context_update_instructions}"
                            )

                        field_response = model.run(
                            messages=current_messages
                            + [{"role": "user", "content": field_instructions}],
                            type=field_model,
                            instructor_mode=self.instructor_mode,
                        )

                        # Apply the update
                        field_updates = {
                            field_name: getattr(field_response.output, field_name)
                        }
                        updated_context = _update_context_object(
                            updated_context, field_updates
                        )

                else:  # strategy == "all"
                    # Update all fields at once
                    update_model = self._create_context_update_model(updated_context)
                    update_instructions = f"Update the context {timing} processing."
                    if self.context_update_instructions:
                        update_instructions += f"\n\n{self.context_update_instructions}"

                    update_response = model.run(
                        messages=current_messages
                        + [{"role": "user", "content": update_instructions}],
                        type=update_model,
                        instructor_mode=self.instructor_mode,
                    )

                    # Apply the updates
                    updated_context = _update_context_object(
                        updated_context, update_response.output.updates
                    )

                # Trigger context update hooks
                self.hook_manager.trigger_hooks("context_update", updated_context)

                return updated_context

            except Exception as e:
                if attempt == self.context_max_retries - 1:
                    # Last attempt failed, return original context
                    return updated_context
                # Continue to next attempt
                continue

        return updated_context

    def _format_messages_with_context(
        self, messages: List[Dict[str, Any]], context: Optional[AgentContext] = None
    ) -> List[Dict[str, Any]]:
        """Format messages with instructions and context."""
        formatted_messages = messages.copy()

        if self.instructions:
            system_content = self.instructions

            # Add context if provided
            if context is not None:
                context_str = _format_context_for_instructions(
                    context, self.context_format
                )
                if context_str:
                    system_content += f"\n\nContext:\n{context_str}"

            system_message = {"role": "system", "content": system_content}
            formatted_messages = [system_message] + formatted_messages

        return consolidate_system_messages(formatted_messages)

    # Overloaded run methods for streaming support
    @overload
    def run(
        self,
        messages: AgentMessages,
        model: Optional[Union[LanguageModel, LanguageModelName]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        *,
        stream: Literal[False] = False,
        **kwargs: Any,
    ) -> AgentResponse[T]: ...

    @overload
    def run(
        self,
        messages: AgentMessages,
        model: Optional[Union[LanguageModel, LanguageModelName]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        *,
        stream: Literal[True],
        **kwargs: Any,
    ) -> AgentStream[T]: ...

    def run(
        self,
        messages: AgentMessages,
        model: Optional[Union[LanguageModel, LanguageModelName]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        stream: bool = False,
        **kwargs: Any,
    ) -> Union[AgentResponse[T], AgentStream[T]]:
        """Runs this agent and returns a final agent response or stream.

        You can override defaults assigned to this agent from this function directly.

        Args:
            messages: The messages to process. Can be:
                - A single string: "What's the weather like?"
                - A list of message dicts: [{"role": "user", "content": "Hello"}]
                - A list of strings: ["Hello", "How are you?"]
            model: The model to use for this run (overrides default).
                - Can be a LanguageModel instance or model name string like "gpt-4"
            max_steps: Maximum number of steps to execute (overrides default).
                - Useful for limiting tool usage or preventing infinite loops
            context: Context object for the agent (overrides default).
                - Any object that provides additional context for the conversation
            output_type: The expected output type (overrides default).
                - Use for structured outputs: output_type=MyPydanticModel
                - Defaults to str for unstructured text responses
            stream: Whether to return a stream instead of a final response.
                - If True, returns AgentStream for real-time processing
                - If False, returns complete AgentResponse
            **kwargs: Additional keyword arguments passed to the language model.
                - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1

        Returns:
            AgentResponse or AgentStream depending on stream parameter.
            - AgentResponse: Contains final output, steps taken, and metadata
            - AgentStream: Iterator yielding intermediate steps and final result

        Examples:
            Basic text conversation:
            >>> agent = Agent()
            >>> response = agent.run("Hello, how are you?")
            >>> print(response.output)
            "Hello! I'm doing well, thank you for asking."

            With custom model and parameters:
            >>> response = agent.run(
            ...     messages="Explain quantum computing",
            ...     model="gpt-4",
            ...     max_steps=5,
            ...     temperature=0.3
            ... )

            Structured output with Pydantic model:
            >>> from pydantic import BaseModel
            >>> class Summary(BaseModel):
            ...     title: str
            ...     key_points: List[str]
            >>> response = agent.run(
            ...     "Summarize the benefits of renewable energy",
            ...     output_type=Summary
            ... )
            >>> print(response.output.title)
            >>> print(response.output.key_points)

            Streaming for real-time results:
            >>> stream = agent.run(
            ...     "Write a long story about space exploration",
            ...     stream=True
            ... )
            >>> for chunk in stream:
            ...     print(chunk.output, end="", flush=True)

            With context for additional information:
            >>> context = {"user_preferences": "technical explanations"}
            >>> response = agent.run(
            ...     "How does machine learning work?",
            ...     context=context
            ... )
        """
        # Handle streaming
        if stream:
            return AgentStream(
                agent=self,
                messages=messages,
                model=model,
                max_steps=max_steps,
                context=context,
                output_type=output_type,
                stream=stream,
                **kwargs,
            )

        # Use provided model or default
        if model is None:
            working_model = self.language_model
        elif isinstance(model, str):
            working_model = LanguageModel(model=model)
        else:
            working_model = model

        # Use provided max_steps or default
        if max_steps is None:
            max_steps = self.settings.max_steps

        # Parse initial messages
        parsed_messages = parse_messages(messages)
        current_messages = parsed_messages.copy()
        steps: List[LanguageModelResponse[str]] = []

        # RUN MAIN AGENTIC LOOP
        for step in range(max_steps):
            # Format messages with instructions and context for first step only
            if step == 0:
                formatted_messages = self._format_messages_with_context(
                    messages=current_messages,
                    context=context,
                )
            else:
                formatted_messages = current_messages

            # Prepare kwargs for language model
            model_kwargs = kwargs.copy()
            if output_type:
                model_kwargs["type"] = output_type
            if self.instructor_mode:
                model_kwargs["instructor_mode"] = self.instructor_mode

            # Get language model response
            response = working_model.run(
                messages=formatted_messages,
                tools=[tool.model_dump() for tool in self.tools]
                if self.tools
                else None,
                **model_kwargs,
            )

            # Check if response has tool calls
            if response.has_tool_calls():
                # Add response to message history (with tool calls)
                current_messages.append(response.to_message())

                # Execute tools and add their responses to messages
                tool_responses = execute_tools_from_language_model_response(
                    tools=self.tools, response=response
                )
                # Add tool responses to message history
                for tool_resp in tool_responses:
                    current_messages.append(tool_resp.to_dict())

                # This is not the final step, add to steps
                steps.append(response)
            else:
                # No tool calls - this is the final step
                return _create_agent_response_from_language_model_response(
                    response=response, steps=steps, context=context
                )

        # Max steps reached - return last response
        if steps:
            final_response = steps[-1]
        else:
            # No steps taken, make a final call
            final_response = working_model.run(
                messages=self._format_messages_with_context(
                    messages=current_messages,
                    context=context,
                ),
                **model_kwargs,
            )

        return _create_agent_response_from_language_model_response(
            response=final_response, steps=steps, context=context
        )

    async def async_run(
        self,
        messages: AgentMessages,
        model: Optional[Union[LanguageModel, LanguageModelName]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        **kwargs: Any,
    ) -> AgentResponse[T]:
        """Runs this agent asynchronously and returns a final agent response.

        You can override defaults assigned to this agent from this function directly.
        This is the async version of run() for non-blocking execution.

        Args:
            messages: The messages to process. Can be:
                - A single string: "What's the weather like?"
                - A list of message dicts: [{"role": "user", "content": "Hello"}]
                - A list of strings: ["Hello", "How are you?"]
            model: The model to use for this run (overrides default).
                - Can be a LanguageModel instance or model name string like "gpt-4"
            max_steps: Maximum number of steps to execute (overrides default).
                - Useful for limiting tool usage or preventing infinite loops
            context: Context object for the agent (overrides default).
                - Any object that provides additional context for the conversation
            output_type: The expected output type (overrides default).
                - Use for structured outputs: output_type=MyPydanticModel
                - Defaults to str for unstructured text responses
            **kwargs: Additional keyword arguments passed to the language model.
                - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1

        Returns:
            AgentResponse containing the final output, steps taken, and metadata.

        Examples:
            Basic async usage:
            >>> import asyncio
            >>> agent = Agent()
            >>> async def main():
            ...     response = await agent.async_run("Hello, how are you?")
            ...     print(response.output)
            >>> asyncio.run(main())

            Multiple concurrent requests:
            >>> async def process_multiple():
            ...     tasks = [
            ...         agent.async_run("What's 2+2?"),
            ...         agent.async_run("What's the capital of France?"),
            ...         agent.async_run("Explain photosynthesis")
            ...     ]
            ...     responses = await asyncio.gather(*tasks)
            ...     return responses

            With structured output:
            >>> from pydantic import BaseModel
            >>> class Analysis(BaseModel):
            ...     sentiment: str
            ...     confidence: float
            >>> async def analyze_text():
            ...     response = await agent.async_run(
            ...         "Analyze the sentiment of: 'I love this product!'",
            ...         output_type=Analysis
            ...     )
            ...     return response.output

            With custom model and context:
            >>> async def custom_run():
            ...     context = {"domain": "medical", "expertise_level": "expert"}
            ...     response = await agent.async_run(
            ...         "Explain diabetes",
            ...         model="gpt-4",
            ...         context=context,
            ...         temperature=0.2
            ...     )
            ...     return response.output
        """
        # Use provided model or default
        if model is None:
            working_model = self.language_model
        elif isinstance(model, str):
            working_model = LanguageModel(model=model)
        else:
            working_model = model

        # Use provided max_steps or default
        if max_steps is None:
            max_steps = self.settings.max_steps

        # Parse initial messages
        parsed_messages = parse_messages(messages)
        current_messages = parsed_messages.copy()
        steps: List[LanguageModelResponse[str]] = []

        # RUN MAIN AGENTIC LOOP
        for step in range(max_steps):
            # Format messages with instructions and context for first step only
            if step == 0:
                formatted_messages = self._format_messages_with_context(
                    messages=current_messages,
                    context=context,
                )
            else:
                formatted_messages = current_messages

            # Prepare kwargs for language model
            model_kwargs = kwargs.copy()
            if output_type:
                model_kwargs["type"] = output_type
            if self.instructor_mode:
                model_kwargs["instructor_mode"] = self.instructor_mode

            # Get language model response
            response = await working_model.async_run(
                messages=formatted_messages,
                tools=[tool.model_dump() for tool in self.tools]
                if self.tools
                else None,
                **model_kwargs,
            )

            # Check if response has tool calls
            if response.has_tool_calls():
                # Add response to message history (with tool calls)
                current_messages.append(response.to_message())

                # Execute tools and add their responses to messages
                tool_responses = execute_tools_from_language_model_response(
                    tools=self.tools, response=response
                )
                # Add tool responses to message history
                for tool_resp in tool_responses:
                    current_messages.append(tool_resp.to_dict())

                # This is not the final step, add to steps
                steps.append(response)
            else:
                # No tool calls - this is the final step
                return _create_agent_response_from_language_model_response(
                    response=response, steps=steps, context=context
                )

        # Max steps reached - return last response
        if steps:
            final_response = steps[-1]
        else:
            # No steps taken, make a final call
            final_response = await working_model.async_run(
                messages=self._format_messages_with_context(
                    messages=current_messages,
                    context=context,
                ),
                **model_kwargs,
            )

        return _create_agent_response_from_language_model_response(
            response=final_response, steps=steps, context=context
        )

    def stream(
        self,
        messages: AgentMessages,
        model: Optional[Union[LanguageModel, LanguageModelName]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        **kwargs: Any,
    ) -> AgentStream[T]:
        """Create a stream that yields agent steps.

        Args:
            messages: The input messages to process
            model: Language model to use (overrides agent's default)
            max_steps: Maximum number of steps to take
            context: Context object to maintain state
            output_type: Type for structured output
            **kwargs: Additional parameters for the language model

        Returns:
            An AgentStream that can be iterated over
        """
        return AgentStream(
            agent=self,
            messages=messages,
            model=model,
            max_steps=max_steps,
            context=context,
            output_type=output_type,
            stream=True,
            **kwargs,
        )

    def iter(
        self,
        messages: AgentMessages,
        model: Optional[Union[LanguageModel, LanguageModelName]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        **kwargs: Any,
    ) -> AgentStream[T]:
        """Iterate over agent steps, yielding each step response.

        You can override defaults assigned to this agent from this function directly.
        Returns an AgentStream that yields intermediate steps and the final result.

        Args:
            messages: The messages to process. Can be:
                - A single string: "What's the weather like?"
                - A list of message dicts: [{"role": "user", "content": "Hello"}]
                - A list of strings: ["Hello", "How are you?"]
            model: The model to use for this run (overrides default).
                - Can be a LanguageModel instance or model name string like "gpt-4"
            max_steps: Maximum number of steps to execute (overrides default).
                - Useful for limiting tool usage or preventing infinite loops
            context: Context object for the agent (overrides default).
                - Any object that provides additional context for the conversation
            output_type: The expected output type (overrides default).
                - Use for structured outputs: output_type=MyPydanticModel
                - Defaults to str for unstructured text responses
            **kwargs: Additional keyword arguments passed to the language model.
                - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1

        Returns:
            AgentStream that can be iterated over to get each step response,
            including tool calls and intermediate reasoning steps.

        Examples:
            Basic iteration over steps:
            >>> agent = Agent(tools=[calculator_tool])
            >>> stream = agent.iter("What's 25 * 47?")
            >>> for step in stream:
            ...     print(f"Step {step.step_number}: {step.output}")
            ...     if step.tool_calls:
            ...         print(f"Tool calls: {len(step.tool_calls)}")

            Real-time processing with streaming:
            >>> stream = agent.iter("Write a poem about nature")
            >>> for chunk in stream:
            ...     if chunk.output:
            ...         print(chunk.output, end="", flush=True)
            ...     if chunk.is_final:
            ...         print("\n--- Final response ---")

            With structured output iteration:
            >>> from pydantic import BaseModel
            >>> class StepAnalysis(BaseModel):
            ...     reasoning: str
            ...     confidence: float
            >>> stream = agent.iter(
            ...     "Analyze this step by step: Why is the sky blue?",
            ...     output_type=StepAnalysis
            ... )
            >>> for step in stream:
            ...     if step.output:
            ...         print(f"Reasoning: {step.output.reasoning}")
            ...         print(f"Confidence: {step.output.confidence}")

            Processing with custom model and context:
            >>> context = {"domain": "science", "depth": "detailed"}
            >>> stream = agent.iter(
            ...     "Explain quantum entanglement",
            ...     model="gpt-4",
            ...     context=context,
            ...     max_steps=3,
            ...     temperature=0.1
            ... )
            >>> results = []
            >>> for step in stream:
            ...     results.append(step.output)
            ...     if step.is_final:
            ...         break

            Error handling during iteration:
            >>> try:
            ...     stream = agent.iter("Complex calculation task")
            ...     for step in stream:
            ...         if step.error:
            ...             print(f"Error in step: {step.error}")
            ...         else:
            ...             print(f"Step result: {step.output}")
            ... except Exception as e:
            ...     print(f"Stream error: {e}")
        """
        return AgentStream(
            agent=self,
            messages=messages,
            model=model,
            max_steps=max_steps,
            context=context,
            output_type=output_type,
            stream=True,
            **kwargs,
        )

    def async_iter(
        self,
        messages: AgentMessages,
        model: Optional[Union[LanguageModel, LanguageModelName]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        **kwargs: Any,
    ) -> AgentStream[T]:
        """Async iterate over agent steps, yielding each step response.

        Args:
            messages: The input messages to process
            model: Language model to use (overrides agent's default)
            max_steps: Maximum number of steps to take
            context: Context object to maintain state
            output_type: Type for structured output
            **kwargs: Additional parameters for the language model

        Returns:
            An AgentStream that can be iterated over asynchronously
        """
        return AgentStream(
            agent=self,
            messages=messages,
            model=model,
            max_steps=max_steps,
            context=context,
            output_type=output_type,
            stream=True,
            **kwargs,
        )


__all__ = [
    "Agent",
    "AgentSettings",
    "AgentModelSettings",
    "AgentEvent",
    "HookManager",
    "HookDecorator",
]
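
For orientation, a minimal sketch of the new Agent API introduced by this release follows. It is not part of the diff: the UserProfile model and prompt text are invented for illustration, and it assumes hammad-python 0.0.20 is installed with a model provider key configured. The constructor parameters and run() signature match those shown in the diff above.

from pydantic import BaseModel, Field
from hammad.genai.agents.agent import Agent

# Hypothetical context model -- any BaseModel or dict works as AgentContext.
class UserProfile(BaseModel):
    name: str = "unknown"
    interests: list[str] = Field(default_factory=list)

agent = Agent(
    name="profiler",
    instructions="Chat with the user and learn about them.",
    context_updates="after",       # when to update context ('before', 'after', or both)
    context_strategy="selective",  # update only model-selected fields rather than all fields
    context_format="json",         # how context is rendered into the system instructions
)

response = agent.run("Hi, I'm Ada and I love compilers.", context=UserProfile())
print(response.output)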