hammad-python 0.0.19__py3-none-any.whl → 0.0.21__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
- hammad/__init__.py +7 -137
- hammad/_internal.py +1 -0
- hammad/cli/_runner.py +8 -8
- hammad/cli/plugins.py +55 -26
- hammad/cli/styles/utils.py +16 -8
- hammad/data/__init__.py +1 -5
- hammad/data/collections/__init__.py +2 -3
- hammad/data/collections/collection.py +41 -22
- hammad/data/collections/indexes/__init__.py +1 -1
- hammad/data/collections/indexes/qdrant/__init__.py +1 -1
- hammad/data/collections/indexes/qdrant/index.py +106 -118
- hammad/data/collections/indexes/qdrant/settings.py +14 -14
- hammad/data/collections/indexes/qdrant/utils.py +28 -38
- hammad/data/collections/indexes/tantivy/__init__.py +1 -1
- hammad/data/collections/indexes/tantivy/index.py +57 -59
- hammad/data/collections/indexes/tantivy/settings.py +8 -19
- hammad/data/collections/indexes/tantivy/utils.py +28 -52
- hammad/data/models/__init__.py +2 -7
- hammad/data/sql/__init__.py +1 -1
- hammad/data/sql/database.py +71 -73
- hammad/data/sql/types.py +37 -51
- hammad/formatting/__init__.py +2 -1
- hammad/formatting/json/converters.py +2 -2
- hammad/genai/__init__.py +96 -36
- hammad/genai/agents/__init__.py +47 -1
- hammad/genai/agents/agent.py +1298 -0
- hammad/genai/agents/run.py +615 -0
- hammad/genai/agents/types/__init__.py +29 -22
- hammad/genai/agents/types/agent_context.py +13 -0
- hammad/genai/agents/types/agent_event.py +128 -0
- hammad/genai/agents/types/agent_hooks.py +220 -0
- hammad/genai/agents/types/agent_messages.py +31 -0
- hammad/genai/agents/types/agent_response.py +122 -0
- hammad/genai/agents/types/agent_stream.py +318 -0
- hammad/genai/models/__init__.py +1 -0
- hammad/genai/models/embeddings/__init__.py +39 -0
- hammad/genai/{embedding_models/embedding_model.py → models/embeddings/model.py} +45 -41
- hammad/genai/{embedding_models → models/embeddings}/run.py +10 -8
- hammad/genai/models/embeddings/types/__init__.py +37 -0
- hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_name.py +2 -4
- hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_response.py +11 -4
- hammad/genai/{embedding_models/embedding_model_request.py → models/embeddings/types/embedding_model_run_params.py} +4 -3
- hammad/genai/models/embeddings/types/embedding_model_settings.py +47 -0
- hammad/genai/models/language/__init__.py +48 -0
- hammad/genai/{language_models/language_model.py → models/language/model.py} +496 -204
- hammad/genai/{language_models → models/language}/run.py +80 -57
- hammad/genai/models/language/types/__init__.py +40 -0
- hammad/genai/models/language/types/language_model_instructor_mode.py +47 -0
- hammad/genai/models/language/types/language_model_messages.py +28 -0
- hammad/genai/{language_models/_types.py → models/language/types/language_model_name.py} +3 -40
- hammad/genai/{language_models → models/language/types}/language_model_request.py +17 -25
- hammad/genai/{language_models → models/language/types}/language_model_response.py +60 -67
- hammad/genai/{language_models → models/language/types}/language_model_response_chunk.py +8 -5
- hammad/genai/models/language/types/language_model_settings.py +89 -0
- hammad/genai/{language_models/_streaming.py → models/language/types/language_model_stream.py} +221 -243
- hammad/genai/{language_models/_utils → models/language/utils}/__init__.py +8 -11
- hammad/genai/models/language/utils/requests.py +421 -0
- hammad/genai/{language_models/_utils/_structured_outputs.py → models/language/utils/structured_outputs.py} +31 -20
- hammad/genai/models/model_provider.py +4 -0
- hammad/genai/{multimodal_models.py → models/multimodal.py} +4 -5
- hammad/genai/models/reranking.py +26 -0
- hammad/genai/types/__init__.py +1 -0
- hammad/genai/types/base.py +215 -0
- hammad/genai/{agents/types → types}/history.py +101 -88
- hammad/genai/{agents/types/tool.py → types/tools.py} +157 -140
- hammad/logging/logger.py +9 -1
- hammad/mcp/client/__init__.py +2 -3
- hammad/mcp/client/client.py +10 -10
- hammad/mcp/servers/__init__.py +2 -1
- hammad/service/decorators.py +1 -3
- hammad/web/models.py +1 -3
- hammad/web/search/client.py +10 -22
- {hammad_python-0.0.19.dist-info → hammad_python-0.0.21.dist-info}/METADATA +10 -2
- hammad_python-0.0.21.dist-info/RECORD +127 -0
- hammad/genai/embedding_models/__init__.py +0 -41
- hammad/genai/language_models/__init__.py +0 -35
- hammad/genai/language_models/_utils/_completions.py +0 -131
- hammad/genai/language_models/_utils/_messages.py +0 -89
- hammad/genai/language_models/_utils/_requests.py +0 -202
- hammad/genai/rerank_models.py +0 -26
- hammad_python-0.0.19.dist-info/RECORD +0 -111
- {hammad_python-0.0.19.dist-info → hammad_python-0.0.21.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.19.dist-info → hammad_python-0.0.21.dist-info}/licenses/LICENSE +0 -0

hammad/genai/agents/run.py (new file, +615 lines):

```python
"""hammad.genai.agents.run

Standalone functions for running agents with full parameter typing.
"""

from typing import (
    Any,
    Callable,
    List,
    TypeVar,
    Union,
    Optional,
    Type,
    overload,
    Dict,
    TYPE_CHECKING,
)
from typing_extensions import Literal


if TYPE_CHECKING:
    from ..models.language.model import LanguageModel
    from ..models.language.types import (
        LanguageModelName,
        LanguageModelInstructorMode,
    )
    from .types.agent_response import AgentResponse
    from .types.agent_stream import AgentStream
    from .types.agent_context import AgentContext
    from .types.agent_messages import AgentMessages
    from ..types.tools import Tool
    from httpx import Timeout


from .agent import Agent, AgentSettings


__all__ = [
    "run_agent",
    "async_run_agent",
    "run_agent_iter",
    "async_run_agent_iter",
]

T = TypeVar("T")


# Overloads for run_agent - non-streaming
@overload
def run_agent(
    messages: "AgentMessages",
    *,
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentResponse[str]": ...


@overload
def run_agent(
    messages: "AgentMessages",
    *,
    output_type: Type[T],
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentResponse[T]": ...


def run_agent(messages: "AgentMessages", **kwargs: Any) -> "AgentResponse[Any]":
    """Runs this agent and returns a final agent response or stream.

    You can override defaults assigned to this agent from this function directly.

    Args:
        messages: The messages to process. Can be:
            - A single string: "What's the weather like?"
            - A list of message dicts: [{"role": "user", "content": "Hello"}]
            - A list of strings: ["Hello", "How are you?"]
        model: The model to use for this run (overrides default).
            - Can be a LanguageModel instance or model name string like "gpt-4"
        max_steps: Maximum number of steps to execute (overrides default).
            - Useful for limiting tool usage or preventing infinite loops
        context: Context object for the agent (overrides default).
            - Any object that provides additional context for the conversation
        output_type: The expected output type (overrides default).
            - Use for structured outputs: output_type=MyPydanticModel
            - Defaults to str for unstructured text responses
        stream: Whether to return a stream instead of a final response.
            - If True, returns AgentStream for real-time processing
            - If False, returns complete AgentResponse
        **kwargs: Additional keyword arguments passed to the language model.
            - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1

    Returns:
        AgentResponse or AgentStream depending on stream parameter.
        - AgentResponse: Contains final output, steps taken, and metadata
        - AgentStream: Iterator yielding intermediate steps and final result

    Examples:
        Basic text conversation:
            >>> agent = Agent()
            >>> response = agent.run("Hello, how are you?")
            >>> print(response.output)
            "Hello! I'm doing well, thank you for asking."

        With custom model and parameters:
            >>> response = agent.run(
            ...     messages="Explain quantum computing",
            ...     model="gpt-4",
            ...     max_steps=5,
            ...     temperature=0.3
            ... )

        Structured output with Pydantic model:
            >>> from pydantic import BaseModel
            >>> class Summary(BaseModel):
            ...     title: str
            ...     key_points: List[str]
            >>> response = agent.run(
            ...     "Summarize the benefits of renewable energy",
            ...     output_type=Summary
            ... )
            >>> print(response.output.title)
            >>> print(response.output.key_points)

        Streaming for real-time results:
            >>> stream = agent.run(
            ...     "Write a long story about space exploration",
            ...     stream=True
            ... )
            >>> for chunk in stream:
            ...     print(chunk.output, end="", flush=True)

        With context for additional information:
            >>> context = {"user_preferences": "technical explanations"}
            >>> response = agent.run(
            ...     "How does machine learning work?",
            ...     context=context
            ... )
    """
    agent = Agent(**kwargs)
    return agent.run(messages, **kwargs)


# Overloads for async_run_agent
@overload
async def async_run_agent(
    messages: "AgentMessages",
    *,
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentResponse[str]": ...


@overload
async def async_run_agent(
    messages: "AgentMessages",
    *,
    output_type: Type[T],
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentResponse[T]": ...


async def async_run_agent(
    messages: "AgentMessages", **kwargs: Any
) -> "AgentResponse[Any]":
    """Runs this agent asynchronously and returns a final agent response.

    You can override defaults assigned to this agent from this function directly.
    This is the async version of run() for non-blocking execution.

    Args:
        messages: The messages to process. Can be:
            - A single string: "What's the weather like?"
            - A list of message dicts: [{"role": "user", "content": "Hello"}]
            - A list of strings: ["Hello", "How are you?"]
        model: The model to use for this run (overrides default).
            - Can be a LanguageModel instance or model name string like "gpt-4"
        max_steps: Maximum number of steps to execute (overrides default).
            - Useful for limiting tool usage or preventing infinite loops
        context: Context object for the agent (overrides default).
            - Any object that provides additional context for the conversation
        output_type: The expected output type (overrides default).
            - Use for structured outputs: output_type=MyPydanticModel
            - Defaults to str for unstructured text responses
        **kwargs: Additional keyword arguments passed to the language model.
            - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1

    Returns:
        AgentResponse containing the final output, steps taken, and metadata.

    Examples:
        Basic async usage:
            >>> import asyncio
            >>> agent = Agent()
            >>> async def main():
            ...     response = await agent.async_run("Hello, how are you?")
            ...     print(response.output)
            >>> asyncio.run(main())

        Multiple concurrent requests:
            >>> async def process_multiple():
            ...     tasks = [
            ...         agent.async_run("What's 2+2?"),
            ...         agent.async_run("What's the capital of France?"),
            ...         agent.async_run("Explain photosynthesis")
            ...     ]
            ...     responses = await asyncio.gather(*tasks)
            ...     return responses

        With structured output:
            >>> from pydantic import BaseModel
            >>> class Analysis(BaseModel):
            ...     sentiment: str
            ...     confidence: float
            >>> async def analyze_text():
            ...     response = await agent.async_run(
            ...         "Analyze the sentiment of: 'I love this product!'",
            ...         output_type=Analysis
            ...     )
            ...     return response.output

        With custom model and context:
            >>> async def custom_run():
            ...     context = {"domain": "medical", "expertise_level": "expert"}
            ...     response = await agent.async_run(
            ...         "Explain diabetes",
            ...         model="gpt-4",
            ...         context=context,
            ...         temperature=0.2
            ...     )
            ...     return response.output
    """
    agent = Agent(**kwargs)
    return await agent.async_run(messages, **kwargs)


# Overloads for run_agent_iter
@overload
def run_agent_iter(
    messages: "AgentMessages",
    *,
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentStream[str]": ...


@overload
def run_agent_iter(
    messages: "AgentMessages",
    *,
    output_type: Type[T],
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentStream[T]": ...


def run_agent_iter(messages: "AgentMessages", **kwargs: Any) -> "AgentStream[Any]":
    """Iterate over agent steps, yielding each step response.

    You can override defaults assigned to this agent from this function directly.
    Returns an AgentStream that yields intermediate steps and the final result.

    Args:
        messages: The messages to process. Can be:
            - A single string: "What's the weather like?"
            - A list of message dicts: [{"role": "user", "content": "Hello"}]
            - A list of strings: ["Hello", "How are you?"]
        model: The model to use for this run (overrides default).
            - Can be a LanguageModel instance or model name string like "gpt-4"
        max_steps: Maximum number of steps to execute (overrides default).
            - Useful for limiting tool usage or preventing infinite loops
        context: Context object for the agent (overrides default).
            - Any object that provides additional context for the conversation
        output_type: The expected output type (overrides default).
            - Use for structured outputs: output_type=MyPydanticModel
            - Defaults to str for unstructured text responses
        **kwargs: Additional keyword arguments passed to the language model.
            - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1

    Returns:
        AgentStream that can be iterated over to get each step response,
        including tool calls and intermediate reasoning steps.

    Examples:
        Basic iteration over steps:
            >>> agent = Agent(tools=[calculator_tool])
            >>> stream = agent.iter("What's 25 * 47?")
            >>> for step in stream:
            ...     print(f"Step {step.step_number}: {step.output}")
            ...     if step.tool_calls:
            ...         print(f"Tool calls: {len(step.tool_calls)}")

        Real-time processing with streaming:
            >>> stream = agent.iter("Write a poem about nature")
            >>> for chunk in stream:
            ...     if chunk.output:
            ...         print(chunk.output, end="", flush=True)
            ...     if chunk.is_final:
            ...         print("\n--- Final response ---")

        With structured output iteration:
            >>> from pydantic import BaseModel
            >>> class StepAnalysis(BaseModel):
            ...     reasoning: str
            ...     confidence: float
            >>> stream = agent.iter(
            ...     "Analyze this step by step: Why is the sky blue?",
            ...     output_type=StepAnalysis
            ... )
            >>> for step in stream:
            ...     if step.output:
            ...         print(f"Reasoning: {step.output.reasoning}")
            ...         print(f"Confidence: {step.output.confidence}")

        Processing with custom model and context:
            >>> context = {"domain": "science", "depth": "detailed"}
            >>> stream = agent.iter(
            ...     "Explain quantum entanglement",
            ...     model="gpt-4",
            ...     context=context,
            ...     max_steps=3,
            ...     temperature=0.1
            ... )
            >>> results = []
            >>> for step in stream:
            ...     results.append(step.output)
            ...     if step.is_final:
            ...         break

        Error handling during iteration:
            >>> try:
            ...     stream = agent.iter("Complex calculation task")
            ...     for step in stream:
            ...         if step.error:
            ...             print(f"Error in step: {step.error}")
            ...         else:
            ...             print(f"Step result: {step.output}")
            ... except Exception as e:
            ...     print(f"Stream error: {e}")
    """
    agent = Agent(**kwargs)
    return agent.run(messages, stream=True, **kwargs)


# Overloads for async_run_agent_iter
@overload
def async_run_agent_iter(
    messages: "AgentMessages",
    *,
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentStream[str]": ...


@overload
def async_run_agent_iter(
    messages: "AgentMessages",
    *,
    output_type: Type[T],
    # Agent settings
    name: str = "agent",
    instructions: Optional[str] = None,
    description: Optional[str] = None,
    tools: Union[List["Tool"], Callable, None] = None,
    settings: Optional[AgentSettings] = None,
    # Context management
    context: Optional["AgentContext"] = None,
    context_updates: Optional[
        Union[List[Literal["before", "after"]], Literal["before", "after"]]
    ] = None,
    context_confirm: bool = False,
    context_strategy: Literal["selective", "all"] = "all",
    context_max_retries: int = 3,
    context_confirm_instructions: Optional[str] = None,
    context_selection_instructions: Optional[str] = None,
    context_update_instructions: Optional[str] = None,
    context_format: Literal["json", "python", "markdown"] = "json",
    # Model settings
    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
    max_steps: Optional[int] = None,
    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
    # LM settings
    timeout: Optional[Union[float, str, "Timeout"]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    user: Optional[str] = None,
) -> "AgentStream[T]": ...


def async_run_agent_iter(
    messages: "AgentMessages", **kwargs: Any
) -> "AgentStream[Any]":
    """Async iterate over agent steps, yielding each step response.

    Args:
        messages: The input messages to process
        model: Language model to use (overrides agent's default)
        max_steps: Maximum number of steps to take
        context: Context object to maintain state
        output_type: Type for structured output
        **kwargs: Additional parameters for the language model

    Returns:
        An AgentStream that can be iterated over asynchronously
    """
    agent = Agent(**kwargs)
    return agent.run(messages, stream=True, **kwargs)
```
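
The docstring examples above exercise the underlying `Agent` methods; the module-level helpers themselves build an `Agent` from their keyword arguments and dispatch to it, forwarding the same `**kwargs` to both the constructor and the run call. A minimal usage sketch of the standalone entry points, assuming "gpt-4" as a stand-in model name and a hypothetical `Summary` schema:

```python
# Sketch only: Summary is a hypothetical schema; "gpt-4" stands in for any
# supported LanguageModelName.
from typing import List

from pydantic import BaseModel

from hammad.genai.agents.run import run_agent, run_agent_iter


class Summary(BaseModel):  # hypothetical structured-output schema
    title: str
    key_points: List[str]


# Plain text: resolves to the first overload, typed as AgentResponse[str].
response = run_agent("Summarize the benefits of renewable energy", model="gpt-4")
print(response.output)

# Structured: passing output_type selects the AgentResponse[T] overload.
typed = run_agent(
    "Summarize the benefits of renewable energy",
    output_type=Summary,
    temperature=0.3,
)
print(typed.output.title)

# Stepwise: run_agent_iter returns an AgentStream that yields each step.
for step in run_agent_iter("What's 25 * 47?", max_steps=3):
    print(step.output)
```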

hammad/genai/agents/types/__init__.py (several removed lines were truncated by the diff renderer and are shown as captured):

```diff
@@ -1,35 +1,42 @@
-"""hammad.genai.types
-
-Contains functional types usable with various components within
-the `hammad.genai` module."""
+"""hammad.genai.agents.types"""
 
 from typing import TYPE_CHECKING
 from ...._internal import create_getattr_importer
 
 
 if TYPE_CHECKING:
-    from .
-
-
-
-
-        ToolResponseMessage,
-        function_tool,
+    from .agent_event import AgentEvent
+    from .agent_hooks import HookManager, HookDecorator
+    from .agent_response import (
+        AgentResponse,
+        _create_agent_response_from_language_model_response,
     )
-
-
-
-
-
-
-
-    "
-
-
+    from .agent_stream import AgentStream, AgentResponseChunk
+    from .agent_context import AgentContext
+    from .agent_messages import AgentMessages
+
+
+__all__ = [
+    # hammad.genai.agents.types.agent_event
+    "AgentEvent",
+    # hammad.genai.agents.types.agent_hooks
+    "HookManager",
+    "HookDecorator",
+    # hammad.genai.agents.types.agent_response
+    "AgentResponse",
+    "_create_agent_response_from_language_model_response",
+    # hammad.genai.agents.types.agent_stream
+    "AgentStream",
+    "AgentResponseChunk",
+    # hammad.genai.agents.types.agent_context
+    "AgentContext",
+    # hammad.genai.agents.types.agent_messages
+    "AgentMessages",
+]
 
 
 __getattr__ = create_getattr_importer(__all__)
 
 
 def __dir__() -> list[str]:
-    return __all__
+    return __all__
```
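
The `__getattr__ = create_getattr_importer(__all__)` line keeps this package cheap to import: names listed in `__all__` are resolved only on first attribute access. `create_getattr_importer` lives in `hammad._internal` and its implementation is not shown in this diff; the sketch below illustrates the generic PEP 562 pattern such a helper enables, with an assumed name-to-submodule mapping:

```python
# Illustrative sketch of PEP 562 lazy imports; not hammad's actual helper.
import importlib
from typing import Any

_LOCATIONS = {  # assumed mapping from exported name to defining submodule
    "AgentEvent": ".agent_event",
    "AgentResponse": ".agent_response",
    "AgentContext": ".agent_context",
}


def __getattr__(name: str) -> Any:  # module-level hook, called on attribute miss
    try:
        submodule = _LOCATIONS[name]
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # Import the defining submodule lazily, then cache the resolved attribute
    # in module globals so the hook only fires once per name.
    value = getattr(importlib.import_module(submodule, __package__), name)
    globals()[name] = value
    return value
```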

hammad/genai/agents/types/agent_context.py (new file, +13 lines):

```python
"""hammad.genai.agents.types.agent_context"""

from typing import Dict, Any, TypeVar
from pydantic import BaseModel


__all__ = [
    "AgentContext",
]


AgentContext = TypeVar("AgentContext", bound=BaseModel | Dict[str, Any])
"""A context object that can be used to store information about the agent's state."""
```
|