hammad-python 0.0.23__py3-none-any.whl → 0.0.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hammad/__init__.py +282 -13
- hammad/cli/plugins.py +3 -1
- hammad/genai/__init__.py +51 -0
- hammad/genai/agents/__init__.py +6 -0
- hammad/genai/agents/agent.py +640 -213
- hammad/genai/agents/run.py +50 -12
- hammad/genai/agents/types/agent_response.py +2 -1
- hammad/genai/graphs/__init__.py +113 -0
- hammad/genai/graphs/base.py +1103 -0
- hammad/genai/graphs/plugins.py +316 -0
- hammad/genai/graphs/types.py +638 -0
- hammad/genai/models/language/model.py +46 -0
- hammad/genai/models/language/run.py +22 -4
- hammad/genai/models/language/types/language_model_response.py +1 -1
- hammad/genai/types/tools.py +1 -1
- hammad/logging/logger.py +10 -0
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.24.dist-info}/METADATA +5 -1
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.24.dist-info}/RECORD +20 -21
- hammad/_main/__init__.py +0 -4
- hammad/_main/_fn.py +0 -20
- hammad/_main/_new.py +0 -52
- hammad/_main/_run.py +0 -50
- hammad/_main/_to.py +0 -19
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.24.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.24.dist-info}/licenses/LICENSE +0 -0
hammad/genai/agents/run.py
CHANGED
@@ -72,6 +72,9 @@ def run_agent(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -81,6 +84,8 @@ def run_agent(
     frequency_penalty: Optional[float] = None,
     seed: Optional[int] = None,
     user: Optional[str] = None,
+    verbose: bool = False,
+    debug: bool = False,
 ) -> "AgentResponse[str]": ...
 
 
@@ -111,6 +116,9 @@ def run_agent(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -120,10 +128,14 @@ def run_agent(
     frequency_penalty: Optional[float] = None,
     seed: Optional[int] = None,
     user: Optional[str] = None,
+    verbose: bool = False,
+    debug: bool = False,
 ) -> "AgentResponse[T]": ...
 
 
-def run_agent(messages: "AgentMessages", **kwargs: Any) -> "AgentResponse[Any]":
+def run_agent(
+    messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
+) -> "AgentResponse[Any]":
     """Runs this agent and returns a final agent response or stream.
 
     You can override defaults assigned to this agent from this function directly.
@@ -145,6 +157,8 @@ def run_agent(messages: "AgentMessages", **kwargs: Any) -> "AgentResponse[Any]":
         stream: Whether to return a stream instead of a final response.
            - If True, returns AgentStream for real-time processing
            - If False, returns complete AgentResponse
+        verbose: If True, set logger to INFO level for detailed output
+        debug: If True, set logger to DEBUG level for maximum verbosity
         **kwargs: Additional keyword arguments passed to the language model.
            - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1
 
@@ -195,8 +209,8 @@ def run_agent(messages: "AgentMessages", **kwargs: Any) -> "AgentResponse[Any]":
         ... context=context
         ... )
     """
-    agent = Agent(**kwargs)
-    return agent.run(messages, **kwargs)
+    agent = Agent(verbose=verbose, debug=debug, **kwargs)
+    return agent.run(messages, verbose=verbose, debug=debug, **kwargs)
 
 
 # Overloads for async_run_agent
@@ -226,6 +240,9 @@ async def async_run_agent(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -235,6 +252,8 @@ async def async_run_agent(
     frequency_penalty: Optional[float] = None,
     seed: Optional[int] = None,
     user: Optional[str] = None,
+    verbose: bool = False,
+    debug: bool = False,
 ) -> "AgentResponse[str]": ...
 
 
@@ -265,6 +284,9 @@ async def async_run_agent(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -274,11 +296,13 @@ async def async_run_agent(
     frequency_penalty: Optional[float] = None,
     seed: Optional[int] = None,
     user: Optional[str] = None,
+    verbose: bool = False,
+    debug: bool = False,
 ) -> "AgentResponse[T]": ...
 
 
 async def async_run_agent(
-    messages: "AgentMessages", **kwargs: Any
+    messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
 ) -> "AgentResponse[Any]":
     """Runs this agent asynchronously and returns a final agent response.
 
@@ -347,8 +371,8 @@ async def async_run_agent(
         ... )
         ... return response.output
     """
-    agent = Agent(**kwargs)
-    return await agent.async_run(messages, **kwargs)
+    agent = Agent(verbose=verbose, debug=debug, **kwargs)
+    return await agent.async_run(messages, verbose=verbose, debug=debug, **kwargs)
 
 
 # Overloads for run_agent_iter
@@ -378,6 +402,9 @@ def run_agent_iter(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -417,6 +444,9 @@ def run_agent_iter(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -429,7 +459,9 @@ def run_agent_iter(
 ) -> "AgentStream[T]": ...
 
 
-def run_agent_iter(messages: "AgentMessages", **kwargs: Any) -> "AgentStream[Any]":
+def run_agent_iter(
+    messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
+) -> "AgentStream[Any]":
     """Iterate over agent steps, yielding each step response.
 
     You can override defaults assigned to this agent from this function directly.
@@ -513,8 +545,8 @@ def run_agent_iter(messages: "AgentMessages", **kwargs: Any) -> "AgentStream[Any]":
         ... except Exception as e:
         ...     print(f"Stream error: {e}")
     """
-    agent = Agent(**kwargs)
-    return agent.run(messages, stream=True, **kwargs)
+    agent = Agent(verbose=verbose, debug=debug, **kwargs)
+    return agent.run(messages, stream=True, verbose=verbose, debug=debug, **kwargs)
 
 
 # Overloads for async_run_agent_iter
@@ -544,6 +576,9 @@ def async_run_agent_iter(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -583,6 +618,9 @@ def async_run_agent_iter(
     model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
     max_steps: Optional[int] = None,
     instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    # End strategy
+    end_strategy: Optional[Literal["tool"]] = None,
+    end_tool: Optional[Callable] = None,
     # LM settings
     timeout: Optional[Union[float, str, "Timeout"]] = None,
     temperature: Optional[float] = None,
@@ -596,7 +634,7 @@ def async_run_agent_iter(
 
 
 def async_run_agent_iter(
-    messages: "AgentMessages", **kwargs: Any
+    messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
 ) -> "AgentStream[Any]":
     """Async iterate over agent steps, yielding each step response.
 
@@ -611,5 +649,5 @@ def async_run_agent_iter(
     Returns:
         An AgentStream that can be iterated over asynchronously
     """
-    agent = Agent(**kwargs)
-    return agent.run(messages, stream=True, **kwargs)
+    agent = Agent(verbose=verbose, debug=debug, **kwargs)
+    return agent.run(messages, stream=True, verbose=verbose, debug=debug, **kwargs)
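In 0.0.24 the module-level helpers in `hammad/genai/agents/run.py` accept `verbose`, `debug`, `end_strategy`, and `end_tool` directly and forward them to `Agent`. A minimal sketch of a call using the new parameters (the model string and the `finish` tool below are illustrative placeholders, not values taken from the package):

```python
# Illustrative call using the parameters added in 0.0.24; the model name and
# the end-of-run tool are placeholders, not part of the package itself.
from hammad.genai.agents.run import run_agent

def finish(summary: str) -> str:
    """Hypothetical tool the agent may call to end the run early."""
    return summary

response = run_agent(
    "Summarize the latest release notes.",
    model="openai/gpt-4o-mini",  # assumed LanguageModelName-style string
    max_steps=3,
    end_strategy="tool",         # new in 0.0.24
    end_tool=finish,             # new in 0.0.24
    verbose=True,                # INFO-level logging per the new docstring
)
print(response.output)
```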
hammad/genai/agents/types/agent_response.py
CHANGED
@@ -1,6 +1,7 @@
 """hammad.genai.agents.types.agent_response"""
 
 from typing import List, Any, TypeVar, Literal, Generic
+from pydantic import Field
 
 from ....cache import cached
 from ....typing import get_type_description
@@ -49,7 +50,7 @@ class AgentResponse(LanguageModelResponse[T], Generic[T, AgentContext]):
     type: Literal["agent"] = "agent"
     """The type of the response. Always `agent`."""
 
-    steps: List[LanguageModelResponse[str]]
+    steps: List[LanguageModelResponse[str]] = Field(default_factory=list)
     """
     A list of steps taken by the agent **BEFORE** its final output.
 
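With `steps` now declared as `Field(default_factory=list)`, an `AgentResponse` can be constructed without supplying `steps`; pydantic initializes it to an empty list. A standalone sketch of the `default_factory` behavior using a stand-in model (not the real class):

```python
# Stand-in pydantic model showing the effect of Field(default_factory=list);
# the real AgentResponse is generic and carries more fields than this.
from pydantic import BaseModel, Field

class StepsDemo(BaseModel):
    type: str = "agent"
    steps: list[str] = Field(default_factory=list)  # previously a required field

demo = StepsDemo()        # no longer fails validation when steps is omitted
assert demo.steps == []
```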
hammad/genai/graphs/__init__.py
ADDED
@@ -0,0 +1,113 @@
+"""hammad.genai.graphs - Graph-based workflow framework built on pydantic-graph
+
+This module provides a high-level interface for creating graph-based workflows
+that integrate seamlessly with hammad's Agent and LanguageModel infrastructure.
+
+Key Features:
+- Action decorator system for defining graph nodes
+- Automatic integration with Agent and LanguageModel
+- IDE-friendly type hints and parameter unpacking
+- Plugin system for extensibility
+- Built on pydantic-graph for robust execution
+
+Basic Usage:
+    from hammad.genai.graphs import BaseGraph, action
+    from pydantic import BaseModel
+
+    class MyState(BaseModel):
+        count: int = 0
+
+    class CountingGraph(BaseGraph[MyState, str]):
+        @action.start()
+        def start_counting(self, ctx, agent, target: int):
+            # Use agent for AI operations
+            response = agent.run(f"Count from 1 to {target}")
+            return response.output
+
+    # Usage
+    graph = CountingGraph()
+    result = graph.run(target=5)
+    print(result.output)
+
+Advanced Usage with Plugins:
+    from hammad.genai.graphs import plugin
+
+    @plugin.history(summarize=True)
+    @plugin.memory(collection_name="counting")
+    class AdvancedGraph(BaseGraph[MyState, str]):
+        @action.start(instructions="You are a helpful counting assistant")
+        def count_with_memory(self, ctx, agent, target: int):
+            # Agent will have instructions and plugins automatically applied
+            return agent.run(f"Count to {target} and remember this session")
+"""
+
+from typing import TYPE_CHECKING
+from ..._internal import create_getattr_importer
+
+
+if TYPE_CHECKING:
+    from .base import BaseGraph, action, ActionNode, GraphBuilder
+    from .types import (
+        GraphContext,
+        GraphResponse,
+        GraphState,
+        BasePlugin,
+        ActionSettings,
+        ActionInfo,
+        GraphEvent,
+        GraphHistoryEntry,
+        GraphStream,
+        GraphResponseChunk,
+        GraphNode,
+        GraphEnd,
+        PydanticGraphContext,
+    )
+    from .plugins import (
+        plugin,
+        PluginDecorator,
+        HistoryPlugin,
+        MemoryPlugin,
+        AudioPlugin,
+        ServePlugin,
+        SettingsPlugin,
+    )
+
+
+__all__ = (
+    # Core graph classes
+    "BaseGraph",
+    "GraphBuilder",
+    # Action system
+    "action",
+    "ActionNode",
+    "ActionSettings",
+    "ActionInfo",
+    # Plugin system
+    "plugin",
+    "BasePlugin",
+    "PluginDecorator",
+    "HistoryPlugin",
+    "MemoryPlugin",
+    "AudioPlugin",
+    "ServePlugin",
+    "SettingsPlugin",
+    # Types and context
+    "GraphContext",
+    "GraphResponse",
+    "GraphState",
+    "GraphEvent",
+    "GraphHistoryEntry",
+    "GraphStream",
+    "GraphResponseChunk",
+    # Re-exports from pydantic-graph
+    "GraphNode",
+    "GraphEnd",
+    "PydanticGraphContext",
+)
+
+
+__getattr__ = create_getattr_importer(__all__)
+
+
+def __dir__() -> list[str]:
+    return list(__all__)