hammad-python 0.0.23__py3-none-any.whl → 0.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. hammad/__init__.py +62 -14
  2. hammad/_main.py +226 -0
  3. hammad/cli/__init__.py +0 -2
  4. hammad/cli/plugins.py +3 -1
  5. hammad/data/__init__.py +4 -5
  6. hammad/data/types/__init__.py +37 -1
  7. hammad/data/types/file.py +74 -1
  8. hammad/data/types/multimodal/__init__.py +14 -2
  9. hammad/data/types/multimodal/audio.py +106 -2
  10. hammad/data/types/multimodal/image.py +104 -2
  11. hammad/data/types/text.py +242 -0
  12. hammad/genai/__init__.py +73 -0
  13. hammad/genai/a2a/__init__.py +32 -0
  14. hammad/genai/a2a/workers.py +552 -0
  15. hammad/genai/agents/__init__.py +8 -0
  16. hammad/genai/agents/agent.py +747 -214
  17. hammad/genai/agents/run.py +421 -12
  18. hammad/genai/agents/types/agent_response.py +2 -1
  19. hammad/genai/graphs/__init__.py +125 -0
  20. hammad/genai/graphs/base.py +1786 -0
  21. hammad/genai/graphs/plugins.py +316 -0
  22. hammad/genai/graphs/types.py +638 -0
  23. hammad/genai/models/language/__init__.py +6 -1
  24. hammad/genai/models/language/model.py +46 -0
  25. hammad/genai/models/language/run.py +330 -4
  26. hammad/genai/models/language/types/language_model_response.py +1 -1
  27. hammad/genai/types/tools.py +1 -1
  28. hammad/logging/logger.py +60 -5
  29. hammad/mcp/__init__.py +3 -0
  30. hammad/types.py +288 -0
  31. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/METADATA +6 -1
  32. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/RECORD +34 -32
  33. hammad/_main/__init__.py +0 -4
  34. hammad/_main/_fn.py +0 -20
  35. hammad/_main/_new.py +0 -52
  36. hammad/_main/_run.py +0 -50
  37. hammad/_main/_to.py +0 -19
  38. hammad/cli/_runner.py +0 -265
  39. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/WHEEL +0 -0
  40. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,316 @@
1
+ """hammad.genai.graphs.plugins - Plugin system for graphs"""
2
+
3
+ from typing import Any, Dict, List, Optional, Type, Callable, Union
4
+ from dataclasses import dataclass, field
5
+
6
+ from ..types.history import History
7
+ from ..models.language.model import LanguageModel
8
+ from ..models.language.types.language_model_name import LanguageModelName
9
+ from .types import BasePlugin, GraphContext
10
+
11
+ __all__ = [
12
+ "plugin",
13
+ "PluginDecorator",
14
+ "HistoryPlugin",
15
+ "MemoryPlugin",
16
+ "AudioPlugin",
17
+ "ServePlugin",
18
+ "SettingsPlugin",
19
+ ]
20
+
21
+
22
@dataclass
class PluginConfig:
    """Configuration record for one registered plugin.

    Produced by the :class:`PluginDecorator` factory methods and consumed
    when the graph instantiates its plugins.
    """

    # Short identifier for the plugin slot (e.g. "history", "memory").
    name: str
    # Concrete plugin class to instantiate for this slot.
    plugin_class: Type[BasePlugin]
    # Keyword options forwarded to the plugin's constructor.
    config: Dict[str, Any] = field(default_factory=dict)
29
+
30
+
31
class PluginDecorator:
    """Decorator registry for adding plugins to graphs.

    Each public method (:meth:`history`, :meth:`memory`, :meth:`audio`,
    :meth:`serve`, :meth:`settings`) returns a class decorator that records
    a :class:`PluginConfig` on this registry and hands the decorated class
    back unchanged.  The registration boilerplate previously duplicated in
    every method now lives once in :meth:`_register`.
    """

    def __init__(self) -> None:
        # Plugin configs accumulate in registration order.
        self._plugins: List[PluginConfig] = []

    def _register(
        self,
        name: str,
        plugin_class: Type[BasePlugin],
        config: Dict[str, Any],
    ) -> Callable:
        """Build a class decorator that appends one :class:`PluginConfig`.

        Args:
            name: Plugin slot identifier.
            plugin_class: Plugin class to instantiate later.
            config: Constructor options for the plugin.

        Returns:
            A decorator that records the config and returns its class as-is.
        """

        def decorator(cls):
            self._plugins.append(
                PluginConfig(name=name, plugin_class=plugin_class, config=config)
            )
            return cls

        return decorator

    def history(
        self,
        summarize: bool = False,
        model: Optional[LanguageModelName] = None,
        max_messages: int = 100,
        **kwargs: Any,
    ) -> Callable:
        """Add history plugin with automatic summarization."""
        return self._register(
            "history",
            HistoryPlugin,
            {
                "summarize": summarize,
                "model": model,
                "max_messages": max_messages,
                **kwargs,
            },
        )

    def memory(
        self,
        collection_name: Optional[str] = None,
        searchable: bool = True,
        vector_enabled: bool = True,
        **kwargs: Any,
    ) -> Callable:
        """Add memory plugin for long-term searchable memory."""
        return self._register(
            "memory",
            MemoryPlugin,
            {
                "collection_name": collection_name,
                "searchable": searchable,
                "vector_enabled": vector_enabled,
                **kwargs,
            },
        )

    def audio(self, model: LanguageModelName, **kwargs: Any) -> Callable:
        """Add audio plugin for voice output."""
        return self._register("audio", AudioPlugin, {"model": model, **kwargs})

    def serve(
        self,
        server_type: Union[str, List[str]] = "http",
        settings: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Callable:
        """Add serve plugin for running graphs as servers."""
        return self._register(
            "serve",
            ServePlugin,
            {
                "server_type": server_type,
                "settings": settings or {},
                **kwargs,
            },
        )

    def settings(
        self,
        model: Optional[LanguageModelName] = None,
        tools: Optional[List[Callable]] = None,
        summarize_tools: bool = True,
        summarize_tools_with_model: bool = False,
        max_steps: Optional[int] = None,
        **kwargs: Any,
    ) -> Callable:
        """Add settings plugin for global configuration."""
        return self._register(
            "settings",
            SettingsPlugin,
            {
                "model": model,
                "tools": tools or [],
                "summarize_tools": summarize_tools,
                "summarize_tools_with_model": summarize_tools_with_model,
                "max_steps": max_steps,
                **kwargs,
            },
        )

    def get_plugins(self) -> List[PluginConfig]:
        """Return a shallow copy of all registered plugin configs."""
        return self._plugins.copy()

    def clear(self) -> None:
        """Clear all registered plugins."""
        self._plugins.clear()
165
+
166
+
167
+ # Plugin implementations
168
+
169
+
170
class HistoryPlugin(BasePlugin):
    """Plugin for managing graph history with optional summarization.

    Records a system message for every action start/end event.  When
    ``summarize`` is enabled and a model is configured, the history is
    compacted once it reaches ``max_messages`` entries.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # History is created lazily on the first recorded event.
        self.history: Optional[History] = None
        self.summarize = kwargs.get("summarize", False)
        self.model = kwargs.get("model")
        self.max_messages = kwargs.get("max_messages", 100)

    def on_action_start(self, context: GraphContext[Any], action_name: str) -> None:
        """Record an action-start event in the history."""
        if self.history is None:
            self.history = History()

        event = {
            "role": "system",
            "content": f"Graph action started: {action_name}",
            "metadata": {"action_name": action_name, "event_type": "action_start"},
        }
        self.history.add_message(event)

    def on_action_end(
        self, context: GraphContext[Any], action_name: str, result: Any
    ) -> None:
        """Record an action-end event, then compact the history if needed."""
        if self.history is None:
            self.history = History()

        event = {
            "role": "system",
            "content": f"Graph action completed: {action_name}",
            "metadata": {
                "action_name": action_name,
                "result": result,
                "event_type": "action_end",
            },
        }
        self.history.add_message(event)

        # Compact once the message count reaches the configured window.
        if self.summarize and len(self.history.messages) >= self.max_messages:
            self._summarize_history()

    def _summarize_history(self):
        """Summarize the history using the configured model.

        NOTE(review): model-based summarization is not implemented yet;
        this currently just keeps the most recent half of the window.
        """
        if not (self.model and self.history):
            return
        # Same arithmetic as the original slice: (-max_messages) // 2.
        cutoff = (-self.max_messages) // 2
        self.history.messages = self.history.messages[cutoff:]
224
+
225
+
226
class MemoryPlugin(BasePlugin):
    """Plugin for long-term memory storage and retrieval.

    Every finished action is stored under a unique key in an in-process
    dict; :meth:`search_memory` performs a naive case-insensitive
    substring match over the stored entries.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.collection_name = kwargs.get("collection_name", "graph_memory")
        self.searchable = kwargs.get("searchable", True)
        self.vector_enabled = kwargs.get("vector_enabled", True)
        # Keys are "<action_name>_<insertion index>".
        self.memory_store: Dict[str, Any] = {}

    def on_action_end(
        self, context: GraphContext[Any], action_name: str, result: Any
    ) -> None:
        """Store action results in memory."""
        key = f"{action_name}_{len(self.memory_store)}"
        self.memory_store[key] = {
            "action_name": action_name,
            "result": result,
            "timestamp": context.metadata.get("timestamp"),
            "state": context.state,
        }

    def search_memory(self, query: str) -> List[Dict[str, Any]]:
        """Search memory for relevant information.

        Simple text search for now: matches *query* (case-insensitively)
        against each entry's string representation.
        """
        needle = query.lower()
        return [
            entry
            for entry in self.memory_store.values()
            if needle in str(entry).lower()
        ]
256
+
257
+
258
class AudioPlugin(BasePlugin):
    """Plugin for audio/TTS output.

    Does not synthesize audio itself: it only annotates the graph context
    so a downstream consumer can generate speech from the result.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model = kwargs.get("model")

    def on_action_end(
        self, context: GraphContext[Any], action_name: str, result: Any
    ) -> None:
        """Flag the context for audio generation when configured."""
        # Nothing to do without a TTS model or a non-empty result.
        if not self.model or not result:
            return
        context.metadata["audio_generated"] = True
        context.metadata["tts_model"] = self.model
        context.metadata["audio_content"] = str(result)
274
+
275
+
276
class ServePlugin(BasePlugin):
    """Plugin for serving graphs as web services.

    Records server configuration on the context at graph start and flags
    the response as ready at graph end; the actual serving machinery is
    expected to live elsewhere and read these markers.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.server_type = kwargs.get("server_type", "http")
        self.settings = kwargs.get("settings", {})

    def on_graph_start(self, context: GraphContext[Any]) -> None:
        """Publish server configuration onto the context metadata."""
        meta = context.metadata
        meta["server_type"] = self.server_type
        meta["serve_enabled"] = True

    def on_graph_end(self, context: GraphContext[Any]) -> None:
        """Mark that a server response may now be produced."""
        context.metadata["server_response_ready"] = True
292
+
293
+
294
class SettingsPlugin(BasePlugin):
    """Plugin for global graph settings.

    Copies configured defaults onto the graph context's metadata at graph
    start so downstream components can read them.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model = kwargs.get("model")
        self.tools = kwargs.get("tools", [])
        self.summarize_tools = kwargs.get("summarize_tools", True)
        self.summarize_tools_with_model = kwargs.get(
            "summarize_tools_with_model", False
        )
        self.max_steps = kwargs.get("max_steps")

    def on_graph_start(self, context: GraphContext[Any]) -> None:
        """Apply global settings at graph start.

        NOTE(review): ``summarize_tools_with_model`` is stored on the
        plugin but never written to metadata here — confirm whether that
        omission is intentional.
        """
        meta = context.metadata
        meta["global_model"] = self.model
        meta["global_tools"] = self.tools
        meta["max_steps"] = self.max_steps
        meta["summarize_tools"] = self.summarize_tools
313
+
314
+
315
# Module-level singleton: import this and apply e.g. ``@plugin.history(...)``
# to a graph class to register plugins.
plugin = PluginDecorator()