hammad-python 0.0.29__py3-none-any.whl → 0.0.31__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- ham/__init__.py +10 -0
- {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
- hammad_python-0.0.31.dist-info/RECORD +6 -0
- hammad/__init__.py +0 -84
- hammad/_internal.py +0 -256
- hammad/_main.py +0 -226
- hammad/cache/__init__.py +0 -40
- hammad/cache/base_cache.py +0 -181
- hammad/cache/cache.py +0 -169
- hammad/cache/decorators.py +0 -261
- hammad/cache/file_cache.py +0 -80
- hammad/cache/ttl_cache.py +0 -74
- hammad/cli/__init__.py +0 -33
- hammad/cli/animations.py +0 -573
- hammad/cli/plugins.py +0 -867
- hammad/cli/styles/__init__.py +0 -55
- hammad/cli/styles/settings.py +0 -139
- hammad/cli/styles/types.py +0 -358
- hammad/cli/styles/utils.py +0 -634
- hammad/data/__init__.py +0 -90
- hammad/data/collections/__init__.py +0 -49
- hammad/data/collections/collection.py +0 -326
- hammad/data/collections/indexes/__init__.py +0 -37
- hammad/data/collections/indexes/qdrant/__init__.py +0 -1
- hammad/data/collections/indexes/qdrant/index.py +0 -723
- hammad/data/collections/indexes/qdrant/settings.py +0 -94
- hammad/data/collections/indexes/qdrant/utils.py +0 -210
- hammad/data/collections/indexes/tantivy/__init__.py +0 -1
- hammad/data/collections/indexes/tantivy/index.py +0 -426
- hammad/data/collections/indexes/tantivy/settings.py +0 -40
- hammad/data/collections/indexes/tantivy/utils.py +0 -176
- hammad/data/configurations/__init__.py +0 -35
- hammad/data/configurations/configuration.py +0 -564
- hammad/data/models/__init__.py +0 -50
- hammad/data/models/extensions/__init__.py +0 -4
- hammad/data/models/extensions/pydantic/__init__.py +0 -42
- hammad/data/models/extensions/pydantic/converters.py +0 -759
- hammad/data/models/fields.py +0 -546
- hammad/data/models/model.py +0 -1078
- hammad/data/models/utils.py +0 -280
- hammad/data/sql/__init__.py +0 -24
- hammad/data/sql/database.py +0 -576
- hammad/data/sql/types.py +0 -127
- hammad/data/types/__init__.py +0 -75
- hammad/data/types/file.py +0 -431
- hammad/data/types/multimodal/__init__.py +0 -36
- hammad/data/types/multimodal/audio.py +0 -200
- hammad/data/types/multimodal/image.py +0 -182
- hammad/data/types/text.py +0 -1308
- hammad/formatting/__init__.py +0 -33
- hammad/formatting/json/__init__.py +0 -27
- hammad/formatting/json/converters.py +0 -158
- hammad/formatting/text/__init__.py +0 -63
- hammad/formatting/text/converters.py +0 -723
- hammad/formatting/text/markdown.py +0 -131
- hammad/formatting/yaml/__init__.py +0 -26
- hammad/formatting/yaml/converters.py +0 -5
- hammad/genai/__init__.py +0 -217
- hammad/genai/a2a/__init__.py +0 -32
- hammad/genai/a2a/workers.py +0 -552
- hammad/genai/agents/__init__.py +0 -59
- hammad/genai/agents/agent.py +0 -1973
- hammad/genai/agents/run.py +0 -1024
- hammad/genai/agents/types/__init__.py +0 -42
- hammad/genai/agents/types/agent_context.py +0 -13
- hammad/genai/agents/types/agent_event.py +0 -128
- hammad/genai/agents/types/agent_hooks.py +0 -220
- hammad/genai/agents/types/agent_messages.py +0 -31
- hammad/genai/agents/types/agent_response.py +0 -125
- hammad/genai/agents/types/agent_stream.py +0 -327
- hammad/genai/graphs/__init__.py +0 -125
- hammad/genai/graphs/_utils.py +0 -190
- hammad/genai/graphs/base.py +0 -1828
- hammad/genai/graphs/plugins.py +0 -316
- hammad/genai/graphs/types.py +0 -638
- hammad/genai/models/__init__.py +0 -1
- hammad/genai/models/embeddings/__init__.py +0 -43
- hammad/genai/models/embeddings/model.py +0 -226
- hammad/genai/models/embeddings/run.py +0 -163
- hammad/genai/models/embeddings/types/__init__.py +0 -37
- hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
- hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
- hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
- hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
- hammad/genai/models/language/__init__.py +0 -57
- hammad/genai/models/language/model.py +0 -1098
- hammad/genai/models/language/run.py +0 -878
- hammad/genai/models/language/types/__init__.py +0 -40
- hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
- hammad/genai/models/language/types/language_model_messages.py +0 -28
- hammad/genai/models/language/types/language_model_name.py +0 -239
- hammad/genai/models/language/types/language_model_request.py +0 -127
- hammad/genai/models/language/types/language_model_response.py +0 -217
- hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
- hammad/genai/models/language/types/language_model_settings.py +0 -89
- hammad/genai/models/language/types/language_model_stream.py +0 -600
- hammad/genai/models/language/utils/__init__.py +0 -28
- hammad/genai/models/language/utils/requests.py +0 -421
- hammad/genai/models/language/utils/structured_outputs.py +0 -135
- hammad/genai/models/model_provider.py +0 -4
- hammad/genai/models/multimodal.py +0 -47
- hammad/genai/models/reranking.py +0 -26
- hammad/genai/types/__init__.py +0 -1
- hammad/genai/types/base.py +0 -215
- hammad/genai/types/history.py +0 -290
- hammad/genai/types/tools.py +0 -507
- hammad/logging/__init__.py +0 -35
- hammad/logging/decorators.py +0 -834
- hammad/logging/logger.py +0 -1018
- hammad/mcp/__init__.py +0 -53
- hammad/mcp/client/__init__.py +0 -35
- hammad/mcp/client/client.py +0 -624
- hammad/mcp/client/client_service.py +0 -400
- hammad/mcp/client/settings.py +0 -178
- hammad/mcp/servers/__init__.py +0 -26
- hammad/mcp/servers/launcher.py +0 -1161
- hammad/runtime/__init__.py +0 -32
- hammad/runtime/decorators.py +0 -142
- hammad/runtime/run.py +0 -299
- hammad/service/__init__.py +0 -49
- hammad/service/create.py +0 -527
- hammad/service/decorators.py +0 -283
- hammad/types.py +0 -288
- hammad/typing/__init__.py +0 -435
- hammad/web/__init__.py +0 -43
- hammad/web/http/__init__.py +0 -1
- hammad/web/http/client.py +0 -944
- hammad/web/models.py +0 -275
- hammad/web/openapi/__init__.py +0 -1
- hammad/web/openapi/client.py +0 -740
- hammad/web/search/__init__.py +0 -1
- hammad/web/search/client.py +0 -1023
- hammad/web/utils.py +0 -472
- hammad_python-0.0.29.dist-info/RECORD +0 -135
- {hammad → ham}/py.typed +0 -0
- {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
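Taken together, the file list describes a wholesale restructure rather than an incremental change: the entire `hammad` package tree (and its 0.0.29 RECORD) is deleted, while a new top-level `ham` package appears with a 10-line `__init__.py` and the relocated `py.typed` marker. The contents of the new `ham` package are not shown in this diff, so the defensive import below is a hypothetical sketch for downstream code, not part of the package itself:

```python
# Hypothetical guard (not from the package): the 0.0.29 `hammad` namespace is
# removed in 0.0.31, which ships a new `ham` package this diff does not show.
try:
    import hammad  # works on the hammad-python 0.0.29 layout
except ModuleNotFoundError:
    raise ImportError(
        "hammad-python 0.0.31 removed the `hammad` package; "
        "pin hammad-python==0.0.29 or migrate to the new `ham` package."
    ) from None
```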
hammad/genai/agents/types/agent_stream.py
DELETED
@@ -1,327 +0,0 @@
"""hammad.genai.agents.types.agent_stream"""

from typing import (
    Generic,
    TypeVar,
    Iterator,
    AsyncIterator,
    List,
    Any,
    Dict,
    Optional,
    TYPE_CHECKING,
    Type,
    Union,
    Literal,
)
from contextlib import contextmanager, asynccontextmanager

from ...types.base import BaseGenAIModelStream
from ...types.tools import (
    Tool,
    execute_tools_from_language_model_response,
)
from ...models.language.model import LanguageModel
from ...models.language.types import (
    LanguageModelMessages,
    LanguageModelResponse,
    LanguageModelResponseChunk,
    LanguageModelStream,
    LanguageModelSettings,
)
from ...models.language.utils.requests import (
    parse_messages_input as parse_messages,
    consolidate_system_messages,
)

from .agent_response import (
    AgentResponse,
    _create_agent_response_from_language_model_response,
)
from .agent_context import AgentContext

if TYPE_CHECKING:
    from ..agent import Agent

T = TypeVar("T")


class AgentResponseChunk(LanguageModelResponseChunk[T], Generic[T]):
    """A chunk from an agent response stream representing a single step."""

    def __init__(
        self,
        step_number: int,
        response: LanguageModelResponse[str],
        output: T | None = None,
        content: str | None = None,
        model: str | None = None,
        is_final: bool = False,
        **kwargs: Any,
    ):
        """Initialize an AgentResponseChunk.

        Args:
            step_number: The step number of this chunk
            response: The language model response for this step
            output: The output value
            content: The content string
            model: The model name
            is_final: Whether this is the final chunk
            **kwargs: Additional keyword arguments
        """
        super().__init__(
            output=output if output is not None else response.output,
            content=content if content is not None else response.content,
            model=model if model is not None else response.model,
            is_final=is_final,
            **kwargs,
        )
        self.step_number = step_number
        self.response = response

    def __bool__(self) -> bool:
        """Check if this chunk has meaningful content."""
        return bool(self.response)

    def __str__(self) -> str:
        """String representation of the chunk."""
        output = f"AgentResponseChunk(step={self.step_number}, final={self.is_final})"

        # Show content if available
        if self.output or self.content:
            content_preview = str(self.output if self.output else self.content)
            if len(content_preview) > 100:
                content_preview = content_preview[:100] + "..."
            output += f"\nContent: {content_preview}"

        return output


class AgentStream(
    BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T, AgentContext]
):
    """Stream of agent responses that can be used in sync and async contexts."""

    def __init__(
        self,
        agent: "Agent[T]",
        messages: LanguageModelMessages,
        model: Optional[Union[LanguageModel, str]] = None,
        max_steps: Optional[int] = None,
        context: Optional[AgentContext] = None,
        output_type: Optional[Type[T]] = None,
        stream: bool = False,
        **kwargs: Any,
    ):
        self.agent = agent
        self.messages = messages
        self.context = context
        self.output_type = output_type
        self.stream = stream
        self.kwargs = kwargs
        self.current_step = 0
        self.steps: List[LanguageModelResponse[str]] = []
        self.current_messages = parse_messages(messages)
        self.is_done = False
        self._final_response: Optional[LanguageModelResponse[str]] = None

        # Model setup
        if model is None:
            self.model = agent.model
        elif isinstance(model, str):
            self.model = LanguageModel(model=model)
        else:
            self.model = model

        # Max steps setup
        self.max_steps = max_steps or agent.settings.max_steps

        # Context handling
        self.current_context = context
        self.initial_context = context

        # Model kwargs setup
        self.model_kwargs = kwargs.copy()
        if output_type:
            self.model_kwargs["type"] = output_type
        if agent.instructor_mode:
            self.model_kwargs["instructor_mode"] = agent.instructor_mode
        if stream:
            self.model_kwargs["stream"] = stream

    def _format_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        if self.agent.instructions:
            system_content = self.agent.instructions

            # Add context if available
            if self.current_context is not None:
                from ..agent import _format_context_for_instructions

                context_str = _format_context_for_instructions(
                    self.current_context, self.agent.context_format
                )
                if context_str:
                    system_content += f"\n\nContext:\n{context_str}"

            system_message = {"role": "system", "content": system_content}
            messages = [system_message] + messages
        return consolidate_system_messages(messages)

    def _process_response(
        self, response: LanguageModelResponse
    ) -> AgentResponseChunk[T]:
        self.current_messages.append(response.to_message())

        if response.has_tool_calls:
            tool_responses = execute_tools_from_language_model_response(
                tools=self.agent.tools, response=response
            )
            for tool_resp in tool_responses:
                self.current_messages.append(tool_resp.to_dict())

            self.steps.append(response)
            return AgentResponseChunk(
                step_number=self.current_step, response=response, is_final=False
            )
        else:
            self.is_done = True
            self._final_response = response

            # Update context after processing if configured
            if self.current_context and self.agent._should_update_context(
                self.current_context, "after"
            ):
                self.current_context = self.agent._perform_context_update(
                    context=self.current_context,
                    model=self.model,
                    current_messages=self.current_messages,
                    timing="after",
                )

            return AgentResponseChunk(
                step_number=self.current_step, response=response, is_final=True
            )

    def __iter__(self) -> Iterator[AgentResponseChunk[T]]:
        # The context manager handling should be managed by the agent's run method
        while not self.is_done and self.current_step < self.max_steps:
            self.current_step += 1

            # Update context before processing if configured
            if self.current_context and self.agent._should_update_context(
                self.current_context, "before"
            ):
                self.current_context = self.agent._perform_context_update(
                    context=self.current_context,
                    model=self.model,
                    current_messages=self.current_messages,
                    timing="before",
                )

            formatted_messages = self.current_messages
            if self.current_step == 1:
                formatted_messages = self._format_messages(self.current_messages)

            response = self.model.run(
                messages=formatted_messages,
                tools=[tool.model_dump() for tool in self.agent.tools]
                if self.agent.tools
                else None,
                **self.model_kwargs,
            )

            chunk = self._process_response(response)
            yield chunk
            if chunk.is_final:
                break

    def __aiter__(self) -> AsyncIterator[AgentResponseChunk[T]]:
        return self

    async def __anext__(self) -> AgentResponseChunk[T]:
        if self.is_done or self.current_step >= self.max_steps:
            raise StopAsyncIteration

        # The context manager handling should be managed by the agent's run method
        self.current_step += 1

        # Update context before processing if configured
        if self.current_context and self.agent._should_update_context(
            self.current_context, "before"
        ):
            self.current_context = self.agent._perform_context_update(
                context=self.current_context,
                model=self.model,
                current_messages=self.current_messages,
                timing="before",
            )

        formatted_messages = self.current_messages
        if self.current_step == 1:
            formatted_messages = self._format_messages(self.current_messages)

        response = await self.model.async_run(
            messages=formatted_messages,
            tools=[tool.model_dump() for tool in self.agent.tools]
            if self.agent.tools
            else None,
            **self.model_kwargs,
        )

        chunk = self._process_response(response)
        if chunk.is_final:
            self.is_done = True
        return chunk

    def _build_response(self) -> AgentResponse[T, AgentContext]:
        if self._final_response:
            final_response = self._final_response
        elif self.steps:
            final_response = self.steps[-1]
        else:
            raise RuntimeError("No response generated by the agent.")

        return _create_agent_response_from_language_model_response(
            response=final_response,
            steps=self.steps,
            context=self.current_context,
        )

    def _format_context_display(self, context: AgentContext) -> str:
        """Format context for display in string representation."""
        if context is None:
            return "None"

        try:
            # For Pydantic models, show as dict
            if hasattr(context, "model_dump"):
                context_dict = context.model_dump()
            elif isinstance(context, dict):
                context_dict = context
            else:
                return str(context)

            # Format as compact JSON-like string
            items = []
            for key, value in context_dict.items():
                if isinstance(value, str):
                    items.append(f"{key}='{value}'")
                else:
                    items.append(f"{key}={value}")

            return "{" + ", ".join(items) + "}"
        except Exception:
            return str(context)

    def collect(self) -> AgentResponse[T, AgentContext]:
        """Collect all steps and return final response."""
        for _ in self:
            pass
        return self._build_response()

    async def async_collect(self) -> AgentResponse[T, AgentContext]:
        """Collect all steps and return final response."""
        async for _ in self:
            pass
        return self._build_response()
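For reference, a minimal consumption sketch for the deleted stream class above. That `Agent.run(..., stream=True)` returned an `AgentStream` is an assumption inferred from the constructor signature and class docstring, not something this diff shows:

```python
# Minimal sketch, assuming agent.run(..., stream=True) returned an AgentStream.
stream = agent.run("Summarize the quarterly report", stream=True)

# Sync consumption: each AgentResponseChunk wraps one model step; iteration
# stops when a step has no tool calls (is_final=True) or max_steps is reached.
for chunk in stream:
    print(f"step {chunk.step_number}: final={chunk.is_final}")

# Or drain all steps and build the final AgentResponse in one call:
#     response = stream.collect()               # sync
#     response = await stream.async_collect()   # async
```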
hammad/genai/graphs/__init__.py
DELETED
@@ -1,125 +0,0 @@
"""hammad.genai.graphs - Graph-based workflow framework built on pydantic-graph

This module provides a high-level interface for creating graph-based workflows
that integrate seamlessly with hammad's Agent and LanguageModel infrastructure.

Key Features:
- Action decorator system for defining graph nodes
- Automatic integration with Agent and LanguageModel
- IDE-friendly type hints and parameter unpacking
- Plugin system for extensibility
- Built on pydantic-graph for robust execution

Basic Usage:
    from hammad.genai.graphs import BaseGraph, action
    from pydantic import BaseModel

    class MyState(BaseModel):
        count: int = 0

    class CountingGraph(BaseGraph[MyState, str]):
        @action.start()
        def start_counting(self, ctx, agent, target: int):
            # Use agent for AI operations
            response = agent.run(f"Count from 1 to {target}")
            return response.output

    # Usage
    graph = CountingGraph()
    result = graph.run(target=5)
    print(result.output)

Advanced Usage with Plugins:
    from hammad.genai.graphs import plugin

    @plugin.history(summarize=True)
    @plugin.memory(collection_name="counting")
    class AdvancedGraph(BaseGraph[MyState, str]):
        @action.start(instructions="You are a helpful counting assistant")
        def count_with_memory(self, ctx, agent, target: int):
            # Agent will have instructions and plugins automatically applied
            return agent.run(f"Count to {target} and remember this session")
"""

from typing import TYPE_CHECKING
from ..._internal import create_getattr_importer


if TYPE_CHECKING:
    from .base import (
        ActionDecorator,
        ActionNode,
        ActionSettings,
        BaseGraph,
        GraphBuilder,
        action,
        select,
        SelectionStrategy,
    )
    from .types import (
        GraphContext,
        GraphResponse,
        GraphState,
        BasePlugin,
        ActionSettings,
        ActionInfo,
        GraphEvent,
        GraphHistoryEntry,
        GraphStream,
        GraphResponseChunk,
        GraphNode,
        GraphEnd,
        PydanticGraphContext,
    )
    from .plugins import (
        plugin,
        PluginDecorator,
        HistoryPlugin,
        MemoryPlugin,
        AudioPlugin,
        ServePlugin,
        SettingsPlugin,
    )


__all__ = (
    # Core graph classes
    "BaseGraph",
    "GraphBuilder",
    "ActionDecorator",
    # Action system
    "action",
    "ActionNode",
    "ActionSettings",
    "ActionInfo",
    "select",
    "SelectionStrategy",
    # Plugin system
    "plugin",
    "BasePlugin",
    "PluginDecorator",
    "HistoryPlugin",
    "MemoryPlugin",
    "AudioPlugin",
    "ServePlugin",
    "SettingsPlugin",
    # Types and context
    "GraphContext",
    "GraphResponse",
    "GraphState",
    "GraphEvent",
    "GraphHistoryEntry",
    "GraphStream",
    "GraphResponseChunk",
    # Re-exports from pydantic-graph
    "GraphNode",
    "GraphEnd",
    "PydanticGraphContext",
)


__getattr__ = create_getattr_importer(__all__)


def __dir__() -> list[str]:
    return list(__all__)
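Everything above is exported lazily: the real imports sit under `TYPE_CHECKING`, and `create_getattr_importer` (defined in the also-deleted `hammad/_internal.py`) resolves names at attribute-access time via PEP 562. Below is a simplified sketch of that pattern as it could be written inline in an `__init__.py`; the actual helper's resolution logic is not shown in this diff, so the submodule search order here is an assumption:

```python
# Simplified PEP 562 lazy-import sketch (an assumption, not hammad's actual
# create_getattr_importer): names in __all__ are resolved from submodules on
# first attribute access, keeping `import hammad.genai.graphs` itself cheap.
import importlib

_SUBMODULES = (".base", ".types", ".plugins")  # search order is a guess

def __getattr__(name: str):
    if name in __all__:
        for sub in _SUBMODULES:
            module = importlib.import_module(sub, __package__)
            if hasattr(module, name):
                return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```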
hammad/genai/graphs/_utils.py
DELETED
@@ -1,190 +0,0 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from .base import BaseGraph


def visualize_base_graph(graph: "BaseGraph", filename: str) -> None:
    """Generate a visualization of the graph with clean, readable flow."""
    if not graph._action_nodes or not graph._start_action_name:
        raise ValueError("No actions defined in graph")

    # Build our own mermaid code for better control over layout
    mermaid_lines = ["graph TD"]  # Top-Down layout

    # Track which nodes we've already added
    added_nodes = set()

    # Style definitions
    mermaid_lines.append(" %% Styles")
    mermaid_lines.append(" classDef startNode fill:#4CAF50,stroke:#333,stroke-width:2px,color:#fff")
    mermaid_lines.append(" classDef endNode fill:#f44336,stroke:#333,stroke-width:2px,color:#fff")
    mermaid_lines.append(" classDef defaultNode fill:#2196F3,stroke:#333,stroke-width:2px,color:#fff")
    mermaid_lines.append("")

    # Helper to get clean node ID
    def get_node_id(action_name: str) -> str:
        return action_name.replace(" ", "_").replace("-", "_")

    # Helper to add a node if not already added
    def add_node(action_name: str, is_start: bool = False, is_end: bool = False) -> None:
        if action_name not in added_nodes:
            node_id = get_node_id(action_name)
            # Use the action name as the display label
            display_name = action_name

            if is_start:
                mermaid_lines.append(f" {node_id}[{display_name}]:::startNode")
            elif is_end:
                mermaid_lines.append(f" {node_id}[{display_name}]:::endNode")
            else:
                mermaid_lines.append(f" {node_id}[{display_name}]:::defaultNode")
            added_nodes.add(action_name)

    # Add all nodes and connections
    mermaid_lines.append(" %% Nodes and connections")

    # Start with the start node
    add_node(graph._start_action_name, is_start=True)

    # Process all actions to find connections
    for action_name in graph._action_nodes:
        action_func = getattr(graph, action_name, None)
        if action_func and hasattr(action_func, '_action_settings'):
            settings = action_func._action_settings

            # Add the node
            add_node(action_name, is_end=settings.terminates)

            # Add connections based on 'next' settings
            if settings.next:
                source_id = get_node_id(action_name)

                if isinstance(settings.next, str):
                    # Simple string case
                    target_id = get_node_id(settings.next)
                    add_node(settings.next)
                    mermaid_lines.append(f" {source_id} --> {target_id}")

                elif isinstance(settings.next, list):
                    # List case - branches to multiple nodes
                    for next_action in settings.next:
                        if isinstance(next_action, str):
                            target_id = get_node_id(next_action)
                            add_node(next_action)
                            mermaid_lines.append(f" {source_id} --> {target_id}")

                elif hasattr(settings.next, '__class__') and settings.next.__class__.__name__ == 'SelectionStrategy':
                    # SelectionStrategy case
                    if settings.next.actions:
                        # Show all possible paths with a decision diamond
                        decision_id = f"{source_id}_decision"
                        mermaid_lines.append(f" {source_id} --> {decision_id}{{LLM Selection}}")

                        for next_action in settings.next.actions:
                            target_id = get_node_id(next_action)
                            add_node(next_action)
                            mermaid_lines.append(f" {decision_id} --> {target_id}")
                    else:
                        # If no specific actions, it can go to any node
                        # For visualization, show connections to all non-start nodes
                        decision_id = f"{source_id}_decision"
                        mermaid_lines.append(f" {source_id} --> {decision_id}{{LLM Selection}}")

                        for other_action in graph._action_nodes:
                            if other_action != action_name and other_action != graph._start_action_name:
                                target_id = get_node_id(other_action)
                                add_node(other_action)
                                mermaid_lines.append(f" {decision_id} -.-> {target_id}")

    # If start node has no explicit next, but there are other nodes, show possible connections
    start_func = getattr(graph, graph._start_action_name, None)
    if start_func and hasattr(start_func, '_action_settings'):
        if not start_func._action_settings.next and len(graph._action_nodes) > 1:
            source_id = get_node_id(graph._start_action_name)
            # Find end nodes (terminates=True) to connect to
            for action_name in graph._action_nodes:
                if action_name != graph._start_action_name:
                    action_func = getattr(graph, action_name, None)
                    if action_func and hasattr(action_func, '_action_settings'):
                        if action_func._action_settings.terminates:
                            target_id = get_node_id(action_name)
                            add_node(action_name, is_end=True)
                            mermaid_lines.append(f" {source_id} --> {target_id}")

    # Join all lines
    mermaid_code = "\n".join(mermaid_lines)

    # Render the mermaid diagram and save it
    try:
        import subprocess
        import tempfile
        import os
        import shutil

        # Check if mmdc (mermaid CLI) is available
        if shutil.which('mmdc') is None:
            raise FileNotFoundError("mermaid-cli (mmdc) not found. Install with: npm install -g @mermaid-js/mermaid-cli")

        # Create a temporary mermaid file
        with tempfile.NamedTemporaryFile(mode='w', suffix='.mmd', delete=False) as temp_file:
            temp_file.write(mermaid_code)
            temp_mmd_path = temp_file.name

        try:
            # Determine output format from filename extension
            output_format = 'png'  # default
            if filename.lower().endswith('.svg'):
                output_format = 'svg'
            elif filename.lower().endswith('.pdf'):
                output_format = 'pdf'

            # Use mermaid CLI to render the diagram
            cmd = ['mmdc', '-i', temp_mmd_path, '-o', filename]

            # Add format flag only if not PNG (PNG is default)
            if output_format != 'png':
                cmd.extend(['-f', output_format])

            # Add theme and background color
            cmd.extend(['-t', 'default', '-b', 'transparent'])

            result = subprocess.run(cmd, capture_output=True, text=True, check=True)

            if result.returncode == 0:
                print(f"Graph visualization saved to: {filename}")
            else:
                raise subprocess.CalledProcessError(result.returncode, result.args, result.stderr)

        finally:
            # Clean up temporary file
            if os.path.exists(temp_mmd_path):
                os.unlink(temp_mmd_path)

    except FileNotFoundError as e:
        # Provide helpful error message for missing mermaid CLI
        print(f"Warning: {e}")
        # Save as .mmd file instead
        mmd_filename = filename.rsplit('.', 1)[0] + '.mmd'
        with open(mmd_filename, "w") as f:
            f.write(mermaid_code)
        print(f"Mermaid code saved to: {mmd_filename}")
        print("To render as PNG, install mermaid-cli: npm install -g @mermaid-js/mermaid-cli")

    except subprocess.CalledProcessError as e:
        # Handle mermaid CLI errors
        print(f"Error rendering mermaid diagram: {e.stderr if e.stderr else str(e)}")
        # Save as .mmd file as fallback
        mmd_filename = filename.rsplit('.', 1)[0] + '.mmd'
        with open(mmd_filename, "w") as f:
            f.write(mermaid_code)
        print(f"Mermaid code saved to: {mmd_filename} (rendering failed)")

    except Exception as e:
        # General fallback: save the mermaid code
        print(f"Unexpected error: {e}")
        mmd_filename = filename.rsplit('.', 1)[0] + '.mmd'
        with open(mmd_filename, "w") as f:
            f.write(mermaid_code)
        print(f"Mermaid code saved to: {mmd_filename}")