hammad-python 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ham/__init__.py +10 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
- hammad_python-0.0.31.dist-info/RECORD +6 -0
- hammad/__init__.py +0 -84
- hammad/_internal.py +0 -256
- hammad/_main.py +0 -226
- hammad/cache/__init__.py +0 -40
- hammad/cache/base_cache.py +0 -181
- hammad/cache/cache.py +0 -169
- hammad/cache/decorators.py +0 -261
- hammad/cache/file_cache.py +0 -80
- hammad/cache/ttl_cache.py +0 -74
- hammad/cli/__init__.py +0 -33
- hammad/cli/animations.py +0 -573
- hammad/cli/plugins.py +0 -867
- hammad/cli/styles/__init__.py +0 -55
- hammad/cli/styles/settings.py +0 -139
- hammad/cli/styles/types.py +0 -358
- hammad/cli/styles/utils.py +0 -634
- hammad/data/__init__.py +0 -90
- hammad/data/collections/__init__.py +0 -49
- hammad/data/collections/collection.py +0 -326
- hammad/data/collections/indexes/__init__.py +0 -37
- hammad/data/collections/indexes/qdrant/__init__.py +0 -1
- hammad/data/collections/indexes/qdrant/index.py +0 -723
- hammad/data/collections/indexes/qdrant/settings.py +0 -94
- hammad/data/collections/indexes/qdrant/utils.py +0 -210
- hammad/data/collections/indexes/tantivy/__init__.py +0 -1
- hammad/data/collections/indexes/tantivy/index.py +0 -426
- hammad/data/collections/indexes/tantivy/settings.py +0 -40
- hammad/data/collections/indexes/tantivy/utils.py +0 -176
- hammad/data/configurations/__init__.py +0 -35
- hammad/data/configurations/configuration.py +0 -564
- hammad/data/models/__init__.py +0 -50
- hammad/data/models/extensions/__init__.py +0 -4
- hammad/data/models/extensions/pydantic/__init__.py +0 -42
- hammad/data/models/extensions/pydantic/converters.py +0 -759
- hammad/data/models/fields.py +0 -546
- hammad/data/models/model.py +0 -1078
- hammad/data/models/utils.py +0 -280
- hammad/data/sql/__init__.py +0 -24
- hammad/data/sql/database.py +0 -576
- hammad/data/sql/types.py +0 -127
- hammad/data/types/__init__.py +0 -75
- hammad/data/types/file.py +0 -431
- hammad/data/types/multimodal/__init__.py +0 -36
- hammad/data/types/multimodal/audio.py +0 -200
- hammad/data/types/multimodal/image.py +0 -182
- hammad/data/types/text.py +0 -1308
- hammad/formatting/__init__.py +0 -33
- hammad/formatting/json/__init__.py +0 -27
- hammad/formatting/json/converters.py +0 -158
- hammad/formatting/text/__init__.py +0 -63
- hammad/formatting/text/converters.py +0 -723
- hammad/formatting/text/markdown.py +0 -131
- hammad/formatting/yaml/__init__.py +0 -26
- hammad/formatting/yaml/converters.py +0 -5
- hammad/genai/__init__.py +0 -217
- hammad/genai/a2a/__init__.py +0 -32
- hammad/genai/a2a/workers.py +0 -552
- hammad/genai/agents/__init__.py +0 -59
- hammad/genai/agents/agent.py +0 -1973
- hammad/genai/agents/run.py +0 -1024
- hammad/genai/agents/types/__init__.py +0 -42
- hammad/genai/agents/types/agent_context.py +0 -13
- hammad/genai/agents/types/agent_event.py +0 -128
- hammad/genai/agents/types/agent_hooks.py +0 -220
- hammad/genai/agents/types/agent_messages.py +0 -31
- hammad/genai/agents/types/agent_response.py +0 -125
- hammad/genai/agents/types/agent_stream.py +0 -327
- hammad/genai/graphs/__init__.py +0 -125
- hammad/genai/graphs/_utils.py +0 -190
- hammad/genai/graphs/base.py +0 -1828
- hammad/genai/graphs/plugins.py +0 -316
- hammad/genai/graphs/types.py +0 -638
- hammad/genai/models/__init__.py +0 -1
- hammad/genai/models/embeddings/__init__.py +0 -43
- hammad/genai/models/embeddings/model.py +0 -226
- hammad/genai/models/embeddings/run.py +0 -163
- hammad/genai/models/embeddings/types/__init__.py +0 -37
- hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
- hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
- hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
- hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
- hammad/genai/models/language/__init__.py +0 -57
- hammad/genai/models/language/model.py +0 -1098
- hammad/genai/models/language/run.py +0 -878
- hammad/genai/models/language/types/__init__.py +0 -40
- hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
- hammad/genai/models/language/types/language_model_messages.py +0 -28
- hammad/genai/models/language/types/language_model_name.py +0 -239
- hammad/genai/models/language/types/language_model_request.py +0 -127
- hammad/genai/models/language/types/language_model_response.py +0 -217
- hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
- hammad/genai/models/language/types/language_model_settings.py +0 -89
- hammad/genai/models/language/types/language_model_stream.py +0 -600
- hammad/genai/models/language/utils/__init__.py +0 -28
- hammad/genai/models/language/utils/requests.py +0 -421
- hammad/genai/models/language/utils/structured_outputs.py +0 -135
- hammad/genai/models/model_provider.py +0 -4
- hammad/genai/models/multimodal.py +0 -47
- hammad/genai/models/reranking.py +0 -26
- hammad/genai/types/__init__.py +0 -1
- hammad/genai/types/base.py +0 -215
- hammad/genai/types/history.py +0 -290
- hammad/genai/types/tools.py +0 -507
- hammad/logging/__init__.py +0 -35
- hammad/logging/decorators.py +0 -834
- hammad/logging/logger.py +0 -1018
- hammad/mcp/__init__.py +0 -53
- hammad/mcp/client/__init__.py +0 -35
- hammad/mcp/client/client.py +0 -624
- hammad/mcp/client/client_service.py +0 -400
- hammad/mcp/client/settings.py +0 -178
- hammad/mcp/servers/__init__.py +0 -26
- hammad/mcp/servers/launcher.py +0 -1161
- hammad/runtime/__init__.py +0 -32
- hammad/runtime/decorators.py +0 -142
- hammad/runtime/run.py +0 -299
- hammad/service/__init__.py +0 -49
- hammad/service/create.py +0 -527
- hammad/service/decorators.py +0 -283
- hammad/types.py +0 -288
- hammad/typing/__init__.py +0 -435
- hammad/web/__init__.py +0 -43
- hammad/web/http/__init__.py +0 -1
- hammad/web/http/client.py +0 -944
- hammad/web/models.py +0 -275
- hammad/web/openapi/__init__.py +0 -1
- hammad/web/openapi/client.py +0 -740
- hammad/web/search/__init__.py +0 -1
- hammad/web/search/client.py +0 -1023
- hammad/web/utils.py +0 -472
- hammad_python-0.0.30.dist-info/RECORD +0 -135
- {hammad → ham}/py.typed +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
hammad/genai/graphs/base.py
DELETED
@@ -1,1828 +0,0 @@
|
|
1
|
-
"""hammad.genai.graphs.base"""
|
2
|
-
|
3
|
-
from typing import (
|
4
|
-
Any,
|
5
|
-
Dict,
|
6
|
-
List,
|
7
|
-
Optional,
|
8
|
-
Type,
|
9
|
-
TypeVar,
|
10
|
-
Generic,
|
11
|
-
Union,
|
12
|
-
Callable,
|
13
|
-
get_type_hints,
|
14
|
-
ParamSpec,
|
15
|
-
TypeAlias,
|
16
|
-
Awaitable,
|
17
|
-
TYPE_CHECKING,
|
18
|
-
)
|
19
|
-
from typing_extensions import Literal
|
20
|
-
from dataclasses import dataclass, field
|
21
|
-
import inspect
|
22
|
-
from functools import wraps
|
23
|
-
import asyncio
|
24
|
-
|
25
|
-
from pydantic_graph import BaseNode, End, Graph as PydanticGraph, GraphRunContext
|
26
|
-
from ..models.language.utils import (
|
27
|
-
parse_messages_input,
|
28
|
-
consolidate_system_messages,
|
29
|
-
)
|
30
|
-
from ...formatting.text import convert_to_text
|
31
|
-
|
32
|
-
from ..agents.agent import Agent
|
33
|
-
from ..agents.types.agent_response import AgentResponse
|
34
|
-
from ..agents.types.agent_messages import AgentMessages
|
35
|
-
from ..models.language.model import LanguageModel
|
36
|
-
from ..models.language.types.language_model_name import LanguageModelName
|
37
|
-
from .types import (
|
38
|
-
GraphContext,
|
39
|
-
GraphResponse,
|
40
|
-
GraphStream,
|
41
|
-
GraphResponseChunk,
|
42
|
-
GraphState,
|
43
|
-
BasePlugin,
|
44
|
-
ActionSettings,
|
45
|
-
GraphHistoryEntry,
|
46
|
-
)
|
47
|
-
from ._utils import visualize_base_graph
|
48
|
-
|
49
|
-
if TYPE_CHECKING:
|
50
|
-
try:
|
51
|
-
from fasta2a import FastA2A
|
52
|
-
except ImportError:
|
53
|
-
FastA2A: TypeAlias = Any
|
54
|
-
|
55
|
-
__all__ = [
|
56
|
-
"BaseGraph",
|
57
|
-
"action",
|
58
|
-
"ActionNode",
|
59
|
-
"GraphBuilder",
|
60
|
-
"GraphStream",
|
61
|
-
"GraphResponseChunk",
|
62
|
-
"select",
|
63
|
-
"SelectionStrategy",
|
64
|
-
]
|
65
|
-
|
66
|
-
T = TypeVar("T")
|
67
|
-
StateT = TypeVar("StateT")
|
68
|
-
P = ParamSpec("P")
|
69
|
-
|
70
|
-
|
71
|
-
class SelectionStrategy:
|
72
|
-
"""LLM-based selection strategy for choosing the next action."""
|
73
|
-
|
74
|
-
def __init__(
|
75
|
-
self,
|
76
|
-
*actions: str,
|
77
|
-
instructions: Optional[str] = None,
|
78
|
-
model: Optional[str] = None,
|
79
|
-
):
|
80
|
-
self.actions = list(actions)
|
81
|
-
self.instructions = instructions
|
82
|
-
self.model = model or "openai/gpt-4o-mini"
|
83
|
-
self._language_model = None
|
84
|
-
self._use_all_actions = (
|
85
|
-
len(actions) == 0
|
86
|
-
) # If no actions specified, use all available
|
87
|
-
|
88
|
-
def _get_language_model(self):
|
89
|
-
"""Lazy load the language model."""
|
90
|
-
if self._language_model is None:
|
91
|
-
from ..models.language.model import LanguageModel
|
92
|
-
|
93
|
-
self._language_model = LanguageModel(model=self.model)
|
94
|
-
return self._language_model
|
95
|
-
|
96
|
-
def select(self, context: Optional[Dict[str, Any]] = None) -> str:
|
97
|
-
"""Use LLM to select the most appropriate action."""
|
98
|
-
if not context:
|
99
|
-
context = {}
|
100
|
-
|
101
|
-
# Get available actions
|
102
|
-
actions_to_choose_from = self.actions
|
103
|
-
if self._use_all_actions and "all_actions" in context:
|
104
|
-
# Use all available actions from the graph
|
105
|
-
actions_to_choose_from = context["all_actions"]
|
106
|
-
|
107
|
-
if not actions_to_choose_from:
|
108
|
-
return ""
|
109
|
-
|
110
|
-
# If only one action, return it
|
111
|
-
if len(actions_to_choose_from) == 1:
|
112
|
-
return actions_to_choose_from[0]
|
113
|
-
|
114
|
-
# Import here to avoid circular imports
|
115
|
-
from pydantic import BaseModel, Field, create_model
|
116
|
-
from enum import Enum
|
117
|
-
|
118
|
-
# Create enum for available actions
|
119
|
-
ActionEnum = Enum(
|
120
|
-
"ActionEnum", {action: action for action in actions_to_choose_from}
|
121
|
-
)
|
122
|
-
|
123
|
-
# Create selection model
|
124
|
-
SelectionModel = create_model(
|
125
|
-
"ActionSelection",
|
126
|
-
action=(
|
127
|
-
ActionEnum,
|
128
|
-
Field(description="The selected action to execute next"),
|
129
|
-
),
|
130
|
-
reasoning=(str, Field(description="Brief reasoning for the selection")),
|
131
|
-
)
|
132
|
-
|
133
|
-
# Build context description
|
134
|
-
context_parts = []
|
135
|
-
|
136
|
-
# Add result from previous action
|
137
|
-
if "result" in context:
|
138
|
-
context_parts.append(f"Previous action result: {context['result']}")
|
139
|
-
|
140
|
-
# Add conversation history
|
141
|
-
if "messages" in context and context["messages"]:
|
142
|
-
# Get last few messages for context
|
143
|
-
recent_messages = context["messages"][-5:] # Last 5 messages
|
144
|
-
messages_str = "\n".join(
|
145
|
-
[
|
146
|
-
f"{msg.get('role', 'unknown')}: {msg.get('content', '')}"
|
147
|
-
for msg in recent_messages
|
148
|
-
]
|
149
|
-
)
|
150
|
-
context_parts.append(f"Recent conversation:\n{messages_str}")
|
151
|
-
|
152
|
-
# Add state information
|
153
|
-
if "state" in context and context["state"]:
|
154
|
-
context_parts.append(f"Current state: {context['state']}")
|
155
|
-
|
156
|
-
context_description = "\n\n".join(context_parts)
|
157
|
-
|
158
|
-
# Build selection prompt
|
159
|
-
base_instructions = f"""Based on the context below, select the most appropriate next action from the available options.
|
160
|
-
|
161
|
-
Available actions:
|
162
|
-
{", ".join(actions_to_choose_from)}
|
163
|
-
|
164
|
-
Context:
|
165
|
-
{context_description}
|
166
|
-
|
167
|
-
Consider the conversation flow, user's request, and any patterns in the conversation when making your selection.
|
168
|
-
For example, if the user asked to do something multiple times (e.g., "reason twice"), and you've only done it once, select that action again."""
|
169
|
-
|
170
|
-
# Add custom instructions if provided
|
171
|
-
if self.instructions:
|
172
|
-
base_instructions = (
|
173
|
-
f"{base_instructions}\n\nAdditional instructions:\n{self.instructions}"
|
174
|
-
)
|
175
|
-
|
176
|
-
# Get language model to make selection
|
177
|
-
try:
|
178
|
-
lm = self._get_language_model()
|
179
|
-
response = lm.run(
|
180
|
-
messages=[{"role": "user", "content": base_instructions}],
|
181
|
-
type=SelectionModel,
|
182
|
-
)
|
183
|
-
|
184
|
-
selected_action = response.output.action.value
|
185
|
-
|
186
|
-
# Validate the selection
|
187
|
-
if selected_action in actions_to_choose_from:
|
188
|
-
return selected_action
|
189
|
-
else:
|
190
|
-
# Fallback to first action if invalid selection
|
191
|
-
return actions_to_choose_from[0]
|
192
|
-
|
193
|
-
except Exception:
|
194
|
-
# Fallback to first action on any error
|
195
|
-
return actions_to_choose_from[0] if actions_to_choose_from else ""
|
196
|
-
|
197
|
-
def __repr__(self) -> str:
|
198
|
-
if self._use_all_actions:
|
199
|
-
return f"SelectionStrategy(all_actions)"
|
200
|
-
return f"SelectionStrategy({', '.join(repr(a) for a in self.actions)})"
|
201
|
-
|
202
|
-
def select(self, context: Optional[Dict[str, Any]] = None) -> str:
|
203
|
-
"""Use LLM to select the most appropriate action."""
|
204
|
-
if not context or not self.actions:
|
205
|
-
return self.actions[0] if self.actions else ""
|
206
|
-
|
207
|
-
# Import here to avoid circular imports
|
208
|
-
from pydantic import BaseModel, Field, create_model
|
209
|
-
from enum import Enum
|
210
|
-
|
211
|
-
# Create enum for available actions
|
212
|
-
ActionEnum = Enum("ActionEnum", {action: action for action in self.actions})
|
213
|
-
|
214
|
-
# Create selection model
|
215
|
-
SelectionModel = create_model(
|
216
|
-
"ActionSelection",
|
217
|
-
action=(
|
218
|
-
ActionEnum,
|
219
|
-
Field(description="The selected action to execute next"),
|
220
|
-
),
|
221
|
-
reasoning=(str, Field(description="Brief reasoning for the selection")),
|
222
|
-
)
|
223
|
-
|
224
|
-
# Build context description
|
225
|
-
context_parts = []
|
226
|
-
|
227
|
-
# Add result from previous action
|
228
|
-
if "result" in context:
|
229
|
-
context_parts.append(f"Previous action result: {context['result']}")
|
230
|
-
|
231
|
-
# Add conversation history
|
232
|
-
if "messages" in context and context["messages"]:
|
233
|
-
# Get last few messages for context
|
234
|
-
recent_messages = context["messages"][-5:] # Last 5 messages
|
235
|
-
messages_str = "\n".join(
|
236
|
-
[
|
237
|
-
f"{msg.get('role', 'unknown')}: {msg.get('content', '')}"
|
238
|
-
for msg in recent_messages
|
239
|
-
]
|
240
|
-
)
|
241
|
-
context_parts.append(f"Recent conversation:\n{messages_str}")
|
242
|
-
|
243
|
-
# Add state information
|
244
|
-
if "state" in context and context["state"]:
|
245
|
-
context_parts.append(f"Current state: {context['state']}")
|
246
|
-
|
247
|
-
context_description = "\n\n".join(context_parts)
|
248
|
-
|
249
|
-
# Build selection prompt
|
250
|
-
base_instructions = f"""Based on the context below, select the most appropriate next action from the available options.
|
251
|
-
|
252
|
-
Available actions:
|
253
|
-
{", ".join(self.actions)}
|
254
|
-
|
255
|
-
Context:
|
256
|
-
{context_description}
|
257
|
-
|
258
|
-
Consider the conversation flow and any specific instructions from the user when making your selection."""
|
259
|
-
|
260
|
-
# Add custom instructions if provided
|
261
|
-
if self.instructions:
|
262
|
-
base_instructions = (
|
263
|
-
f"{base_instructions}\n\nAdditional instructions:\n{self.instructions}"
|
264
|
-
)
|
265
|
-
|
266
|
-
# Get language model to make selection
|
267
|
-
try:
|
268
|
-
lm = self._get_language_model()
|
269
|
-
response = lm.run(
|
270
|
-
messages=[{"role": "user", "content": base_instructions}],
|
271
|
-
type=SelectionModel,
|
272
|
-
)
|
273
|
-
|
274
|
-
selected_action = response.output.action.value
|
275
|
-
|
276
|
-
# Validate the selection
|
277
|
-
if selected_action in self.actions:
|
278
|
-
return selected_action
|
279
|
-
else:
|
280
|
-
# Fallback to first action if invalid selection
|
281
|
-
return self.actions[0]
|
282
|
-
|
283
|
-
except Exception:
|
284
|
-
# Fallback to first action on any error
|
285
|
-
return self.actions[0] if self.actions else ""
|
286
|
-
|
287
|
-
|
288
|
-
def select(
|
289
|
-
*actions: str, instructions: Optional[str] = None, model: Optional[str] = None
|
290
|
-
) -> SelectionStrategy:
|
291
|
-
"""
|
292
|
-
Create an LLM-based selection strategy for choosing between multiple actions.
|
293
|
-
|
294
|
-
Args:
|
295
|
-
*actions: The action names to choose from. If empty, will select from all available actions.
|
296
|
-
instructions: Optional instructions for the LLM selection
|
297
|
-
model: Optional model to use for selection (defaults to gpt-4o-mini)
|
298
|
-
|
299
|
-
Returns:
|
300
|
-
A SelectionStrategy instance
|
301
|
-
|
302
|
-
Examples:
|
303
|
-
# Select between specific actions
|
304
|
-
@action(next=select("poem", "response"))
|
305
|
-
def reasoning(self, message: str) -> str:
|
306
|
-
...
|
307
|
-
|
308
|
-
# Select from all available actions in the graph
|
309
|
-
@action(next=select())
|
310
|
-
def reasoning(self, message: str) -> str:
|
311
|
-
...
|
312
|
-
|
313
|
-
# With custom instructions
|
314
|
-
@action(next=select("reasoning", "response",
|
315
|
-
instructions="If the user asked for multiple reasonings, select 'reasoning' again"))
|
316
|
-
def reasoning(self, message: str) -> str:
|
317
|
-
...
|
318
|
-
"""
|
319
|
-
return SelectionStrategy(*actions, instructions=instructions, model=model)
|
320
|
-
|
321
|
-
|
322
|
-
class ActionNode(BaseNode[StateT, None, Any]):
|
323
|
-
"""A pydantic-graph node that wraps a user-defined action function."""
|
324
|
-
|
325
|
-
def __init__(
|
326
|
-
self,
|
327
|
-
action_name: str,
|
328
|
-
action_func: Callable,
|
329
|
-
settings: ActionSettings,
|
330
|
-
**action_params: Any,
|
331
|
-
):
|
332
|
-
"""Initialize the action node with parameters."""
|
333
|
-
self.action_name = action_name
|
334
|
-
self.action_func = action_func
|
335
|
-
self.settings = settings
|
336
|
-
|
337
|
-
# Store action parameters as instance attributes for pydantic-graph
|
338
|
-
for param_name, param_value in action_params.items():
|
339
|
-
setattr(self, param_name, param_value)
|
340
|
-
|
341
|
-
async def run(self, ctx: GraphRunContext[StateT]) -> Union[BaseNode, End]:
|
342
|
-
"""Execute the action function using Agent/LanguageModel infrastructure."""
|
343
|
-
|
344
|
-
# Track this node's execution
|
345
|
-
execution_tracker = getattr(self, "_execution_tracker", [])
|
346
|
-
execution_tracker.append(self.action_name)
|
347
|
-
|
348
|
-
# Create enhanced context that wraps pydantic-graph context
|
349
|
-
enhanced_ctx = GraphContext(
|
350
|
-
pydantic_context=ctx,
|
351
|
-
plugins=[], # Will be populated by BaseGraph
|
352
|
-
history=[],
|
353
|
-
metadata={},
|
354
|
-
)
|
355
|
-
|
356
|
-
# Extract action parameters from self
|
357
|
-
action_params = {}
|
358
|
-
sig = inspect.signature(self.action_func)
|
359
|
-
for param_name in sig.parameters:
|
360
|
-
if param_name not in ("self", "ctx", "context", "agent", "language_model"):
|
361
|
-
if hasattr(self, param_name):
|
362
|
-
action_params[param_name] = getattr(self, param_name)
|
363
|
-
|
364
|
-
# Get the docstring from the action function to use as field-level instructions
|
365
|
-
field_instructions = self.action_func.__doc__ or ""
|
366
|
-
|
367
|
-
# Get the global system prompt from the graph class docstring
|
368
|
-
global_system_prompt = ""
|
369
|
-
if hasattr(self, "_graph_docstring"):
|
370
|
-
global_system_prompt = self._graph_docstring
|
371
|
-
|
372
|
-
# Get state from the context if available
|
373
|
-
current_state = None
|
374
|
-
if hasattr(ctx, "state") and ctx.state is not None:
|
375
|
-
current_state = ctx.state
|
376
|
-
elif hasattr(self, "_state"):
|
377
|
-
current_state = getattr(self, "_state", None)
|
378
|
-
|
379
|
-
# Check if the action function expects to handle the language model itself
|
380
|
-
expects_language_model = (
|
381
|
-
"language_model" in sig.parameters or "agent" in sig.parameters
|
382
|
-
)
|
383
|
-
|
384
|
-
if expects_language_model:
|
385
|
-
# Legacy mode: action function expects to handle language model
|
386
|
-
# Combine global system prompt with field-level instructions and state
|
387
|
-
combined_instructions = global_system_prompt
|
388
|
-
if field_instructions and field_instructions not in combined_instructions:
|
389
|
-
if combined_instructions:
|
390
|
-
combined_instructions += f"\n\n{field_instructions}"
|
391
|
-
else:
|
392
|
-
combined_instructions = field_instructions
|
393
|
-
|
394
|
-
# Add state to instructions if available
|
395
|
-
if current_state is not None:
|
396
|
-
state_str = convert_to_text(current_state, show_defaults=False)
|
397
|
-
if state_str:
|
398
|
-
combined_instructions += f"\n\nState: {state_str}"
|
399
|
-
|
400
|
-
# Get verbose/debug flags and language model kwargs from the node
|
401
|
-
verbose = getattr(self, "_verbose", self.settings.verbose)
|
402
|
-
debug = getattr(self, "_debug", self.settings.debug)
|
403
|
-
language_model_kwargs = getattr(self, "_language_model_kwargs", {})
|
404
|
-
|
405
|
-
# Get end strategy parameters from node or settings
|
406
|
-
max_steps = getattr(self, "_max_steps", self.settings.max_steps)
|
407
|
-
end_strategy = getattr(self, "_end_strategy", self.settings.end_strategy)
|
408
|
-
end_tool = getattr(self, "_end_tool", self.settings.end_tool)
|
409
|
-
|
410
|
-
if self.settings.tools or self.settings.instructions:
|
411
|
-
# Get model from settings, then language_model_kwargs, then default
|
412
|
-
model = self.settings.model or language_model_kwargs.get(
|
413
|
-
"model", "openai/gpt-4o-mini"
|
414
|
-
)
|
415
|
-
|
416
|
-
# Remove parameters that will be passed explicitly to avoid duplicates
|
417
|
-
filtered_kwargs = {
|
418
|
-
k: v
|
419
|
-
for k, v in language_model_kwargs.items()
|
420
|
-
if k
|
421
|
-
not in [
|
422
|
-
"model",
|
423
|
-
"name",
|
424
|
-
"instructions",
|
425
|
-
"tools",
|
426
|
-
"max_steps",
|
427
|
-
"end_strategy",
|
428
|
-
"end_tool",
|
429
|
-
"verbose",
|
430
|
-
"debug",
|
431
|
-
]
|
432
|
-
}
|
433
|
-
|
434
|
-
agent = Agent(
|
435
|
-
name=self.settings.name or self.action_name,
|
436
|
-
instructions=self.settings.instructions or combined_instructions,
|
437
|
-
model=model,
|
438
|
-
tools=self.settings.tools,
|
439
|
-
max_steps=max_steps,
|
440
|
-
end_strategy=end_strategy,
|
441
|
-
end_tool=end_tool,
|
442
|
-
verbose=verbose,
|
443
|
-
debug=debug,
|
444
|
-
**filtered_kwargs,
|
445
|
-
)
|
446
|
-
# Pass history to context if available
|
447
|
-
history = getattr(self, "_history", None)
|
448
|
-
if history:
|
449
|
-
enhanced_ctx.metadata["history"] = history
|
450
|
-
|
451
|
-
if asyncio.iscoroutinefunction(self.action_func):
|
452
|
-
result = await self.action_func(
|
453
|
-
enhanced_ctx, agent, **action_params
|
454
|
-
)
|
455
|
-
else:
|
456
|
-
result = self.action_func(enhanced_ctx, agent, **action_params)
|
457
|
-
else:
|
458
|
-
# Get model from settings, then language_model_kwargs, then default
|
459
|
-
model = self.settings.model or language_model_kwargs.get(
|
460
|
-
"model", "openai/gpt-4o-mini"
|
461
|
-
)
|
462
|
-
|
463
|
-
# Remove parameters that will be passed explicitly to avoid duplicates
|
464
|
-
filtered_kwargs = {
|
465
|
-
k: v
|
466
|
-
for k, v in language_model_kwargs.items()
|
467
|
-
if k not in ["model", "verbose", "debug"]
|
468
|
-
}
|
469
|
-
|
470
|
-
language_model = LanguageModel(
|
471
|
-
model=model,
|
472
|
-
verbose=verbose,
|
473
|
-
debug=debug,
|
474
|
-
**filtered_kwargs,
|
475
|
-
)
|
476
|
-
# Pass history to context if available
|
477
|
-
history = getattr(self, "_history", None)
|
478
|
-
if history:
|
479
|
-
enhanced_ctx.metadata["history"] = history
|
480
|
-
|
481
|
-
if asyncio.iscoroutinefunction(self.action_func):
|
482
|
-
result = await self.action_func(
|
483
|
-
enhanced_ctx, language_model, **action_params
|
484
|
-
)
|
485
|
-
else:
|
486
|
-
result = self.action_func(
|
487
|
-
enhanced_ctx, language_model, **action_params
|
488
|
-
)
|
489
|
-
else:
|
490
|
-
# New mode: framework handles language model internally
|
491
|
-
# Build the user message from the action parameters
|
492
|
-
user_message = ""
|
493
|
-
if action_params:
|
494
|
-
if len(action_params) == 1:
|
495
|
-
# Single parameter - use its value directly
|
496
|
-
param_value = list(action_params.values())[0]
|
497
|
-
user_message = str(param_value)
|
498
|
-
else:
|
499
|
-
# Multiple parameters - format them clearly
|
500
|
-
param_list = "\n".join(
|
501
|
-
f"{k}: {v}" for k, v in action_params.items()
|
502
|
-
)
|
503
|
-
user_message = param_list
|
504
|
-
else:
|
505
|
-
# No parameters - check if we have previous conversation history
|
506
|
-
# If we do, don't add an empty user message
|
507
|
-
user_message = ""
|
508
|
-
|
509
|
-
# Combine global system prompt with field-level instructions and state
|
510
|
-
combined_instructions = global_system_prompt
|
511
|
-
if field_instructions and field_instructions not in combined_instructions:
|
512
|
-
if combined_instructions:
|
513
|
-
combined_instructions += f"\n\n{field_instructions}"
|
514
|
-
else:
|
515
|
-
combined_instructions = field_instructions
|
516
|
-
|
517
|
-
# Add state to instructions if available
|
518
|
-
if current_state is not None:
|
519
|
-
state_str = convert_to_text(current_state, show_defaults=False)
|
520
|
-
if state_str:
|
521
|
-
combined_instructions += f"\n\nContext: {state_str}"
|
522
|
-
|
523
|
-
# Get verbose/debug flags and language model kwargs from the node
|
524
|
-
verbose = getattr(self, "_verbose", self.settings.verbose)
|
525
|
-
debug = getattr(self, "_debug", self.settings.debug)
|
526
|
-
language_model_kwargs = getattr(self, "_language_model_kwargs", {})
|
527
|
-
|
528
|
-
# Get end strategy parameters from node or settings
|
529
|
-
max_steps = getattr(self, "_max_steps", self.settings.max_steps)
|
530
|
-
end_strategy = getattr(self, "_end_strategy", self.settings.end_strategy)
|
531
|
-
end_tool = getattr(self, "_end_tool", self.settings.end_tool)
|
532
|
-
|
533
|
-
# Determine if we need to use Agent or LanguageModel
|
534
|
-
if self.settings.tools or self.settings.instructions:
|
535
|
-
# Use Agent for complex operations with tools/instructions
|
536
|
-
# Get model from settings, then language_model_kwargs, then default
|
537
|
-
model = self.settings.model or language_model_kwargs.get(
|
538
|
-
"model", "openai/gpt-4o-mini"
|
539
|
-
)
|
540
|
-
|
541
|
-
# Remove parameters that will be passed explicitly to avoid duplicates
|
542
|
-
filtered_kwargs = {
|
543
|
-
k: v
|
544
|
-
for k, v in language_model_kwargs.items()
|
545
|
-
if k
|
546
|
-
not in [
|
547
|
-
"model",
|
548
|
-
"name",
|
549
|
-
"instructions",
|
550
|
-
"tools",
|
551
|
-
"max_steps",
|
552
|
-
"end_strategy",
|
553
|
-
"end_tool",
|
554
|
-
"verbose",
|
555
|
-
"debug",
|
556
|
-
]
|
557
|
-
}
|
558
|
-
|
559
|
-
agent = Agent(
|
560
|
-
name=self.settings.name or self.action_name,
|
561
|
-
instructions=self.settings.instructions or combined_instructions,
|
562
|
-
model=model,
|
563
|
-
tools=self.settings.tools,
|
564
|
-
max_steps=max_steps,
|
565
|
-
end_strategy=end_strategy,
|
566
|
-
end_tool=end_tool,
|
567
|
-
verbose=verbose,
|
568
|
-
debug=debug,
|
569
|
-
**filtered_kwargs,
|
570
|
-
)
|
571
|
-
|
572
|
-
# Get history if available
|
573
|
-
history = getattr(self, "_history", None)
|
574
|
-
|
575
|
-
# Check if we have previous conversation history from the graph execution
|
576
|
-
previous_messages = getattr(self, "_graph_messages", [])
|
577
|
-
|
578
|
-
# Store the current user message for history building
|
579
|
-
if user_message:
|
580
|
-
self._current_user_message = user_message
|
581
|
-
|
582
|
-
# Run the agent with the user message and history
|
583
|
-
if history:
|
584
|
-
# If history is provided, we need to combine it with the user message
|
585
|
-
# The history should be the conversation context, and user_message is the new input
|
586
|
-
combined_messages = parse_messages_input(history)
|
587
|
-
combined_messages.extend(previous_messages)
|
588
|
-
if user_message: # Only add non-empty user messages
|
589
|
-
combined_messages.append(
|
590
|
-
{"role": "user", "content": user_message}
|
591
|
-
)
|
592
|
-
agent_result = await agent.async_run(combined_messages)
|
593
|
-
elif previous_messages:
|
594
|
-
# If we have previous messages from the graph, use them
|
595
|
-
combined_messages = previous_messages.copy()
|
596
|
-
if user_message: # Only add non-empty user messages
|
597
|
-
combined_messages.append(
|
598
|
-
{"role": "user", "content": user_message}
|
599
|
-
)
|
600
|
-
agent_result = await agent.async_run(combined_messages)
|
601
|
-
else:
|
602
|
-
# Only run with user message if it's not empty
|
603
|
-
if user_message:
|
604
|
-
agent_result = await agent.async_run(user_message)
|
605
|
-
else:
|
606
|
-
# If no user message and no history, we can't run the agent
|
607
|
-
raise ValueError(
|
608
|
-
"No user message or history provided for agent execution"
|
609
|
-
)
|
610
|
-
result = agent_result.output
|
611
|
-
else:
|
612
|
-
# Use LanguageModel for simple operations
|
613
|
-
# Get model from settings, then language_model_kwargs, then default
|
614
|
-
model = self.settings.model or language_model_kwargs.get(
|
615
|
-
"model", "openai/gpt-4o-mini"
|
616
|
-
)
|
617
|
-
|
618
|
-
# Remove parameters that will be passed explicitly to avoid duplicates
|
619
|
-
filtered_kwargs = {
|
620
|
-
k: v
|
621
|
-
for k, v in language_model_kwargs.items()
|
622
|
-
if k not in ["model", "verbose", "debug"]
|
623
|
-
}
|
624
|
-
|
625
|
-
language_model = LanguageModel(
|
626
|
-
model=model,
|
627
|
-
verbose=verbose,
|
628
|
-
debug=debug,
|
629
|
-
**filtered_kwargs,
|
630
|
-
)
|
631
|
-
|
632
|
-
# Get history if available
|
633
|
-
history = getattr(self, "_history", None)
|
634
|
-
|
635
|
-
# Check if we have previous conversation history from the graph execution
|
636
|
-
previous_messages = getattr(self, "_graph_messages", [])
|
637
|
-
|
638
|
-
# Create messages using the language model utils
|
639
|
-
if history:
|
640
|
-
# If history is provided, use it as the base messages
|
641
|
-
messages = parse_messages_input(
|
642
|
-
history, instructions=combined_instructions
|
643
|
-
)
|
644
|
-
# Add any previous graph messages
|
645
|
-
messages.extend(previous_messages)
|
646
|
-
# Then add the user message from action parameters
|
647
|
-
if user_message: # Only add non-empty user messages
|
648
|
-
messages.append({"role": "user", "content": user_message})
|
649
|
-
elif previous_messages:
|
650
|
-
# If we have previous messages from the graph, use them
|
651
|
-
messages = parse_messages_input(
|
652
|
-
"", instructions=combined_instructions
|
653
|
-
)
|
654
|
-
messages.extend(previous_messages)
|
655
|
-
if user_message: # Only add non-empty user messages
|
656
|
-
messages.append({"role": "user", "content": user_message})
|
657
|
-
else:
|
658
|
-
# Otherwise, use the user message (if not empty)
|
659
|
-
if user_message:
|
660
|
-
messages = parse_messages_input(
|
661
|
-
user_message, instructions=combined_instructions
|
662
|
-
)
|
663
|
-
else:
|
664
|
-
# If no user message and no history, just use instructions
|
665
|
-
messages = parse_messages_input(
|
666
|
-
"", instructions=combined_instructions
|
667
|
-
)
|
668
|
-
messages = consolidate_system_messages(messages)
|
669
|
-
|
670
|
-
# Store the current user message for history building
|
671
|
-
if user_message:
|
672
|
-
self._current_user_message = user_message
|
673
|
-
|
674
|
-
# Run the language model with the consolidated messages
|
675
|
-
lm_result = await language_model.async_run(messages)
|
676
|
-
result = lm_result.output
|
677
|
-
|
678
|
-
# Get the return type annotation to determine expected output type
|
679
|
-
return_type = sig.return_annotation
|
680
|
-
if return_type != inspect.Parameter.empty and return_type != str:
|
681
|
-
# If the action expects a specific return type, try to parse it
|
682
|
-
# For now, we'll just return the string result
|
683
|
-
# In a full implementation, we'd use structured output parsing
|
684
|
-
pass
|
685
|
-
|
686
|
-
# Handle the result based on settings
|
687
|
-
if isinstance(result, (BaseNode, End)):
|
688
|
-
return result
|
689
|
-
elif self.settings.terminates:
|
690
|
-
return End(result)
|
691
|
-
else:
|
692
|
-
# Check if there's a next action defined
|
693
|
-
if self.settings.next:
|
694
|
-
# Handle different types of next specifications
|
695
|
-
next_action_name = None
|
696
|
-
|
697
|
-
if isinstance(self.settings.next, str):
|
698
|
-
# Simple string case
|
699
|
-
next_action_name = self.settings.next
|
700
|
-
elif isinstance(self.settings.next, list):
|
701
|
-
# List case - for now, just pick the first one
|
702
|
-
# In the future, this could execute all in parallel
|
703
|
-
if self.settings.next:
|
704
|
-
next_action_name = self.settings.next[0]
|
705
|
-
elif isinstance(self.settings.next, SelectionStrategy):
|
706
|
-
# Selection strategy case - use the strategy to pick an action
|
707
|
-
context = {
|
708
|
-
"result": result,
|
709
|
-
"state": getattr(self, "_state", None),
|
710
|
-
"messages": getattr(self, "_graph_messages", []),
|
711
|
-
}
|
712
|
-
# If using all actions, pass them in the context
|
713
|
-
if self.settings.next._use_all_actions and hasattr(
|
714
|
-
self, "_graph_action_nodes"
|
715
|
-
):
|
716
|
-
context["all_actions"] = list(self._graph_action_nodes.keys())
|
717
|
-
next_action_name = self.settings.next.select(context)
|
718
|
-
else:
|
719
|
-
# Invalid type for next
|
720
|
-
return End(result)
|
721
|
-
|
722
|
-
# Find the next node class from the graph's action nodes
|
723
|
-
if hasattr(self, "_graph_action_nodes") and next_action_name:
|
724
|
-
next_node_class = self._graph_action_nodes.get(next_action_name)
|
725
|
-
if next_node_class:
|
726
|
-
# Create the next node instance
|
727
|
-
# For graph flow, we don't pass the result as a parameter
|
728
|
-
# The conversation history will contain the context
|
729
|
-
next_node = next_node_class()
|
730
|
-
|
731
|
-
# Copy over any graph-specific attributes
|
732
|
-
for attr in [
|
733
|
-
"_graph_docstring",
|
734
|
-
"_verbose",
|
735
|
-
"_debug",
|
736
|
-
"_language_model_kwargs",
|
737
|
-
"_history",
|
738
|
-
"_state",
|
739
|
-
"_graph_action_nodes",
|
740
|
-
"_execution_tracker",
|
741
|
-
]:
|
742
|
-
if hasattr(self, attr):
|
743
|
-
setattr(next_node, attr, getattr(self, attr))
|
744
|
-
|
745
|
-
# Build up the conversation history for the next node
|
746
|
-
current_messages = getattr(self, "_graph_messages", [])
|
747
|
-
# Add the current interaction to the conversation history
|
748
|
-
# Only add the user message if it was actually provided (not empty)
|
749
|
-
if (
|
750
|
-
hasattr(self, "_current_user_message")
|
751
|
-
and self._current_user_message
|
752
|
-
):
|
753
|
-
current_messages.append(
|
754
|
-
{"role": "user", "content": self._current_user_message}
|
755
|
-
)
|
756
|
-
# Add the assistant response from this node
|
757
|
-
current_messages.append(
|
758
|
-
{"role": "assistant", "content": str(result)}
|
759
|
-
)
|
760
|
-
next_node._graph_messages = current_messages
|
761
|
-
|
762
|
-
return next_node
|
763
|
-
|
764
|
-
# If we can't find any valid next node, terminate
|
765
|
-
return End(result)
|
766
|
-
else:
|
767
|
-
# No next action defined, terminate
|
768
|
-
return End(result)
|
769
|
-
|
770
|
-
|
771
|
-
class ActionDecorator:
|
772
|
-
"""Decorator for creating actions that become nodes in the graph."""
|
773
|
-
|
774
|
-
def __init__(self):
|
775
|
-
self._actions: Dict[str, Type[ActionNode]] = {}
|
776
|
-
self._start_action: Optional[str] = None
|
777
|
-
|
778
|
-
def __call__(
|
779
|
-
self,
|
780
|
-
func: Optional[Callable] = None,
|
781
|
-
*,
|
782
|
-
model: Optional[LanguageModelName | str] = None,
|
783
|
-
temperature: Optional[float] = None,
|
784
|
-
max_tokens: Optional[int] = None,
|
785
|
-
tools: Optional[List[Callable]] = None,
|
786
|
-
start: bool = False,
|
787
|
-
terminates: bool = False,
|
788
|
-
xml: Optional[str] = None,
|
789
|
-
next: Optional[Union[str, List[str], SelectionStrategy]] = None,
|
790
|
-
read_history: bool = False,
|
791
|
-
persist_history: bool = False,
|
792
|
-
condition: Optional[str] = None,
|
793
|
-
name: Optional[str] = None,
|
794
|
-
instructions: Optional[str] = None,
|
795
|
-
verbose: bool = False,
|
796
|
-
debug: bool = False,
|
797
|
-
# Agent end strategy parameters
|
798
|
-
max_steps: Optional[int] = None,
|
799
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
800
|
-
end_tool: Optional[Callable] = None,
|
801
|
-
**kwargs: Any,
|
802
|
-
) -> Union[Callable, Type[ActionNode]]:
|
803
|
-
"""Main action decorator."""
|
804
|
-
|
805
|
-
settings = ActionSettings(
|
806
|
-
model=model,
|
807
|
-
temperature=temperature,
|
808
|
-
max_tokens=max_tokens,
|
809
|
-
tools=tools or [],
|
810
|
-
start=start,
|
811
|
-
terminates=terminates,
|
812
|
-
xml=xml,
|
813
|
-
next=next,
|
814
|
-
read_history=read_history,
|
815
|
-
persist_history=persist_history,
|
816
|
-
condition=condition,
|
817
|
-
name=name,
|
818
|
-
instructions=instructions,
|
819
|
-
verbose=verbose,
|
820
|
-
debug=debug,
|
821
|
-
max_steps=max_steps,
|
822
|
-
end_strategy=end_strategy,
|
823
|
-
end_tool=end_tool,
|
824
|
-
kwargs=kwargs,
|
825
|
-
)
|
826
|
-
|
827
|
-
def decorator(f: Callable) -> Callable:
|
828
|
-
action_name = name or f.__name__
|
829
|
-
|
830
|
-
# Check if action name is reserved
|
831
|
-
reserved_names = {
|
832
|
-
'run', 'async_run', 'iter', 'async_iter',
|
833
|
-
'visualize', 'builder', 'as_a2a',
|
834
|
-
'_initialize', '_collect_state_class', '_collect_actions',
|
835
|
-
'_create_pydantic_graph', '_get_start_action_signature'
|
836
|
-
}
|
837
|
-
if action_name in reserved_names:
|
838
|
-
raise ValueError(
|
839
|
-
f"Action name '{action_name}' is reserved and cannot be used. "
|
840
|
-
f"Reserved names include: {', '.join(sorted(reserved_names))}. "
|
841
|
-
"Please choose a different name for your action."
|
842
|
-
)
|
843
|
-
|
844
|
-
# Check that the action has at least one parameter besides 'self'
|
845
|
-
sig = inspect.signature(f)
|
846
|
-
params = [p for p in sig.parameters if p != 'self']
|
847
|
-
if not params:
|
848
|
-
raise ValueError(
|
849
|
-
f"Action '{action_name}' must have at least one parameter besides 'self'. "
|
850
|
-
"Actions need input parameters to process."
|
851
|
-
)
|
852
|
-
|
853
|
-
# Create a dynamic ActionNode class for this specific action with unique name
|
854
|
-
class DynamicActionNode(ActionNode[StateT]):
|
855
|
-
def __init__(self, **action_params):
|
856
|
-
super().__init__(
|
857
|
-
action_name=action_name,
|
858
|
-
action_func=f,
|
859
|
-
settings=settings,
|
860
|
-
**action_params,
|
861
|
-
)
|
862
|
-
|
863
|
-
@classmethod
|
864
|
-
def get_node_id(cls):
|
865
|
-
"""Override to provide unique node ID based on action name."""
|
866
|
-
return f"DynamicActionNode_{action_name}"
|
867
|
-
|
868
|
-
# Store the action
|
869
|
-
self._actions[action_name] = DynamicActionNode
|
870
|
-
if start:
|
871
|
-
if self._start_action is not None:
|
872
|
-
raise ValueError(
|
873
|
-
f"Multiple start actions: {self._start_action} and {action_name}"
|
874
|
-
)
|
875
|
-
self._start_action = action_name
|
876
|
-
|
877
|
-
# Return the original function with metadata attached
|
878
|
-
f._action_name = action_name
|
879
|
-
f._action_settings = settings
|
880
|
-
f._action_node_class = DynamicActionNode
|
881
|
-
f._is_start = start
|
882
|
-
|
883
|
-
return f
|
884
|
-
|
885
|
-
if func is None:
|
886
|
-
return decorator
|
887
|
-
else:
|
888
|
-
return decorator(func)
|
889
|
-
|
890
|
-
def start(
|
891
|
-
self, func: Optional[Callable] = None, **kwargs
|
892
|
-
) -> Union[Callable, Type[ActionNode]]:
|
893
|
-
"""Decorator for start actions."""
|
894
|
-
return self.__call__(func, start=True, **kwargs)
|
895
|
-
|
896
|
-
def end(
|
897
|
-
self, func: Optional[Callable] = None, **kwargs
|
898
|
-
) -> Union[Callable, Type[ActionNode]]:
|
899
|
-
"""Decorator for end actions."""
|
900
|
-
return self.__call__(func, terminates=True, **kwargs)
|
901
|
-
|
902
|
-
|
903
|
-
# Global action decorator
|
904
|
-
action = ActionDecorator()
|
905
|
-
|
906
|
-
|
907
|
-
class GraphBuilder(Generic[StateT, T]):
|
908
|
-
"""Builder for creating graphs with plugins and configuration."""
|
909
|
-
|
910
|
-
def __init__(self, graph_class: Type["BaseGraph[StateT, T]"]):
|
911
|
-
self.graph_class = graph_class
|
912
|
-
self.plugins: List[BasePlugin] = []
|
913
|
-
self.global_model: Optional[LanguageModelName] = None
|
914
|
-
self.global_settings: Dict[str, Any] = {}
|
915
|
-
|
916
|
-
def with_plugin(self, plugin: BasePlugin) -> "GraphBuilder[StateT, T]":
|
917
|
-
"""Add a plugin to the graph."""
|
918
|
-
self.plugins.append(plugin)
|
919
|
-
return self
|
920
|
-
|
921
|
-
def with_model(self, model: LanguageModelName) -> "GraphBuilder[StateT, T]":
|
922
|
-
"""Set the global model for the graph."""
|
923
|
-
self.global_model = model
|
924
|
-
return self
|
925
|
-
|
926
|
-
def with_settings(self, **settings: Any) -> "GraphBuilder[StateT, T]":
|
927
|
-
"""Set global settings for the graph."""
|
928
|
-
self.global_settings.update(settings)
|
929
|
-
return self
|
930
|
-
|
931
|
-
def build(self) -> "BaseGraph[StateT, T]":
|
932
|
-
"""Build the graph instance."""
|
933
|
-
instance = self.graph_class()
|
934
|
-
instance._plugins = self.plugins
|
935
|
-
instance._global_model = self.global_model
|
936
|
-
instance._global_settings = self.global_settings
|
937
|
-
instance._initialize()
|
938
|
-
return instance
|
939
|
-
|
940
|
-
|
941
|
-
class BaseGraph(Generic[StateT, T]):
|
942
|
-
"""Base class for graphs that provides action decorator support on top of pydantic-graph."""
|
943
|
-
|
944
|
-
def __init__(
|
945
|
-
self,
|
946
|
-
state: Optional[StateT] = None,
|
947
|
-
*,
|
948
|
-
model: Optional[LanguageModelName | str] = "openai/gpt-4.1-nano",
|
949
|
-
temperature: Optional[float] = None,
|
950
|
-
max_tokens: Optional[int] = None,
|
951
|
-
tools: Optional[List[Callable]] = None,
|
952
|
-
verbose: bool = False,
|
953
|
-
debug: bool = False,
|
954
|
-
max_steps: Optional[int] = None,
|
955
|
-
end_strategy: Optional[Literal["tool"]] = None,
|
956
|
-
end_tool: Optional[Callable] = None,
|
957
|
-
summarize_tools: bool = True,
|
958
|
-
summarize_tools_with_model: bool = False,
|
959
|
-
plugins: Optional[List[BasePlugin]] = None,
|
960
|
-
**kwargs: Any,
|
961
|
-
):
|
962
|
-
self._plugins: List[BasePlugin] = plugins or []
|
963
|
-
self._global_model: Optional[LanguageModelName] = model
|
964
|
-
self._global_settings: Dict[str, Any] = {
|
965
|
-
"temperature": temperature,
|
966
|
-
"max_tokens": max_tokens,
|
967
|
-
"tools": tools,
|
968
|
-
"verbose": verbose,
|
969
|
-
"debug": debug,
|
970
|
-
"max_steps": max_steps,
|
971
|
-
"end_strategy": end_strategy,
|
972
|
-
"end_tool": end_tool,
|
973
|
-
"summarize_tools": summarize_tools,
|
974
|
-
"summarize_tools_with_model": summarize_tools_with_model,
|
975
|
-
**kwargs,
|
976
|
-
}
|
977
|
-
# Remove None values from settings
|
978
|
-
self._global_settings = {
|
979
|
-
k: v for k, v in self._global_settings.items() if v is not None
|
980
|
-
}
|
981
|
-
|
982
|
-
self._pydantic_graph: Optional[PydanticGraph] = None
|
983
|
-
self._action_nodes: Dict[str, Type[ActionNode]] = {}
|
984
|
-
self._start_action_name: Optional[str] = None
|
985
|
-
self._start_action_func: Optional[Callable] = None
|
986
|
-
self._state: Optional[StateT] = state
|
987
|
-
self._state_class: Optional[Type[StateT]] = None
|
988
|
-
# Initialize the graph automatically
|
989
|
-
self._initialize()
|
990
|
-
|
991
|
-
def _initialize(self) -> None:
|
992
|
-
"""Initialize the graph by collecting actions and creating the pydantic graph."""
|
993
|
-
self._collect_state_class()
|
994
|
-
self._collect_actions()
|
995
|
-
self._create_pydantic_graph()
|
996
|
-
|
997
|
-
def _collect_state_class(self) -> None:
|
998
|
-
"""Collect the State class if defined in the graph."""
|
999
|
-
# Look for a State class defined in the graph
|
1000
|
-
for attr_name in dir(self.__class__):
|
1001
|
-
attr = getattr(self.__class__, attr_name)
|
1002
|
-
if (
|
1003
|
-
isinstance(attr, type)
|
1004
|
-
and attr_name == "State"
|
1005
|
-
and attr != self.__class__
|
1006
|
-
):
|
1007
|
-
self._state_class = attr
|
1008
|
-
# If no state was provided in constructor, try to create default instance
|
1009
|
-
if self._state is None:
|
1010
|
-
try:
|
1011
|
-
if hasattr(attr, "__call__"):
|
1012
|
-
self._state = attr()
|
1013
|
-
except Exception:
|
1014
|
-
# If we can't create a default instance, leave it as None
|
1015
|
-
pass
|
1016
|
-
break
|
1017
|
-
|
1018
|
-
def _collect_actions(self) -> None:
|
1019
|
-
"""Collect all actions defined in the graph class."""
|
1020
|
-
actions_found = []
|
1021
|
-
start_action = None
|
1022
|
-
end_action = None
|
1023
|
-
|
1024
|
-
# Get the graph class docstring for global system prompt
|
1025
|
-
graph_docstring = self.__class__.__doc__ or ""
|
1026
|
-
|
1027
|
-
for attr_name in dir(self):
|
1028
|
-
attr = getattr(self, attr_name)
|
1029
|
-
if hasattr(attr, "_action_name"):
|
1030
|
-
action_name = attr._action_name
|
1031
|
-
action_node_class = attr._action_node_class
|
1032
|
-
|
1033
|
-
self._action_nodes[action_name] = action_node_class
|
1034
|
-
actions_found.append((action_name, attr))
|
1035
|
-
|
1036
|
-
if hasattr(attr, "_is_start") and attr._is_start:
|
1037
|
-
if self._start_action_name is not None:
|
1038
|
-
raise ValueError(
|
1039
|
-
f"Multiple start actions: {self._start_action_name} and {action_name}"
|
1040
|
-
)
|
1041
|
-
self._start_action_name = action_name
|
1042
|
-
self._start_action_func = attr
|
1043
|
-
start_action = attr
|
1044
|
-
|
1045
|
-
# Check if this is an end action (terminates=True)
|
1046
|
-
if (
|
1047
|
-
hasattr(attr, "_action_settings")
|
1048
|
-
and attr._action_settings.terminates
|
1049
|
-
):
|
1050
|
-
end_action = attr
|
1051
|
-
|
1052
|
-
# If no explicit start action was defined and we have exactly one action,
|
1053
|
-
# automatically make it the start action
|
1054
|
-
if self._start_action_name is None and len(actions_found) == 1:
|
1055
|
-
action_name, action_func = actions_found[0]
|
1056
|
-
self._start_action_name = action_name
|
1057
|
-
self._start_action_func = action_func
|
1058
|
-
|
1059
|
-
# Special case: If we have exactly 2 actions (start -> end), automatically set up routing
|
1060
|
-
if len(actions_found) == 2 and start_action and end_action:
|
1061
|
-
# Check if the start action doesn't already have a 'next' defined
|
1062
|
-
if start_action._action_settings.next is None:
|
1063
|
-
# Automatically set the start action to route to the end action
|
1064
|
-
start_action._action_settings.next = end_action._action_name
|
1065
|
-
|
1066
|
-
# Store the graph docstring in all action nodes for access during execution
|
1067
|
-
for action_node_class in self._action_nodes.values():
|
1068
|
-
# We'll add this to the action node instances when they're created
|
1069
|
-
action_node_class._graph_docstring = graph_docstring
|
1070
|
-
|
1071
|
-
def _create_pydantic_graph(self) -> None:
|
1072
|
-
"""Create the underlying pydantic graph from collected actions."""
|
1073
|
-
if not self._action_nodes:
|
1074
|
-
raise ValueError("No actions defined in graph")
|
1075
|
-
|
1076
|
-
# Create the pydantic graph with the node classes
|
1077
|
-
node_classes = list(self._action_nodes.values())
|
1078
|
-
self._pydantic_graph = PydanticGraph(nodes=node_classes)
|
1079
|
-
|
1080
|
-
def _get_start_action_signature(self) -> inspect.Signature:
|
1081
|
-
"""Get the signature of the start action for type-safe run methods."""
|
1082
|
-
if self._start_action_func is None:
|
1083
|
-
return inspect.Signature([])
|
1084
|
-
|
1085
|
-
sig = inspect.signature(self._start_action_func)
|
1086
|
-
# Filter out 'self', 'ctx'/'context', 'agent', 'language_model' parameters
|
1087
|
-
params = []
|
1088
|
-
for param_name, param in sig.parameters.items():
|
1089
|
-
if param_name not in ("self", "ctx", "context", "agent", "language_model"):
|
1090
|
-
params.append(param)
|
1091
|
-
|
1092
|
-
return inspect.Signature(params)
|
1093
|
-
|
1094
|
-
def run(
|
1095
|
-
self,
|
1096
|
-
*args,
|
1097
|
-
state: Optional[StateT] = None,
|
1098
|
-
history: Optional[AgentMessages] = None,
|
1099
|
-
verbose: bool = False,
|
1100
|
-
debug: bool = False,
|
1101
|
-
**kwargs,
|
1102
|
-
) -> GraphResponse[T, StateT]:
|
1103
|
-
"""
|
1104
|
-
Run the graph with the given parameters.
|
1105
|
-
The signature is dynamically determined by the start action.
|
1106
|
-
|
1107
|
-
Args:
|
1108
|
-
*args: Arguments for the start action
|
1109
|
-
state: Optional state object to use for the execution
|
1110
|
-
history: Optional chat history in various formats (str, messages list, History object)
|
1111
|
-
verbose: Enable verbose logging
|
1112
|
-
debug: Enable debug logging
|
1113
|
-
**kwargs: Additional keyword arguments for the start action and language model
|
1114
|
-
|
1115
|
-
Returns:
|
1116
|
-
GraphResponse containing the execution result and metadata
|
1117
|
-
"""
|
1118
|
-
|
1119
|
-
if self._start_action_name is None:
|
1120
|
-
raise ValueError("No start action defined")
|
1121
|
-
|
1122
|
-
# Get the start action node class
|
1123
|
-
start_node_class = self._action_nodes[self._start_action_name]
|
1124
|
-
|
1125
|
-
# Create the start node instance with the provided arguments
|
1126
|
-
start_sig = self._get_start_action_signature()
|
1127
|
-
|
1128
|
-
# Separate language model kwargs from start action kwargs
|
1129
|
-
language_model_kwargs = {}
|
1130
|
-
start_action_kwargs = {}
|
1131
|
-
|
1132
|
-
# Language model specific parameters
|
1133
|
-
lm_params = {
|
1134
|
-
"temperature",
|
1135
|
-
"max_tokens",
|
1136
|
-
"top_p",
|
1137
|
-
"frequency_penalty",
|
1138
|
-
"presence_penalty",
|
1139
|
-
"stop",
|
1140
|
-
"stream",
|
1141
|
-
"response_format",
|
1142
|
-
"seed",
|
1143
|
-
"tools",
|
1144
|
-
"tool_choice",
|
1145
|
-
"parallel_tool_calls",
|
1146
|
-
"functions",
|
1147
|
-
"function_call",
|
1148
|
-
"user",
|
1149
|
-
"system",
|
1150
|
-
"n",
|
1151
|
-
"echo",
|
1152
|
-
"logprobs",
|
1153
|
-
"top_logprobs",
|
1154
|
-
"suffix",
|
1155
|
-
"max_retries",
|
1156
|
-
"timeout",
|
1157
|
-
"model",
|
1158
|
-
"type",
|
1159
|
-
"instructor_mode",
|
1160
|
-
"max_steps",
|
1161
|
-
"end_strategy",
|
1162
|
-
"end_tool",
|
1163
|
-
}
|
1164
|
-
|
1165
|
-
for key, value in kwargs.items():
|
1166
|
-
if key in lm_params:
|
1167
|
-
language_model_kwargs[key] = value
|
1168
|
-
else:
|
1169
|
-
start_action_kwargs[key] = value
|
1170
|
-
|
1171
|
-
# Bind arguments to start action parameters
|
1172
|
-
try:
|
1173
|
-
bound_args = start_sig.bind(*args, **start_action_kwargs)
|
1174
|
-
bound_args.apply_defaults()
|
1175
|
-
except TypeError as e:
|
1176
|
-
raise ValueError(
|
1177
|
-
f"Invalid arguments for start action '{self._start_action_name}': {e}"
|
1178
|
-
)
|
1179
|
-
|
1180
|
-
start_node = start_node_class(**bound_args.arguments)
|
1181
|
-
# Pass the graph docstring to the node for global system prompt
|
1182
|
-
start_node._graph_docstring = self.__class__.__doc__ or ""
|
1183
|
-
|
1184
|
-
# Merge global settings with provided kwargs
|
1185
|
-
merged_settings = self._global_settings.copy()
|
1186
|
-
merged_settings.update(language_model_kwargs)
|
1187
|
-
|
1188
|
-
# Include the global model if it's set and not overridden
|
1189
|
-
if self._global_model and "model" not in merged_settings:
|
1190
|
-
merged_settings["model"] = self._global_model
|
1191
|
-
|
1192
|
-
# Pass verbose/debug flags (prefer explicit params over global settings)
|
1193
|
-
start_node._verbose = (
|
1194
|
-
verbose if verbose else merged_settings.get("verbose", False)
|
1195
|
-
)
|
1196
|
-
start_node._debug = debug if debug else merged_settings.get("debug", False)
|
1197
|
-
start_node._language_model_kwargs = merged_settings
|
1198
|
-
|
1199
|
-
# Pass history if provided
|
1200
|
-
start_node._history = history
|
1201
|
-
# Pass the graph's action nodes for routing
|
1202
|
-
start_node._graph_action_nodes = self._action_nodes
|
1203
|
-
|
1204
|
-
# Initialize execution tracking
|
1205
|
-
self._execution_tracker = []
|
1206
|
-
start_node._execution_tracker = self._execution_tracker
|
1207
|
-
|
1208
|
-
# Pass end strategy parameters (from merged settings)
|
1209
|
-
if "max_steps" in merged_settings:
|
1210
|
-
start_node._max_steps = merged_settings["max_steps"]
|
1211
|
-
if "end_strategy" in merged_settings:
|
1212
|
-
start_node._end_strategy = merged_settings["end_strategy"]
|
1213
|
-
if "end_tool" in merged_settings:
|
1214
|
-
start_node._end_tool = merged_settings["end_tool"]
|
1215
|
-
|
1216
|
-
# Run the pydantic graph
|
1217
|
-
if not self._pydantic_graph:
|
1218
|
-
raise ValueError("Graph not initialized")
|
1219
|
-
|
1220
|
-
# Use the provided state or the graph's state
|
1221
|
-
execution_state = state if state is not None else self._state
|
1222
|
-
# Pass state to the node
|
1223
|
-
start_node._state = execution_state
|
1224
|
-
|
1225
|
-
# Execute the graph using pydantic-graph
|
1226
|
-
try:
|
1227
|
-
# For now, use sync execution - would implement proper async support
|
1228
|
-
result = self._pydantic_graph.run_sync(start_node, state=execution_state)
|
1229
|
-
|
1230
|
-
# Extract the actual output from pydantic-graph result
|
1231
|
-
if hasattr(result, "data"):
|
1232
|
-
output = result.data
|
1233
|
-
elif hasattr(result, "output"):
|
1234
|
-
output = result.output
|
1235
|
-
else:
|
1236
|
-
output = str(result)
|
1237
|
-
|
1238
|
-
# Get nodes executed from the execution tracker
|
1239
|
-
nodes_executed = getattr(self, "_execution_tracker", [])
|
1240
|
-
|
1241
|
-
# If no nodes tracked, at least include the start node
|
1242
|
-
if not nodes_executed:
|
1243
|
-
nodes_executed = [self._start_action_name]
|
1244
|
-
|
1245
|
-
# Create our response object
|
1246
|
-
return GraphResponse(
|
1247
|
-
type="graph",
|
1248
|
-
model=self._global_model or "openai/gpt-4o-mini",
|
1249
|
-
output=output,
|
1250
|
-
content=str(output),
|
1251
|
-
completion=None,
|
1252
|
-
state=execution_state,
|
1253
|
-
history=[], # Would be populated from pydantic-graph execution
|
1254
|
-
start_node=self._start_action_name,
|
1255
|
-
nodes_executed=nodes_executed,
|
1256
|
-
metadata={},
|
1257
|
-
)
|
1258
|
-
|
1259
|
-
except Exception as e:
|
1260
|
-
raise RuntimeError(f"Graph execution failed: {e}") from e
|
1261
|
-
|
1262
|
-
-    def iter(
-        self,
-        *args,
-        state: Optional[StateT] = None,
-        history: Optional[AgentMessages] = None,
-        verbose: bool = False,
-        debug: bool = False,
-        max_steps: Optional[int] = None,
-        end_strategy: Optional[Literal["tool"]] = None,
-        end_tool: Optional[Callable] = None,
-        **kwargs,
-    ) -> GraphStream[T, StateT]:
-        """
-        Create an iterator for the graph execution.
-        The signature is dynamically determined by the start action.
-
-        Args:
-            *args: Arguments for the start action
-            state: Optional state object to use for the execution
-            history: Optional chat history in various formats (str, messages list, History object)
-            verbose: Enable verbose logging
-            debug: Enable debug logging
-            max_steps: Maximum number of steps to execute
-            end_strategy: Strategy for ending execution
-            end_tool: Tool to use for ending execution
-            **kwargs: Additional keyword arguments for the start action and language model
-
-        Returns:
-            GraphStream that can be iterated over to get each execution step
-        """
-
-        if self._start_action_name is None:
-            raise ValueError("No start action defined")
-
-        # Get the start action node class
-        start_node_class = self._action_nodes[self._start_action_name]
-
-        # Create the start node instance with the provided arguments
-        start_sig = self._get_start_action_signature()
-
-        # Separate language model kwargs from start action kwargs
-        language_model_kwargs = {}
-        start_action_kwargs = {}
-
-        # Language model specific parameters
-        lm_params = {
-            "temperature",
-            "max_tokens",
-            "top_p",
-            "frequency_penalty",
-            "presence_penalty",
-            "stop",
-            "stream",
-            "response_format",
-            "seed",
-            "tools",
-            "tool_choice",
-            "parallel_tool_calls",
-            "functions",
-            "function_call",
-            "user",
-            "system",
-            "n",
-            "echo",
-            "logprobs",
-            "top_logprobs",
-            "suffix",
-            "max_retries",
-            "timeout",
-            "model",
-            "type",
-            "instructor_mode",
-            "max_steps",
-            "end_strategy",
-            "end_tool",
-        }
-
-        for key, value in kwargs.items():
-            if key in lm_params:
-                language_model_kwargs[key] = value
-            else:
-                start_action_kwargs[key] = value
-
-        try:
-            bound_args = start_sig.bind(*args, **start_action_kwargs)
-            bound_args.apply_defaults()
-        except TypeError as e:
-            raise ValueError(
-                f"Invalid arguments for start action '{self._start_action_name}': {e}"
-            ) from e
-
-        start_node = start_node_class(**bound_args.arguments)
-        # Pass the graph docstring to the node for global system prompt
-        start_node._graph_docstring = self.__class__.__doc__ or ""
-
-        # Merge global settings with provided kwargs
-        merged_settings = self._global_settings.copy()
-        merged_settings.update(language_model_kwargs)
-
-        # Include the global model if it's set and not overridden
-        if self._global_model and "model" not in merged_settings:
-            merged_settings["model"] = self._global_model
-
-        # Pass verbose/debug flags (prefer explicit params over global settings)
-        start_node._verbose = (
-            verbose if verbose else merged_settings.get("verbose", False)
-        )
-        start_node._debug = debug if debug else merged_settings.get("debug", False)
-        start_node._language_model_kwargs = merged_settings
-
-        # Pass history if provided
-        start_node._history = history
-        # Pass the graph's action nodes for routing
-        start_node._graph_action_nodes = self._action_nodes
-
-        # Pass end strategy parameters (prefer explicit params over merged settings)
-        start_node._max_steps = (
-            max_steps if max_steps is not None else merged_settings.get("max_steps")
-        )
-        start_node._end_strategy = (
-            end_strategy
-            if end_strategy is not None
-            else merged_settings.get("end_strategy")
-        )
-        start_node._end_tool = (
-            end_tool if end_tool is not None else merged_settings.get("end_tool")
-        )
-
-        # Use the provided state or the graph's state
-        execution_state = state if state is not None else self._state
-        # Pass state to the node
-        start_node._state = execution_state
-
-        # Create and return GraphStream
-        return GraphStream(
-            graph=self,
-            start_node=start_node,
-            state=execution_state,
-            verbose=verbose,
-            debug=debug,
-            max_steps=max_steps,
-            end_strategy=end_strategy,
-            end_tool=end_tool,
-            **language_model_kwargs,
-        )
-
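As a reading aid, a hedged sketch of consuming the removed `iter` API; note how keyword arguments are split between the language model and the start action via the `lm_params` set above:

```python
# Hypothetical: iterate execution steps via the removed iter() API.
graph = MyGraph()
# "temperature" is in lm_params, so it is routed to the language model;
# any other keyword would be bound to the start action's signature.
stream = graph.iter("hello", temperature=0.0, max_steps=3)
for step in stream:  # GraphStream yields one execution step at a time
    print(step)
```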
-    async def async_run(
-        self,
-        *args,
-        state: Optional[StateT] = None,
-        history: Optional[AgentMessages] = None,
-        verbose: bool = False,
-        debug: bool = False,
-        max_steps: Optional[int] = None,
-        end_strategy: Optional[Literal["tool"]] = None,
-        end_tool: Optional[Callable] = None,
-        **kwargs,
-    ) -> GraphResponse[T, StateT]:
-        """Async version of run.
-
-        Args:
-            *args: Arguments for the start action
-            state: Optional state object to use for the execution
-            history: Optional chat history in various formats (str, messages list, History object)
-            verbose: Enable verbose logging
-            debug: Enable debug logging
-            max_steps: Maximum number of steps to execute
-            end_strategy: Strategy for ending execution
-            end_tool: Tool to use for ending execution
-            **kwargs: Additional keyword arguments for the start action and language model
-
-        Returns:
-            GraphResponse containing the execution result and metadata
-        """
-
-        if self._start_action_name is None:
-            raise ValueError("No start action defined")
-
-        # Get the start action node class
-        start_node_class = self._action_nodes[self._start_action_name]
-
-        # Create the start node instance with the provided arguments
-        start_sig = self._get_start_action_signature()
-
-        # Separate language model kwargs from start action kwargs
-        language_model_kwargs = {}
-        start_action_kwargs = {}
-
-        # Language model specific parameters
-        lm_params = {
-            "temperature",
-            "max_tokens",
-            "top_p",
-            "frequency_penalty",
-            "presence_penalty",
-            "stop",
-            "stream",
-            "response_format",
-            "seed",
-            "tools",
-            "tool_choice",
-            "parallel_tool_calls",
-            "functions",
-            "function_call",
-            "user",
-            "system",
-            "n",
-            "echo",
-            "logprobs",
-            "top_logprobs",
-            "suffix",
-            "max_retries",
-            "timeout",
-            "model",
-            "type",
-            "instructor_mode",
-            "max_steps",
-            "end_strategy",
-            "end_tool",
-        }
-
-        for key, value in kwargs.items():
-            if key in lm_params:
-                language_model_kwargs[key] = value
-            else:
-                start_action_kwargs[key] = value
-
-        try:
-            bound_args = start_sig.bind(*args, **start_action_kwargs)
-            bound_args.apply_defaults()
-        except TypeError as e:
-            raise ValueError(
-                f"Invalid arguments for start action '{self._start_action_name}': {e}"
-            ) from e
-
-        start_node = start_node_class(**bound_args.arguments)
-        # Pass the graph docstring to the node for global system prompt
-        start_node._graph_docstring = self.__class__.__doc__ or ""
-
-        # Merge global settings with provided kwargs
-        merged_settings = self._global_settings.copy()
-        merged_settings.update(language_model_kwargs)
-
-        # Include the global model if it's set and not overridden
-        if self._global_model and "model" not in merged_settings:
-            merged_settings["model"] = self._global_model
-
-        # Pass verbose/debug flags (prefer explicit params over global settings)
-        start_node._verbose = (
-            verbose if verbose else merged_settings.get("verbose", False)
-        )
-        start_node._debug = debug if debug else merged_settings.get("debug", False)
-        start_node._language_model_kwargs = merged_settings
-
-        # Pass history if provided
-        start_node._history = history
-        # Pass the graph's action nodes for routing
-        start_node._graph_action_nodes = self._action_nodes
-
-        # Initialize execution tracking
-        self._execution_tracker = []
-        start_node._execution_tracker = self._execution_tracker
-
-        # Pass end strategy parameters (prefer explicit params over merged settings)
-        start_node._max_steps = (
-            max_steps if max_steps is not None else merged_settings.get("max_steps")
-        )
-        start_node._end_strategy = (
-            end_strategy
-            if end_strategy is not None
-            else merged_settings.get("end_strategy")
-        )
-        start_node._end_tool = (
-            end_tool if end_tool is not None else merged_settings.get("end_tool")
-        )
-
-        # Run the pydantic graph asynchronously
-        if not self._pydantic_graph:
-            raise ValueError("Graph not initialized")
-
-        # Use the provided state or the graph's state
-        execution_state = state if state is not None else self._state
-        # Pass state to the node
-        start_node._state = execution_state
-
-        try:
-            # Execute the graph using pydantic-graph async
-            result = await self._pydantic_graph.run(start_node, state=execution_state)
-
-            # Extract the actual output from pydantic-graph result
-            if hasattr(result, "data"):
-                output = result.data
-            elif hasattr(result, "output"):
-                output = result.output
-            else:
-                output = str(result)
-
-            # Get nodes executed from the execution tracker
-            nodes_executed = getattr(self, "_execution_tracker", [])
-
-            # If no nodes tracked, at least include the start node
-            if not nodes_executed:
-                nodes_executed = [self._start_action_name]
-
-            # Create our response object
-            return GraphResponse(
-                type="graph",
-                model=self._global_model or "openai/gpt-4o-mini",
-                output=output,
-                content=str(output),
-                completion=None,
-                state=execution_state,
-                history=[],  # Would be populated from pydantic-graph execution
-                start_node=self._start_action_name,
-                nodes_executed=nodes_executed,
-                metadata={},
-            )
-
-        except Exception as e:
-            raise RuntimeError(f"Async graph execution failed: {e}") from e
-
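A hedged sketch of the removed async entry point, which awaited pydantic-graph's async runner instead of calling `run_sync`:

```python
import asyncio

# Hypothetical usage of the removed async_run() API (names assumed).
async def main() -> None:
    graph = MyGraph()
    response = await graph.async_run("hello", max_steps=5)
    print(response.content)  # str(output), per the GraphResponse built above

asyncio.run(main())
```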
-    async def async_iter(
-        self,
-        *args,
-        state: Optional[StateT] = None,
-        history: Optional[AgentMessages] = None,
-        verbose: bool = False,
-        debug: bool = False,
-        max_steps: Optional[int] = None,
-        end_strategy: Optional[Literal["tool"]] = None,
-        end_tool: Optional[Callable] = None,
-        **kwargs,
-    ) -> GraphStream[T, StateT]:
-        """Async version of iter.
-
-        Args:
-            *args: Arguments for the start action
-            state: Optional state object to use for the execution
-            history: Optional chat history in various formats (str, messages list, History object)
-            verbose: Enable verbose logging
-            debug: Enable debug logging
-            max_steps: Maximum number of steps to execute
-            end_strategy: Strategy for ending execution
-            end_tool: Tool to use for ending execution
-            **kwargs: Additional keyword arguments for the start action and language model
-
-        Returns:
-            GraphStream that can be iterated over asynchronously
-        """
-
-        if self._start_action_name is None:
-            raise ValueError("No start action defined")
-
-        start_node_class = self._action_nodes[self._start_action_name]
-        start_sig = self._get_start_action_signature()
-
-        # Separate language model kwargs from start action kwargs
-        language_model_kwargs = {}
-        start_action_kwargs = {}
-
-        # Language model specific parameters
-        lm_params = {
-            "temperature",
-            "max_tokens",
-            "top_p",
-            "frequency_penalty",
-            "presence_penalty",
-            "stop",
-            "stream",
-            "response_format",
-            "seed",
-            "tools",
-            "tool_choice",
-            "parallel_tool_calls",
-            "functions",
-            "function_call",
-            "user",
-            "system",
-            "n",
-            "echo",
-            "logprobs",
-            "top_logprobs",
-            "suffix",
-            "max_retries",
-            "timeout",
-            "model",
-            "type",
-            "instructor_mode",
-            "max_steps",
-            "end_strategy",
-            "end_tool",
-        }
-
-        for key, value in kwargs.items():
-            if key in lm_params:
-                language_model_kwargs[key] = value
-            else:
-                start_action_kwargs[key] = value
-
-        try:
-            bound_args = start_sig.bind(*args, **start_action_kwargs)
-            bound_args.apply_defaults()
-        except TypeError as e:
-            raise ValueError(
-                f"Invalid arguments for start action '{self._start_action_name}': {e}"
-            ) from e
-
-        start_node = start_node_class(**bound_args.arguments)
-        # Pass the graph docstring to the node for global system prompt
-        start_node._graph_docstring = self.__class__.__doc__ or ""
-
-        # Merge global settings with provided kwargs
-        merged_settings = self._global_settings.copy()
-        merged_settings.update(language_model_kwargs)
-
-        # Include the global model if it's set and not overridden
-        if self._global_model and "model" not in merged_settings:
-            merged_settings["model"] = self._global_model
-
-        # Pass verbose/debug flags (prefer explicit params over global settings)
-        start_node._verbose = (
-            verbose if verbose else merged_settings.get("verbose", False)
-        )
-        start_node._debug = debug if debug else merged_settings.get("debug", False)
-        start_node._language_model_kwargs = merged_settings
-
-        # Pass history if provided
-        start_node._history = history
-        # Pass the graph's action nodes for routing
-        start_node._graph_action_nodes = self._action_nodes
-
-        # Pass end strategy parameters (prefer explicit params over merged settings)
-        start_node._max_steps = (
-            max_steps if max_steps is not None else merged_settings.get("max_steps")
-        )
-        start_node._end_strategy = (
-            end_strategy
-            if end_strategy is not None
-            else merged_settings.get("end_strategy")
-        )
-        start_node._end_tool = (
-            end_tool if end_tool is not None else merged_settings.get("end_tool")
-        )
-
-        # Use the provided state or the graph's state
-        execution_state = state if state is not None else self._state
-        # Pass state to the node
-        start_node._state = execution_state
-
-        # Create and return GraphStream
-        return GraphStream(
-            graph=self,
-            start_node=start_node,
-            state=execution_state,
-            verbose=verbose,
-            debug=debug,
-            max_steps=max_steps,
-            end_strategy=end_strategy,
-            end_tool=end_tool,
-            **language_model_kwargs,
-        )
-
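A sketch of the removed async iteration path. Since `async_iter` is a coroutine that returns a `GraphStream`, it must be awaited first; the `async for` assumes `GraphStream` supports asynchronous iteration, as its docstring states:

```python
# Hypothetical usage of the removed async_iter() API.
async def consume() -> None:
    graph = MyGraph()
    stream = await graph.async_iter("hello")
    async for step in stream:  # assumes GraphStream implements __aiter__
        print(step)
```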
-    @classmethod
-    def builder(cls) -> GraphBuilder[StateT, T]:
-        """Create a builder for this graph."""
-        return GraphBuilder(cls)
-
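A one-line sketch of the removed builder entry point (the `GraphBuilder` fluent API itself was defined elsewhere in the removed module):

```python
# Hypothetical: obtain a GraphBuilder bound to the graph class.
builder = MyGraph.builder()  # equivalent to GraphBuilder(MyGraph)
```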
-    def as_a2a(
-        self,
-        *,
-        # Worker configuration
-        state: Optional[StateT] = None,
-        # Storage and broker configuration
-        storage: Optional[Any] = None,
-        broker: Optional[Any] = None,
-        # Server configuration
-        host: str = "0.0.0.0",
-        port: int = 8000,
-        reload: bool = False,
-        workers: int = 1,
-        log_level: str = "info",
-        # A2A configuration
-        name: Optional[str] = None,
-        url: Optional[str] = None,
-        version: str = "1.0.0",
-        description: Optional[str] = None,
-        # Advanced configuration
-        lifespan_timeout: int = 30,
-        **uvicorn_kwargs: Any,
-    ) -> "FastA2A":  # type: ignore
-        """
-        Convert this graph to an A2A server application.
-
-        This method creates a FastA2A server that can handle A2A requests
-        for this graph instance. It sets up the necessary Worker, Storage,
-        and Broker components automatically.
-
-        Args:
-            state: Initial state for the graph (overrides instance state)
-            storage: Custom storage backend (defaults to InMemoryStorage)
-            broker: Custom broker backend (defaults to InMemoryBroker)
-            host: Host to bind the server to
-            port: Port to bind the server to
-            reload: Enable auto-reload for development
-            workers: Number of worker processes
-            log_level: Logging level
-            name: Graph name for the A2A server
-            url: URL where the graph is hosted
-            version: API version
-            description: API description for the A2A server
-            lifespan_timeout: Timeout for lifespan events
-            **uvicorn_kwargs: Additional arguments passed to uvicorn
-
-        Returns:
-            FastA2A application instance that can be run with uvicorn
-
-        Examples:
-            Convert graph to A2A server:
-            ```python
-            class MyGraph(BaseGraph):
-                @action.start()
-                def process(self, message: str) -> str:
-                    return f"Processed: {message}"
-
-            graph = MyGraph()
-            app = graph.as_a2a(port=8080)
-
-            # Run with uvicorn
-            import uvicorn
-            uvicorn.run(app, host="0.0.0.0", port=8080)
-            ```
-
-            Or use the CLI (expose `app = graph.as_a2a()` at module level first,
-            since uvicorn imports an attribute rather than a call expression):
-            ```bash
-            uvicorn mymodule:app --reload
-            ```
-        """
-        from ..a2a import as_a2a_app
-
-        return as_a2a_app(
-            self,
-            state=state if state is not None else self._state,
-            storage=storage,
-            broker=broker,
-            host=host,
-            port=port,
-            reload=reload,
-            workers=workers,
-            log_level=log_level,
-            name=name or self.__class__.__name__,
-            url=url,
-            version=version,
-            description=description or self.__class__.__doc__,
-            lifespan_timeout=lifespan_timeout,
-            **uvicorn_kwargs,
-        )
-
-    def visualize(self, filename: str) -> None:
-        """Visualize the graph as a Mermaid diagram.
-
-        Args:
-            filename: The filename to save the visualization to,
-                e.g. 'graph.png' / 'graph.mmd'.
-
-        Returns:
-            None
-        """
-        visualize_base_graph(self, filename)
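Finally, a hedged sketch of the removed visualization helper:

```python
# Hypothetical: write a Mermaid rendering of the graph to disk.
graph = MyGraph()
graph.visualize("graph.mmd")  # delegates to visualize_base_graph
```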