hammad-python 0.0.24__py3-none-any.whl → 0.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,7 +12,9 @@ from typing import (
     Callable,
     get_type_hints,
     ParamSpec,
+    TypeAlias,
     Awaitable,
+    TYPE_CHECKING,
 )
 from typing_extensions import Literal
 from dataclasses import dataclass, field
@@ -21,12 +23,11 @@ from functools import wraps
 import asyncio
 
 from pydantic_graph import BaseNode, End, Graph as PydanticGraph, GraphRunContext
-from pydantic import BaseModel
 from ..models.language.utils import (
-    LanguageModelRequestBuilder,
     parse_messages_input,
    consolidate_system_messages,
 )
+from ...formatting.text import convert_to_text
 
 from ..agents.agent import Agent
 from ..agents.types.agent_response import AgentResponse
@@ -44,6 +45,12 @@ from .types import (
     GraphHistoryEntry,
 )
 
+if TYPE_CHECKING:
+    try:
+        from fasta2a import FastA2A
+    except ImportError:
+        FastA2A: TypeAlias = Any
+
 __all__ = [
     "BaseGraph",
     "action",
@@ -51,6 +58,8 @@ __all__ = [
     "GraphBuilder",
     "GraphStream",
     "GraphResponseChunk",
+    "select",
+    "SelectionStrategy",
 ]
 
 T = TypeVar("T")
@@ -58,6 +67,257 @@ StateT = TypeVar("StateT")
 P = ParamSpec("P")
 
 
+class SelectionStrategy:
+    """LLM-based selection strategy for choosing the next action."""
+
+    def __init__(
+        self,
+        *actions: str,
+        instructions: Optional[str] = None,
+        model: Optional[str] = None,
+    ):
+        self.actions = list(actions)
+        self.instructions = instructions
+        self.model = model or "openai/gpt-4o-mini"
+        self._language_model = None
+        self._use_all_actions = (
+            len(actions) == 0
+        )  # If no actions specified, use all available
+
+    def _get_language_model(self):
+        """Lazy load the language model."""
+        if self._language_model is None:
+            from ..models.language.model import LanguageModel
+
+            self._language_model = LanguageModel(model=self.model)
+        return self._language_model
+
+    def select(self, context: Optional[Dict[str, Any]] = None) -> str:
+        """Use LLM to select the most appropriate action."""
+        if not context:
+            context = {}
+
+        # Get available actions
+        actions_to_choose_from = self.actions
+        if self._use_all_actions and "all_actions" in context:
+            # Use all available actions from the graph
+            actions_to_choose_from = context["all_actions"]
+
+        if not actions_to_choose_from:
+            return ""
+
+        # If only one action, return it
+        if len(actions_to_choose_from) == 1:
+            return actions_to_choose_from[0]
+
+        # Import here to avoid circular imports
+        from pydantic import BaseModel, Field, create_model
+        from enum import Enum
+
+        # Create enum for available actions
+        ActionEnum = Enum(
+            "ActionEnum", {action: action for action in actions_to_choose_from}
+        )
+
+        # Create selection model
+        SelectionModel = create_model(
+            "ActionSelection",
+            action=(
+                ActionEnum,
+                Field(description="The selected action to execute next"),
+            ),
+            reasoning=(str, Field(description="Brief reasoning for the selection")),
+        )
+
+        # Build context description
+        context_parts = []
+
+        # Add result from previous action
+        if "result" in context:
+            context_parts.append(f"Previous action result: {context['result']}")
+
+        # Add conversation history
+        if "messages" in context and context["messages"]:
+            # Get last few messages for context
+            recent_messages = context["messages"][-5:]  # Last 5 messages
+            messages_str = "\n".join(
+                [
+                    f"{msg.get('role', 'unknown')}: {msg.get('content', '')}"
+                    for msg in recent_messages
+                ]
+            )
+            context_parts.append(f"Recent conversation:\n{messages_str}")
+
+        # Add state information
+        if "state" in context and context["state"]:
+            context_parts.append(f"Current state: {context['state']}")
+
+        context_description = "\n\n".join(context_parts)
+
+        # Build selection prompt
+        base_instructions = f"""Based on the context below, select the most appropriate next action from the available options.
+
+Available actions:
+{", ".join(actions_to_choose_from)}
+
+Context:
+{context_description}
+
+Consider the conversation flow, user's request, and any patterns in the conversation when making your selection.
+For example, if the user asked to do something multiple times (e.g., "reason twice"), and you've only done it once, select that action again."""
+
+        # Add custom instructions if provided
+        if self.instructions:
+            base_instructions = (
+                f"{base_instructions}\n\nAdditional instructions:\n{self.instructions}"
+            )
+
+        # Get language model to make selection
+        try:
+            lm = self._get_language_model()
+            response = lm.run(
+                messages=[{"role": "user", "content": base_instructions}],
+                type=SelectionModel,
+            )
+
+            selected_action = response.output.action.value
+
+            # Validate the selection
+            if selected_action in actions_to_choose_from:
+                return selected_action
+            else:
+                # Fallback to first action if invalid selection
+                return actions_to_choose_from[0]
+
+        except Exception:
+            # Fallback to first action on any error
+            return actions_to_choose_from[0] if actions_to_choose_from else ""
+
+    def __repr__(self) -> str:
+        if self._use_all_actions:
+            return f"SelectionStrategy(all_actions)"
+        return f"SelectionStrategy({', '.join(repr(a) for a in self.actions)})"
+
+    def select(self, context: Optional[Dict[str, Any]] = None) -> str:
+        """Use LLM to select the most appropriate action."""
+        if not context or not self.actions:
+            return self.actions[0] if self.actions else ""
+
+        # Import here to avoid circular imports
+        from pydantic import BaseModel, Field, create_model
+        from enum import Enum
+
+        # Create enum for available actions
+        ActionEnum = Enum("ActionEnum", {action: action for action in self.actions})
+
+        # Create selection model
+        SelectionModel = create_model(
+            "ActionSelection",
+            action=(
+                ActionEnum,
+                Field(description="The selected action to execute next"),
+            ),
+            reasoning=(str, Field(description="Brief reasoning for the selection")),
+        )
+
+        # Build context description
+        context_parts = []
+
+        # Add result from previous action
+        if "result" in context:
+            context_parts.append(f"Previous action result: {context['result']}")
+
+        # Add conversation history
+        if "messages" in context and context["messages"]:
+            # Get last few messages for context
+            recent_messages = context["messages"][-5:]  # Last 5 messages
+            messages_str = "\n".join(
+                [
+                    f"{msg.get('role', 'unknown')}: {msg.get('content', '')}"
+                    for msg in recent_messages
+                ]
+            )
+            context_parts.append(f"Recent conversation:\n{messages_str}")
+
+        # Add state information
+        if "state" in context and context["state"]:
+            context_parts.append(f"Current state: {context['state']}")
+
+        context_description = "\n\n".join(context_parts)
+
+        # Build selection prompt
+        base_instructions = f"""Based on the context below, select the most appropriate next action from the available options.
+
+Available actions:
+{", ".join(self.actions)}
+
+Context:
+{context_description}
+
+Consider the conversation flow and any specific instructions from the user when making your selection."""
+
+        # Add custom instructions if provided
+        if self.instructions:
+            base_instructions = (
+                f"{base_instructions}\n\nAdditional instructions:\n{self.instructions}"
+            )
+
+        # Get language model to make selection
+        try:
+            lm = self._get_language_model()
+            response = lm.run(
+                messages=[{"role": "user", "content": base_instructions}],
+                type=SelectionModel,
+            )
+
+            selected_action = response.output.action.value
+
+            # Validate the selection
+            if selected_action in self.actions:
+                return selected_action
+            else:
+                # Fallback to first action if invalid selection
+                return self.actions[0]
+
+        except Exception:
+            # Fallback to first action on any error
+            return self.actions[0] if self.actions else ""
+
+
+def select(
+    *actions: str, instructions: Optional[str] = None, model: Optional[str] = None
+) -> SelectionStrategy:
+    """
+    Create an LLM-based selection strategy for choosing between multiple actions.
+
+    Args:
+        *actions: The action names to choose from. If empty, will select from all available actions.
+        instructions: Optional instructions for the LLM selection
+        model: Optional model to use for selection (defaults to gpt-4o-mini)
+
+    Returns:
+        A SelectionStrategy instance
+
+    Examples:
+        # Select between specific actions
+        @action(next=select("poem", "response"))
+        def reasoning(self, message: str) -> str:
+            ...
+
+        # Select from all available actions in the graph
+        @action(next=select())
+        def reasoning(self, message: str) -> str:
+            ...
+
+        # With custom instructions
+        @action(next=select("reasoning", "response",
+                instructions="If the user asked for multiple reasonings, select 'reasoning' again"))
+        def reasoning(self, message: str) -> str:
+            ...
+    """
+    return SelectionStrategy(*actions, instructions=instructions, model=model)
+
+
 class ActionNode(BaseNode[StateT, None, Any]):
     """A pydantic-graph node that wraps a user-defined action function."""
 
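Together, `SelectionStrategy` and the `select()` factory let a graph defer routing to an LLM, with deterministic fallbacks: no context or no candidate actions yields the first action (or an empty string), and any model error also falls back to the first action. A hedged sketch of direct use; the import path is assumed, and the multi-candidate call needs model credentials at runtime:

```python
# Assumed public import path for this module's API.
from hammad.genai.graphs import select

strategy = select("poem", "response",
                  instructions="Prefer 'poem' for creative requests")

# Deterministic shortcut: an empty context skips the model call entirely.
assert select("poem").select({}) == "poem"

# With candidates and context, the strategy builds a prompt from the
# previous result, the last 5 messages, and the state, then asks the model.
choice = strategy.select({
    "result": "The user wants something artistic.",
    "messages": [{"role": "user", "content": "write me a poem"}],
})
print(choice)  # "poem" or "response"; first action on any model error
```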
@@ -80,6 +340,10 @@ class ActionNode(BaseNode[StateT, None, Any]):
     async def run(self, ctx: GraphRunContext[StateT]) -> Union[BaseNode, End]:
         """Execute the action function using Agent/LanguageModel infrastructure."""
 
+        # Track this node's execution
+        execution_tracker = getattr(self, "_execution_tracker", [])
+        execution_tracker.append(self.action_name)
+
         # Create enhanced context that wraps pydantic-graph context
         enhanced_ctx = GraphContext(
             pydantic_context=ctx,
@@ -104,18 +368,12 @@ class ActionNode(BaseNode[StateT, None, Any]):
         if hasattr(self, "_graph_docstring"):
             global_system_prompt = self._graph_docstring
 
-        # Add well-defined step execution context
-        step_context = f"""
-You are executing step '{self.action_name}' in a multi-step graph workflow.
-
-Step Purpose: {field_instructions or "Execute the requested action"}
-
-Execution Guidelines:
-- Focus on completing this specific step's objective
-- Provide clear, actionable output that can be used by subsequent steps
-- If this step involves decision-making, be explicit about your reasoning
-- Maintain consistency with the overall workflow context
-"""
+        # Get state from the context if available
+        current_state = None
+        if hasattr(ctx, "state") and ctx.state is not None:
+            current_state = ctx.state
+        elif hasattr(self, "_state"):
+            current_state = getattr(self, "_state", None)
 
         # Check if the action function expects to handle the language model itself
         expects_language_model = (
@@ -124,14 +382,19 @@ Execution Guidelines:
 
         if expects_language_model:
             # Legacy mode: action function expects to handle language model
-            # Combine global system prompt with field-level instructions and step context
+            # Combine global system prompt with field-level instructions and state
             combined_instructions = global_system_prompt
-            if step_context:
-                combined_instructions += f"\n\n{step_context}"
             if field_instructions and field_instructions not in combined_instructions:
-                combined_instructions += (
-                    f"\n\nAdditional Instructions: {field_instructions}"
-                )
+                if combined_instructions:
+                    combined_instructions += f"\n\n{field_instructions}"
+                else:
+                    combined_instructions = field_instructions
+
+            # Add state to instructions if available
+            if current_state is not None:
+                state_str = convert_to_text(current_state, show_defaults=False)
+                if state_str:
+                    combined_instructions += f"\n\nState: {state_str}"
 
             # Get verbose/debug flags and language model kwargs from the node
             verbose = getattr(self, "_verbose", self.settings.verbose)
@@ -144,17 +407,40 @@ Execution Guidelines:
             end_tool = getattr(self, "_end_tool", self.settings.end_tool)
 
             if self.settings.tools or self.settings.instructions:
+                # Get model from settings, then language_model_kwargs, then default
+                model = self.settings.model or language_model_kwargs.get(
+                    "model", "openai/gpt-4o-mini"
+                )
+
+                # Remove parameters that will be passed explicitly to avoid duplicates
+                filtered_kwargs = {
+                    k: v
+                    for k, v in language_model_kwargs.items()
+                    if k
+                    not in [
+                        "model",
+                        "name",
+                        "instructions",
+                        "tools",
+                        "max_steps",
+                        "end_strategy",
+                        "end_tool",
+                        "verbose",
+                        "debug",
+                    ]
+                }
+
                 agent = Agent(
                     name=self.settings.name or self.action_name,
                     instructions=self.settings.instructions or combined_instructions,
-                    model=self.settings.model or "openai/gpt-4o-mini",
+                    model=model,
                    tools=self.settings.tools,
                     max_steps=max_steps,
                     end_strategy=end_strategy,
                     end_tool=end_tool,
                     verbose=verbose,
                     debug=debug,
-                    **language_model_kwargs,
+                    **filtered_kwargs,
                 )
                 # Pass history to context if available
                 history = getattr(self, "_history", None)
@@ -168,11 +454,23 @@ Execution Guidelines:
                 else:
                     result = self.action_func(enhanced_ctx, agent, **action_params)
             else:
+                # Get model from settings, then language_model_kwargs, then default
+                model = self.settings.model or language_model_kwargs.get(
+                    "model", "openai/gpt-4o-mini"
+                )
+
+                # Remove parameters that will be passed explicitly to avoid duplicates
+                filtered_kwargs = {
+                    k: v
+                    for k, v in language_model_kwargs.items()
+                    if k not in ["model", "verbose", "debug"]
+                }
+
                 language_model = LanguageModel(
-                    model=self.settings.model or "openai/gpt-4o-mini",
+                    model=model,
                     verbose=verbose,
                     debug=debug,
-                    **language_model_kwargs,
+                    **filtered_kwargs,
                 )
                 # Pass history to context if available
                 history = getattr(self, "_history", None)
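Both branches now resolve the model with one precedence chain (explicit setting, then caller kwargs, then default) and strip any keys that are also passed explicitly, which avoids `TypeError: ... got multiple values for keyword argument` when callers put `model` or `verbose` into `language_model_kwargs`. The pattern in isolation, with illustrative names:

```python
from typing import Any, Dict, Optional


def resolve_call_kwargs(settings_model: Optional[str], **kwargs: Any) -> Dict[str, Any]:
    # Precedence: explicit setting, then caller kwargs, then the default.
    model = settings_model or kwargs.get("model", "openai/gpt-4o-mini")

    # Drop keys passed explicitly below so **filtered cannot collide with them.
    filtered = {k: v for k, v in kwargs.items() if k not in ["model", "verbose", "debug"]}

    return dict(model=model, verbose=kwargs.get("verbose", False), **filtered)


assert resolve_call_kwargs(None, model="openai/gpt-4o", temperature=0.2) == {
    "model": "openai/gpt-4o",
    "verbose": False,
    "temperature": 0.2,
}
```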
@@ -189,42 +487,37 @@ Execution Guidelines:
                )
             else:
                 # New mode: framework handles language model internally
-                # Build the user message from the action parameters with clear context
+                # Build the user message from the action parameters
                 user_message = ""
                 if action_params:
                     if len(action_params) == 1:
-                        # Single parameter - use its value directly with context
+                        # Single parameter - use its value directly
                         param_value = list(action_params.values())[0]
-                        user_message = f"Process the following input for step '{self.action_name}':\n\n{param_value}"
+                        user_message = str(param_value)
                     else:
                         # Multiple parameters - format them clearly
                         param_list = "\n".join(
-                            f"- {k}: {v}" for k, v in action_params.items()
+                            f"{k}: {v}" for k, v in action_params.items()
                         )
-                        user_message = f"Execute step '{self.action_name}' with the following parameters:\n\n{param_list}"
+                        user_message = param_list
                 else:
-                    # No parameters - provide clear step instruction
-                    user_message = f"Execute the '{self.action_name}' step of the workflow."
+                    # No parameters - check if we have previous conversation history
+                    # If we do, don't add an empty user message
+                    user_message = ""
 
-                # Combine global system prompt with step context and field-level instructions
+                # Combine global system prompt with field-level instructions and state
                 combined_instructions = global_system_prompt
-                if step_context:
-                    combined_instructions += f"\n\n{step_context}"
                 if field_instructions and field_instructions not in combined_instructions:
-                    combined_instructions += (
-                        f"\n\nAdditional Instructions: {field_instructions}"
-                    )
+                    if combined_instructions:
+                        combined_instructions += f"\n\n{field_instructions}"
+                    else:
+                        combined_instructions = field_instructions
 
-                # Add execution guidelines for framework mode
-                execution_guidelines = """
-
-Execution Guidelines:
-- Provide a clear, direct response that addresses the step's objective
-- Your output will be used as input for subsequent workflow steps
-- Be concise but comprehensive in your response
-- If making decisions or analysis, show your reasoning process
-"""
-                combined_instructions += execution_guidelines
+                # Add state to instructions if available
+                if current_state is not None:
+                    state_str = convert_to_text(current_state, show_defaults=False)
+                    if state_str:
+                        combined_instructions += f"\n\nContext: {state_str}"
 
                 # Get verbose/debug flags and language model kwargs from the node
                 verbose = getattr(self, "_verbose", self.settings.verbose)
@@ -239,59 +532,144 @@ Execution Guidelines:
                # Determine if we need to use Agent or LanguageModel
                 if self.settings.tools or self.settings.instructions:
                     # Use Agent for complex operations with tools/instructions
+                    # Get model from settings, then language_model_kwargs, then default
+                    model = self.settings.model or language_model_kwargs.get(
+                        "model", "openai/gpt-4o-mini"
+                    )
+
+                    # Remove parameters that will be passed explicitly to avoid duplicates
+                    filtered_kwargs = {
+                        k: v
+                        for k, v in language_model_kwargs.items()
+                        if k
+                        not in [
+                            "model",
+                            "name",
+                            "instructions",
+                            "tools",
+                            "max_steps",
+                            "end_strategy",
+                            "end_tool",
+                            "verbose",
+                            "debug",
+                        ]
+                    }
+
                     agent = Agent(
                         name=self.settings.name or self.action_name,
                         instructions=self.settings.instructions or combined_instructions,
-                        model=self.settings.model or "openai/gpt-4o-mini",
+                        model=model,
                         tools=self.settings.tools,
                         max_steps=max_steps,
                         end_strategy=end_strategy,
                         end_tool=end_tool,
                         verbose=verbose,
                         debug=debug,
-                        **language_model_kwargs,
+                        **filtered_kwargs,
                     )
 
                     # Get history if available
                     history = getattr(self, "_history", None)
 
+                    # Check if we have previous conversation history from the graph execution
+                    previous_messages = getattr(self, "_graph_messages", [])
+
+                    # Store the current user message for history building
+                    if user_message:
+                        self._current_user_message = user_message
+
                     # Run the agent with the user message and history
                     if history:
                         # If history is provided, we need to combine it with the user message
                         # The history should be the conversation context, and user_message is the new input
                         combined_messages = parse_messages_input(history)
-                        combined_messages.append({"role": "user", "content": user_message})
+                        combined_messages.extend(previous_messages)
+                        if user_message:  # Only add non-empty user messages
+                            combined_messages.append(
+                                {"role": "user", "content": user_message}
+                            )
+                        agent_result = await agent.async_run(combined_messages)
+                    elif previous_messages:
+                        # If we have previous messages from the graph, use them
+                        combined_messages = previous_messages.copy()
+                        if user_message:  # Only add non-empty user messages
+                            combined_messages.append(
+                                {"role": "user", "content": user_message}
+                            )
                         agent_result = await agent.async_run(combined_messages)
                     else:
-                        agent_result = await agent.async_run(user_message)
+                        # Only run with user message if it's not empty
+                        if user_message:
+                            agent_result = await agent.async_run(user_message)
+                        else:
+                            # If no user message and no history, we can't run the agent
+                            raise ValueError(
+                                "No user message or history provided for agent execution"
+                            )
                     result = agent_result.output
                 else:
                     # Use LanguageModel for simple operations
+                    # Get model from settings, then language_model_kwargs, then default
+                    model = self.settings.model or language_model_kwargs.get(
+                        "model", "openai/gpt-4o-mini"
+                    )
+
+                    # Remove parameters that will be passed explicitly to avoid duplicates
+                    filtered_kwargs = {
+                        k: v
+                        for k, v in language_model_kwargs.items()
+                        if k not in ["model", "verbose", "debug"]
+                    }
+
                     language_model = LanguageModel(
-                        model=self.settings.model or "openai/gpt-4o-mini",
+                        model=model,
                         verbose=verbose,
                         debug=debug,
-                        **language_model_kwargs,
+                        **filtered_kwargs,
                     )
 
                     # Get history if available
                     history = getattr(self, "_history", None)
 
+                    # Check if we have previous conversation history from the graph execution
+                    previous_messages = getattr(self, "_graph_messages", [])
+
                     # Create messages using the language model utils
                     if history:
                         # If history is provided, use it as the base messages
                         messages = parse_messages_input(
                             history, instructions=combined_instructions
                         )
+                        # Add any previous graph messages
+                        messages.extend(previous_messages)
                         # Then add the user message from action parameters
-                        messages.append({"role": "user", "content": user_message})
-                    else:
-                        # Otherwise, use the user message
+                        if user_message:  # Only add non-empty user messages
+                            messages.append({"role": "user", "content": user_message})
+                    elif previous_messages:
+                        # If we have previous messages from the graph, use them
                         messages = parse_messages_input(
-                            user_message, instructions=combined_instructions
+                            "", instructions=combined_instructions
                         )
+                        messages.extend(previous_messages)
+                        if user_message:  # Only add non-empty user messages
+                            messages.append({"role": "user", "content": user_message})
+                    else:
+                        # Otherwise, use the user message (if not empty)
+                        if user_message:
+                            messages = parse_messages_input(
+                                user_message, instructions=combined_instructions
+                            )
+                        else:
+                            # If no user message and no history, just use instructions
+                            messages = parse_messages_input(
+                                "", instructions=combined_instructions
+                            )
                     messages = consolidate_system_messages(messages)
 
+                    # Store the current user message for history building
+                    if user_message:
+                        self._current_user_message = user_message
+
                     # Run the language model with the consolidated messages
                     lm_result = await language_model.async_run(messages)
                     result = lm_result.output
@@ -310,9 +688,83 @@ Execution Guidelines:
         elif self.settings.terminates:
             return End(result)
         else:
-            # For non-terminating actions that don't return a node, continue to next
-            # This would be more sophisticated in a real implementation with routing
-            return End(result)
+            # Check if there's a next action defined
+            if self.settings.next:
+                # Handle different types of next specifications
+                next_action_name = None
+
+                if isinstance(self.settings.next, str):
+                    # Simple string case
+                    next_action_name = self.settings.next
+                elif isinstance(self.settings.next, list):
+                    # List case - for now, just pick the first one
+                    # In the future, this could execute all in parallel
+                    if self.settings.next:
+                        next_action_name = self.settings.next[0]
+                elif isinstance(self.settings.next, SelectionStrategy):
+                    # Selection strategy case - use the strategy to pick an action
+                    context = {
+                        "result": result,
+                        "state": getattr(self, "_state", None),
+                        "messages": getattr(self, "_graph_messages", []),
+                    }
+                    # If using all actions, pass them in the context
+                    if self.settings.next._use_all_actions and hasattr(
+                        self, "_graph_action_nodes"
+                    ):
+                        context["all_actions"] = list(self._graph_action_nodes.keys())
+                    next_action_name = self.settings.next.select(context)
+                else:
+                    # Invalid type for next
+                    return End(result)
+
+                # Find the next node class from the graph's action nodes
+                if hasattr(self, "_graph_action_nodes") and next_action_name:
+                    next_node_class = self._graph_action_nodes.get(next_action_name)
+                    if next_node_class:
+                        # Create the next node instance
+                        # For graph flow, we don't pass the result as a parameter
+                        # The conversation history will contain the context
+                        next_node = next_node_class()
+
+                        # Copy over any graph-specific attributes
+                        for attr in [
+                            "_graph_docstring",
+                            "_verbose",
+                            "_debug",
+                            "_language_model_kwargs",
+                            "_history",
+                            "_state",
+                            "_graph_action_nodes",
+                            "_execution_tracker",
+                        ]:
+                            if hasattr(self, attr):
+                                setattr(next_node, attr, getattr(self, attr))
+
+                        # Build up the conversation history for the next node
+                        current_messages = getattr(self, "_graph_messages", [])
+                        # Add the current interaction to the conversation history
+                        # Only add the user message if it was actually provided (not empty)
+                        if (
+                            hasattr(self, "_current_user_message")
+                            and self._current_user_message
+                        ):
+                            current_messages.append(
+                                {"role": "user", "content": self._current_user_message}
+                            )
+                        # Add the assistant response from this node
+                        current_messages.append(
+                            {"role": "assistant", "content": str(result)}
+                        )
+                        next_node._graph_messages = current_messages
+
+                        return next_node
+
+                # If we can't find any valid next node, terminate
+                return End(result)
+            else:
+                # No next action defined, terminate
+                return End(result)
 
 
 class ActionDecorator:
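The routing block above gives `settings.next` three accepted shapes: a plain string, a list (only the first entry is used for now), or a `SelectionStrategy` consulted with the step's result, state, and accumulated messages. A hedged wiring sketch; the import path is assumed, and the decorator parameters match the `ActionDecorator` signature shown below:

```python
from hammad.genai.graphs import BaseGraph, action, select  # assumed import path

class Pipeline(BaseGraph):
    """Plan, draft, review, and publish a document."""

    @action(start=True, next="draft")         # string: always route to "draft"
    def plan(self, message: str) -> str: ...

    @action(next=["review", "publish"])       # list: first entry wins for now
    def draft(self) -> str: ...

    @action(next=select("draft", "publish"))  # strategy: the LLM decides per run
    def review(self) -> str: ...

    @action(terminates=True)                  # End(result) is returned here
    def publish(self) -> str: ...
```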
@@ -333,7 +785,7 @@ class ActionDecorator:
         start: bool = False,
         terminates: bool = False,
         xml: Optional[str] = None,
-        next: Optional[Union[str, List[str]]] = None,
+        next: Optional[Union[str, List[str], SelectionStrategy]] = None,
         read_history: bool = False,
         persist_history: bool = False,
         condition: Optional[str] = None,
@@ -374,7 +826,7 @@ class ActionDecorator:
         def decorator(f: Callable) -> Callable:
             action_name = name or f.__name__
 
-            # Create a dynamic ActionNode class for this specific action
+            # Create a dynamic ActionNode class for this specific action with unique name
             class DynamicActionNode(ActionNode[StateT]):
                 def __init__(self, **action_params):
                     super().__init__(
@@ -384,6 +836,11 @@ class ActionDecorator:
                         **action_params,
                     )
 
+                @classmethod
+                def get_node_id(cls):
+                    """Override to provide unique node ID based on action name."""
+                    return f"DynamicActionNode_{action_name}"
+
             # Store the action
             self._actions[action_name] = DynamicActionNode
             if start:
@@ -460,10 +917,44 @@ class GraphBuilder(Generic[StateT, T]):
 class BaseGraph(Generic[StateT, T]):
     """Base class for graphs that provides action decorator support on top of pydantic-graph."""
 
-    def __init__(self, state: Optional[StateT] = None):
-        self._plugins: List[BasePlugin] = []
-        self._global_model: Optional[LanguageModelName] = None
-        self._global_settings: Dict[str, Any] = {}
+    def __init__(
+        self,
+        state: Optional[StateT] = None,
+        *,
+        model: Optional[LanguageModelName | str] = None,
+        temperature: Optional[float] = None,
+        max_tokens: Optional[int] = None,
+        tools: Optional[List[Callable]] = None,
+        verbose: bool = False,
+        debug: bool = False,
+        max_steps: Optional[int] = None,
+        end_strategy: Optional[Literal["tool"]] = None,
+        end_tool: Optional[Callable] = None,
+        summarize_tools: bool = True,
+        summarize_tools_with_model: bool = False,
+        plugins: Optional[List[BasePlugin]] = None,
+        **kwargs: Any,
+    ):
+        self._plugins: List[BasePlugin] = plugins or []
+        self._global_model: Optional[LanguageModelName] = model
+        self._global_settings: Dict[str, Any] = {
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "tools": tools,
+            "verbose": verbose,
+            "debug": debug,
+            "max_steps": max_steps,
+            "end_strategy": end_strategy,
+            "end_tool": end_tool,
+            "summarize_tools": summarize_tools,
+            "summarize_tools_with_model": summarize_tools_with_model,
+            **kwargs,
+        }
+        # Remove None values from settings
+        self._global_settings = {
+            k: v for k, v in self._global_settings.items() if v is not None
+        }
+
         self._pydantic_graph: Optional[PydanticGraph] = None
         self._action_nodes: Dict[str, Type[ActionNode]] = {}
         self._start_action_name: Optional[str] = None
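`BaseGraph.__init__` now doubles as a configuration point: everything passed here lands in `_global_settings` (with `None` values stripped, so unset options never shadow per-call kwargs) and is merged into each run. Continuing the `Pipeline` sketch above:

```python
graph = Pipeline(
    model="openai/gpt-4o",  # becomes the default model for every action
    temperature=0.2,
    max_steps=5,
    verbose=True,
)
# max_tokens was left unset, so it is absent from _global_settings and can
# still be supplied per call, e.g. graph.run("go", max_tokens=512)  # call shape assumed
```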
@@ -503,6 +994,8 @@ class BaseGraph(Generic[StateT, T]):
     def _collect_actions(self) -> None:
         """Collect all actions defined in the graph class."""
         actions_found = []
+        start_action = None
+        end_action = None
 
         # Get the graph class docstring for global system prompt
         graph_docstring = self.__class__.__doc__ or ""
@@ -523,6 +1016,14 @@ class BaseGraph(Generic[StateT, T]):
                    )
                    self._start_action_name = action_name
                    self._start_action_func = attr
+                    start_action = attr
+
+                # Check if this is an end action (terminates=True)
+                if (
+                    hasattr(attr, "_action_settings")
+                    and attr._action_settings.terminates
+                ):
+                    end_action = attr
 
         # If no explicit start action was defined and we have exactly one action,
         # automatically make it the start action
@@ -531,6 +1032,13 @@ class BaseGraph(Generic[StateT, T]):
             self._start_action_name = action_name
             self._start_action_func = action_func
 
+        # Special case: If we have exactly 2 actions (start -> end), automatically set up routing
+        if len(actions_found) == 2 and start_action and end_action:
+            # Check if the start action doesn't already have a 'next' defined
+            if start_action._action_settings.next is None:
+                # Automatically set the start action to route to the end action
+                start_action._action_settings.next = end_action._action_name
+
         # Store the graph docstring in all action nodes for access during execution
         for action_node_class in self._action_nodes.values():
             # We'll add this to the action node instances when they're created
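The two-action special case removes boilerplate for the most common shape, a start step followed by a terminating step: `_collect_actions` wires `next` automatically when the start action has none. A sketch under the same assumed decorator API:

```python
class AskAnswer(BaseGraph):
    """Answer the user's question."""

    @action(start=True)       # no `next=` needed here...
    def ask(self, question: str) -> str: ...

    @action(terminates=True)  # ..._collect_actions routes ask -> answer automatically
    def answer(self) -> str: ...
```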
@@ -648,20 +1156,34 @@ class BaseGraph(Generic[StateT, T]):
         start_node = start_node_class(**bound_args.arguments)
         # Pass the graph docstring to the node for global system prompt
         start_node._graph_docstring = self.__class__.__doc__ or ""
-        # Pass verbose/debug flags and language model kwargs
-        start_node._verbose = verbose
-        start_node._debug = debug
-        start_node._language_model_kwargs = language_model_kwargs
+
+        # Merge global settings with provided kwargs
+        merged_settings = self._global_settings.copy()
+        merged_settings.update(language_model_kwargs)
+
+        # Pass verbose/debug flags (prefer explicit params over global settings)
+        start_node._verbose = (
+            verbose if verbose else merged_settings.get("verbose", False)
+        )
+        start_node._debug = debug if debug else merged_settings.get("debug", False)
+        start_node._language_model_kwargs = merged_settings
+
         # Pass history if provided
         start_node._history = history
+        # Pass the graph's action nodes for routing
+        start_node._graph_action_nodes = self._action_nodes
+
+        # Initialize execution tracking
+        self._execution_tracker = []
+        start_node._execution_tracker = self._execution_tracker
 
-        # Pass end strategy parameters if provided
-        if "max_steps" in language_model_kwargs:
-            start_node._max_steps = language_model_kwargs["max_steps"]
-        if "end_strategy" in language_model_kwargs:
-            start_node._end_strategy = language_model_kwargs["end_strategy"]
-        if "end_tool" in language_model_kwargs:
-            start_node._end_tool = language_model_kwargs["end_tool"]
+        # Pass end strategy parameters (from merged settings)
+        if "max_steps" in merged_settings:
+            start_node._max_steps = merged_settings["max_steps"]
+        if "end_strategy" in merged_settings:
+            start_node._end_strategy = merged_settings["end_strategy"]
+        if "end_tool" in merged_settings:
+            start_node._end_tool = merged_settings["end_tool"]
 
         # Run the pydantic graph
         if not self._pydantic_graph:
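Run-time kwargs now layer on top of the constructor settings, and the merge order means a per-call value always wins. The same logic in miniature:

```python
global_settings = {"temperature": 0.2, "max_steps": 5}  # from BaseGraph.__init__
call_kwargs = {"temperature": 0.9}                      # from a single run(...)

merged = global_settings.copy()
merged.update(call_kwargs)
assert merged == {"temperature": 0.9, "max_steps": 5}   # the call-site value wins
```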
@@ -669,6 +1191,8 @@ class BaseGraph(Generic[StateT, T]):
 
         # Use the provided state or the graph's state
         execution_state = state if state is not None else self._state
+        # Pass state to the node
+        start_node._state = execution_state
 
         # Execute the graph using pydantic-graph
         try:
@@ -683,6 +1207,13 @@ class BaseGraph(Generic[StateT, T]):
             else:
                 output = str(result)
 
+            # Get nodes executed from the execution tracker
+            nodes_executed = getattr(self, "_execution_tracker", [])
+
+            # If no nodes tracked, at least include the start node
+            if not nodes_executed:
+                nodes_executed = [self._start_action_name]
+
             # Create our response object
             return GraphResponse(
                 type="graph",
@@ -693,7 +1224,7 @@ class BaseGraph(Generic[StateT, T]):
                 state=execution_state,
                 history=[],  # Would be populated from pydantic-graph execution
                 start_node=self._start_action_name,
-                nodes_executed=[self._start_action_name],  # Would track from execution
+                nodes_executed=nodes_executed,
                 metadata={},
             )
 
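Because every `ActionNode.run` appends its name to the shared `_execution_tracker` list (the same list object handed to the start node and copied onto each `next_node`), `nodes_executed` now reflects the path actually taken rather than just the start node. A hypothetical inspection, reusing the `Pipeline` sketch:

```python
response = Pipeline().run("ship the feature")  # run call shape assumed
print(response.start_node)                     # "plan"
print(response.nodes_executed)                 # e.g. ["plan", "draft", "review", "publish"]
```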
@@ -794,23 +1325,40 @@ class BaseGraph(Generic[StateT, T]):
         start_node = start_node_class(**bound_args.arguments)
         # Pass the graph docstring to the node for global system prompt
         start_node._graph_docstring = self.__class__.__doc__ or ""
-        # Pass verbose/debug flags and language model kwargs
-        start_node._verbose = verbose
-        start_node._debug = debug
-        start_node._language_model_kwargs = language_model_kwargs
+
+        # Merge global settings with provided kwargs
+        merged_settings = self._global_settings.copy()
+        merged_settings.update(language_model_kwargs)
+
+        # Pass verbose/debug flags (prefer explicit params over global settings)
+        start_node._verbose = (
+            verbose if verbose else merged_settings.get("verbose", False)
+        )
+        start_node._debug = debug if debug else merged_settings.get("debug", False)
+        start_node._language_model_kwargs = merged_settings
+
         # Pass history if provided
         start_node._history = history
+        # Pass the graph's action nodes for routing
+        start_node._graph_action_nodes = self._action_nodes
 
-        # Pass end strategy parameters if provided
-        if max_steps is not None:
-            start_node._max_steps = max_steps
-        if end_strategy is not None:
-            start_node._end_strategy = end_strategy
-        if end_tool is not None:
-            start_node._end_tool = end_tool
+        # Pass end strategy parameters (prefer explicit params over merged settings)
+        start_node._max_steps = (
+            max_steps if max_steps is not None else merged_settings.get("max_steps")
+        )
+        start_node._end_strategy = (
+            end_strategy
+            if end_strategy is not None
+            else merged_settings.get("end_strategy")
+        )
+        start_node._end_tool = (
+            end_tool if end_tool is not None else merged_settings.get("end_tool")
+        )
 
         # Use the provided state or the graph's state
         execution_state = state if state is not None else self._state
+        # Pass state to the node
+        start_node._state = execution_state
 
         # Create and return GraphStream
         return GraphStream(
@@ -914,20 +1462,39 @@ class BaseGraph(Generic[StateT, T]):
         start_node = start_node_class(**bound_args.arguments)
         # Pass the graph docstring to the node for global system prompt
         start_node._graph_docstring = self.__class__.__doc__ or ""
-        # Pass verbose/debug flags and language model kwargs
-        start_node._verbose = verbose
-        start_node._debug = debug
-        start_node._language_model_kwargs = language_model_kwargs
+
+        # Merge global settings with provided kwargs
+        merged_settings = self._global_settings.copy()
+        merged_settings.update(language_model_kwargs)
+
+        # Pass verbose/debug flags (prefer explicit params over global settings)
+        start_node._verbose = (
+            verbose if verbose else merged_settings.get("verbose", False)
+        )
+        start_node._debug = debug if debug else merged_settings.get("debug", False)
+        start_node._language_model_kwargs = merged_settings
+
         # Pass history if provided
         start_node._history = history
+        # Pass the graph's action nodes for routing
+        start_node._graph_action_nodes = self._action_nodes
 
-        # Pass end strategy parameters if provided
-        if max_steps is not None:
-            start_node._max_steps = max_steps
-        if end_strategy is not None:
-            start_node._end_strategy = end_strategy
-        if end_tool is not None:
-            start_node._end_tool = end_tool
+        # Initialize execution tracking
+        self._execution_tracker = []
+        start_node._execution_tracker = self._execution_tracker
+
+        # Pass end strategy parameters (prefer explicit params over merged settings)
+        start_node._max_steps = (
+            max_steps if max_steps is not None else merged_settings.get("max_steps")
+        )
+        start_node._end_strategy = (
+            end_strategy
+            if end_strategy is not None
+            else merged_settings.get("end_strategy")
+        )
+        start_node._end_tool = (
+            end_tool if end_tool is not None else merged_settings.get("end_tool")
+        )
 
         # Run the pydantic graph asynchronously
         if not self._pydantic_graph:
@@ -935,6 +1502,8 @@ class BaseGraph(Generic[StateT, T]):
 
         # Use the provided state or the graph's state
         execution_state = state if state is not None else self._state
+        # Pass state to the node
+        start_node._state = execution_state
 
         try:
             # Execute the graph using pydantic-graph async
@@ -948,6 +1517,13 @@ class BaseGraph(Generic[StateT, T]):
             else:
                 output = str(result)
 
+            # Get nodes executed from the execution tracker
+            nodes_executed = getattr(self, "_execution_tracker", [])
+
+            # If no nodes tracked, at least include the start node
+            if not nodes_executed:
+                nodes_executed = [self._start_action_name]
+
             # Create our response object
             return GraphResponse(
                 type="graph",
@@ -958,7 +1534,7 @@ class BaseGraph(Generic[StateT, T]):
                 state=execution_state,
                 history=[],  # Would be populated from pydantic-graph execution
                 start_node=self._start_action_name,
-                nodes_executed=[self._start_action_name],  # Would track from execution
+                nodes_executed=nodes_executed,
                 metadata={},
             )
 
@@ -1054,23 +1630,40 @@ class BaseGraph(Generic[StateT, T]):
         start_node = start_node_class(**bound_args.arguments)
         # Pass the graph docstring to the node for global system prompt
         start_node._graph_docstring = self.__class__.__doc__ or ""
-        # Pass verbose/debug flags and language model kwargs
-        start_node._verbose = verbose
-        start_node._debug = debug
-        start_node._language_model_kwargs = language_model_kwargs
+
+        # Merge global settings with provided kwargs
+        merged_settings = self._global_settings.copy()
+        merged_settings.update(language_model_kwargs)
+
+        # Pass verbose/debug flags (prefer explicit params over global settings)
+        start_node._verbose = (
+            verbose if verbose else merged_settings.get("verbose", False)
+        )
+        start_node._debug = debug if debug else merged_settings.get("debug", False)
+        start_node._language_model_kwargs = merged_settings
+
         # Pass history if provided
         start_node._history = history
+        # Pass the graph's action nodes for routing
+        start_node._graph_action_nodes = self._action_nodes
 
-        # Pass end strategy parameters if provided
-        if max_steps is not None:
-            start_node._max_steps = max_steps
-        if end_strategy is not None:
-            start_node._end_strategy = end_strategy
-        if end_tool is not None:
-            start_node._end_tool = end_tool
+        # Pass end strategy parameters (prefer explicit params over merged settings)
+        start_node._max_steps = (
+            max_steps if max_steps is not None else merged_settings.get("max_steps")
+        )
+        start_node._end_strategy = (
+            end_strategy
+            if end_strategy is not None
+            else merged_settings.get("end_strategy")
+        )
+        start_node._end_tool = (
+            end_tool if end_tool is not None else merged_settings.get("end_tool")
+        )
 
         # Use the provided state or the graph's state
         execution_state = state if state is not None else self._state
+        # Pass state to the node
+        start_node._state = execution_state
 
         # Create and return GraphStream
         return GraphStream(
@@ -1101,3 +1694,93 @@ class BaseGraph(Generic[StateT, T]):
     def builder(cls) -> GraphBuilder[StateT, T]:
         """Create a builder for this graph."""
         return GraphBuilder(cls)
+
+    def as_a2a(
+        self,
+        *,
+        # Worker configuration
+        state: Optional[StateT] = None,
+        # Storage and broker configuration
+        storage: Optional[Any] = None,
+        broker: Optional[Any] = None,
+        # Server configuration
+        host: str = "0.0.0.0",
+        port: int = 8000,
+        reload: bool = False,
+        workers: int = 1,
+        log_level: str = "info",
+        # A2A configuration
+        name: Optional[str] = None,
+        url: Optional[str] = None,
+        version: str = "1.0.0",
+        description: Optional[str] = None,
+        # Advanced configuration
+        lifespan_timeout: int = 30,
+        **uvicorn_kwargs: Any,
+    ) -> "FastA2A":  # type: ignore
+        """
+        Convert this graph to an A2A server application.
+
+        This method creates a FastA2A server that can handle A2A requests
+        for this graph instance. It sets up the necessary Worker, Storage,
+        and Broker components automatically.
+
+        Args:
+            state: Initial state for the graph (overrides instance state)
+            storage: Custom storage backend (defaults to InMemoryStorage)
+            broker: Custom broker backend (defaults to InMemoryBroker)
+            host: Host to bind the server to
+            port: Port to bind the server to
+            reload: Enable auto-reload for development
+            workers: Number of worker processes
+            log_level: Logging level
+            name: Graph name for the A2A server
+            url: URL where the graph is hosted
+            version: API version
+            description: API description for the A2A server
+            lifespan_timeout: Timeout for lifespan events
+            **uvicorn_kwargs: Additional arguments passed to uvicorn
+
+        Returns:
+            FastA2A application instance that can be run with uvicorn
+
+        Examples:
+            Convert graph to A2A server:
+            ```python
+            class MyGraph(BaseGraph):
+                @action.start()
+                def process(self, message: str) -> str:
+                    return f"Processed: {message}"
+
+            graph = MyGraph()
+            app = graph.as_a2a(port=8080)
+
+            # Run with uvicorn
+            import uvicorn
+            uvicorn.run(app, host="0.0.0.0", port=8080)
+            ```
+
+            Or use the CLI:
+            ```bash
+            uvicorn mymodule:graph.as_a2a() --reload
+            ```
+        """
+        from ..a2a import as_a2a_app
+
+        return as_a2a_app(
+            self,
+            state=state if state is not None else self._state,
+            storage=storage,
+            broker=broker,
+            host=host,
+            port=port,
+            reload=reload,
+            workers=workers,
+            log_level=log_level,
+            name=name or self.__class__.__name__,
+            url=url,
+            version=version,
+            description=description or self.__class__.__doc__,
+            lifespan_timeout=lifespan_timeout,
+            **uvicorn_kwargs,
+        )
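Putting the pieces together, a graph built with this module can be served over A2A in a few lines. A hedged end-to-end sketch; the top-level import path is assumed, and `fasta2a` must be installed for `as_a2a` to resolve at runtime:

```python
from hammad.genai.graphs import BaseGraph, action  # assumed import path

class EchoGraph(BaseGraph):
    """Echo the user's message back."""

    @action(start=True, terminates=True)
    def echo(self, message: str) -> str: ...

app = EchoGraph(model="openai/gpt-4o-mini").as_a2a(name="echo", port=8080)

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8080)  # serves the FastA2A ASGI app
```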