hammad-python 0.0.24__py3-none-any.whl → 0.0.26__py3-none-any.whl

This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
@@ -1,4 +1,4 @@
- """hammad.genai.graphs.base - Graph implementation using pydantic-graph with Agent/LanguageModel integration"""
+ """hammad.genai.graphs.base"""
 
  from typing import (
      Any,
@@ -12,7 +12,9 @@ from typing import (
      Callable,
      get_type_hints,
      ParamSpec,
+     TypeAlias,
      Awaitable,
+     TYPE_CHECKING,
  )
  from typing_extensions import Literal
  from dataclasses import dataclass, field
@@ -21,12 +23,11 @@ from functools import wraps
  import asyncio
 
  from pydantic_graph import BaseNode, End, Graph as PydanticGraph, GraphRunContext
- from pydantic import BaseModel
  from ..models.language.utils import (
-     LanguageModelRequestBuilder,
      parse_messages_input,
      consolidate_system_messages,
  )
+ from ...formatting.text import convert_to_text
 
  from ..agents.agent import Agent
  from ..agents.types.agent_response import AgentResponse
@@ -43,6 +44,13 @@ from .types import (
      ActionSettings,
      GraphHistoryEntry,
  )
+ from ._utils import visualize_base_graph
+ 
+ if TYPE_CHECKING:
+     try:
+         from fasta2a import FastA2A
+     except ImportError:
+         FastA2A: TypeAlias = Any
 
  __all__ = [
      "BaseGraph",
@@ -51,6 +59,8 @@ __all__ = [
      "GraphBuilder",
      "GraphStream",
      "GraphResponseChunk",
+     "select",
+     "SelectionStrategy",
  ]
 
  T = TypeVar("T")
@@ -58,6 +68,257 @@ StateT = TypeVar("StateT")
  P = ParamSpec("P")
 
 
+ class SelectionStrategy:
+     """LLM-based selection strategy for choosing the next action."""
+ 
+     def __init__(
+         self,
+         *actions: str,
+         instructions: Optional[str] = None,
+         model: Optional[str] = None,
+     ):
+         self.actions = list(actions)
+         self.instructions = instructions
+         self.model = model or "openai/gpt-4o-mini"
+         self._language_model = None
+         self._use_all_actions = (
+             len(actions) == 0
+         )  # If no actions specified, use all available
+ 
+     def _get_language_model(self):
+         """Lazy load the language model."""
+         if self._language_model is None:
+             from ..models.language.model import LanguageModel
+ 
+             self._language_model = LanguageModel(model=self.model)
+         return self._language_model
+ 
+     def select(self, context: Optional[Dict[str, Any]] = None) -> str:
+         """Use LLM to select the most appropriate action."""
+         if not context:
+             context = {}
+ 
+         # Get available actions
+         actions_to_choose_from = self.actions
+         if self._use_all_actions and "all_actions" in context:
+             # Use all available actions from the graph
+             actions_to_choose_from = context["all_actions"]
+ 
+         if not actions_to_choose_from:
+             return ""
+ 
+         # If only one action, return it
+         if len(actions_to_choose_from) == 1:
+             return actions_to_choose_from[0]
+ 
+         # Import here to avoid circular imports
+         from pydantic import BaseModel, Field, create_model
+         from enum import Enum
+ 
+         # Create enum for available actions
+         ActionEnum = Enum(
+             "ActionEnum", {action: action for action in actions_to_choose_from}
+         )
+ 
+         # Create selection model
+         SelectionModel = create_model(
+             "ActionSelection",
+             action=(
+                 ActionEnum,
+                 Field(description="The selected action to execute next"),
+             ),
+             reasoning=(str, Field(description="Brief reasoning for the selection")),
+         )
+ 
+         # Build context description
+         context_parts = []
+ 
+         # Add result from previous action
+         if "result" in context:
+             context_parts.append(f"Previous action result: {context['result']}")
+ 
+         # Add conversation history
+         if "messages" in context and context["messages"]:
+             # Get last few messages for context
+             recent_messages = context["messages"][-5:]  # Last 5 messages
+             messages_str = "\n".join(
+                 [
+                     f"{msg.get('role', 'unknown')}: {msg.get('content', '')}"
+                     for msg in recent_messages
+                 ]
+             )
+             context_parts.append(f"Recent conversation:\n{messages_str}")
+ 
+         # Add state information
+         if "state" in context and context["state"]:
+             context_parts.append(f"Current state: {context['state']}")
+ 
+         context_description = "\n\n".join(context_parts)
+ 
+         # Build selection prompt
+         base_instructions = f"""Based on the context below, select the most appropriate next action from the available options.
+ 
+ Available actions:
+ {", ".join(actions_to_choose_from)}
+ 
+ Context:
+ {context_description}
+ 
+ Consider the conversation flow, user's request, and any patterns in the conversation when making your selection.
+ For example, if the user asked to do something multiple times (e.g., "reason twice"), and you've only done it once, select that action again."""
+ 
+         # Add custom instructions if provided
+         if self.instructions:
+             base_instructions = (
+                 f"{base_instructions}\n\nAdditional instructions:\n{self.instructions}"
+             )
+ 
+         # Get language model to make selection
+         try:
+             lm = self._get_language_model()
+             response = lm.run(
+                 messages=[{"role": "user", "content": base_instructions}],
+                 type=SelectionModel,
+             )
+ 
+             selected_action = response.output.action.value
+ 
+             # Validate the selection
+             if selected_action in actions_to_choose_from:
+                 return selected_action
+             else:
+                 # Fallback to first action if invalid selection
+                 return actions_to_choose_from[0]
+ 
+         except Exception:
+             # Fallback to first action on any error
+             return actions_to_choose_from[0] if actions_to_choose_from else ""
+ 
+     def __repr__(self) -> str:
+         if self._use_all_actions:
+             return f"SelectionStrategy(all_actions)"
+         return f"SelectionStrategy({', '.join(repr(a) for a in self.actions)})"
+ 
+     def select(self, context: Optional[Dict[str, Any]] = None) -> str:
+         """Use LLM to select the most appropriate action."""
+         if not context or not self.actions:
+             return self.actions[0] if self.actions else ""
+ 
+         # Import here to avoid circular imports
+         from pydantic import BaseModel, Field, create_model
+         from enum import Enum
+ 
+         # Create enum for available actions
+         ActionEnum = Enum("ActionEnum", {action: action for action in self.actions})
+ 
+         # Create selection model
+         SelectionModel = create_model(
+             "ActionSelection",
+             action=(
+                 ActionEnum,
+                 Field(description="The selected action to execute next"),
+             ),
+             reasoning=(str, Field(description="Brief reasoning for the selection")),
+         )
+ 
+         # Build context description
+         context_parts = []
+ 
+         # Add result from previous action
+         if "result" in context:
+             context_parts.append(f"Previous action result: {context['result']}")
+ 
+         # Add conversation history
+         if "messages" in context and context["messages"]:
+             # Get last few messages for context
+             recent_messages = context["messages"][-5:]  # Last 5 messages
+             messages_str = "\n".join(
+                 [
+                     f"{msg.get('role', 'unknown')}: {msg.get('content', '')}"
+                     for msg in recent_messages
+                 ]
+             )
+             context_parts.append(f"Recent conversation:\n{messages_str}")
+ 
+         # Add state information
+         if "state" in context and context["state"]:
+             context_parts.append(f"Current state: {context['state']}")
+ 
+         context_description = "\n\n".join(context_parts)
+ 
+         # Build selection prompt
+         base_instructions = f"""Based on the context below, select the most appropriate next action from the available options.
+ 
+ Available actions:
+ {", ".join(self.actions)}
+ 
+ Context:
+ {context_description}
+ 
+ Consider the conversation flow and any specific instructions from the user when making your selection."""
+ 
+         # Add custom instructions if provided
+         if self.instructions:
+             base_instructions = (
+                 f"{base_instructions}\n\nAdditional instructions:\n{self.instructions}"
+             )
+ 
+         # Get language model to make selection
+         try:
+             lm = self._get_language_model()
+             response = lm.run(
+                 messages=[{"role": "user", "content": base_instructions}],
+                 type=SelectionModel,
+             )
+ 
+             selected_action = response.output.action.value
+ 
+             # Validate the selection
+             if selected_action in self.actions:
+                 return selected_action
+             else:
+                 # Fallback to first action if invalid selection
+                 return self.actions[0]
+ 
+         except Exception:
+             # Fallback to first action on any error
+             return self.actions[0] if self.actions else ""
+ 
+ 
+ def select(
+     *actions: str, instructions: Optional[str] = None, model: Optional[str] = None
+ ) -> SelectionStrategy:
+     """
+     Create an LLM-based selection strategy for choosing between multiple actions.
+ 
+     Args:
+         *actions: The action names to choose from. If empty, will select from all available actions.
+         instructions: Optional instructions for the LLM selection
+         model: Optional model to use for selection (defaults to gpt-4o-mini)
+ 
+     Returns:
+         A SelectionStrategy instance
+ 
+     Examples:
+         # Select between specific actions
+         @action(next=select("poem", "response"))
+         def reasoning(self, message: str) -> str:
+             ...
+ 
+         # Select from all available actions in the graph
+         @action(next=select())
+         def reasoning(self, message: str) -> str:
+             ...
+ 
+         # With custom instructions
+         @action(next=select("reasoning", "response",
+             instructions="If the user asked for multiple reasonings, select 'reasoning' again"))
+         def reasoning(self, message: str) -> str:
+             ...
+     """
+     return SelectionStrategy(*actions, instructions=instructions, model=model)
+ 
+ 
  class ActionNode(BaseNode[StateT, None, Any]):
      """A pydantic-graph node that wraps a user-defined action function."""
 
@@ -80,6 +341,10 @@ class ActionNode(BaseNode[StateT, None, Any]):
      async def run(self, ctx: GraphRunContext[StateT]) -> Union[BaseNode, End]:
          """Execute the action function using Agent/LanguageModel infrastructure."""
 
+         # Track this node's execution
+         execution_tracker = getattr(self, "_execution_tracker", [])
+         execution_tracker.append(self.action_name)
+ 
          # Create enhanced context that wraps pydantic-graph context
          enhanced_ctx = GraphContext(
              pydantic_context=ctx,
@@ -104,18 +369,12 @@ class ActionNode(BaseNode[StateT, None, Any]):
          if hasattr(self, "_graph_docstring"):
              global_system_prompt = self._graph_docstring
 
-         # Add well-defined step execution context
-         step_context = f"""
- You are executing step '{self.action_name}' in a multi-step graph workflow.
- 
- Step Purpose: {field_instructions or "Execute the requested action"}
- 
- Execution Guidelines:
- - Focus on completing this specific step's objective
- - Provide clear, actionable output that can be used by subsequent steps
- - If this step involves decision-making, be explicit about your reasoning
- - Maintain consistency with the overall workflow context
- """
+         # Get state from the context if available
+         current_state = None
+         if hasattr(ctx, "state") and ctx.state is not None:
+             current_state = ctx.state
+         elif hasattr(self, "_state"):
+             current_state = getattr(self, "_state", None)
 
          # Check if the action function expects to handle the language model itself
          expects_language_model = (
@@ -124,14 +383,19 @@ Execution Guidelines:
          )
          if expects_language_model:
              # Legacy mode: action function expects to handle language model
-             # Combine global system prompt with field-level instructions and step context
+             # Combine global system prompt with field-level instructions and state
              combined_instructions = global_system_prompt
-             if step_context:
-                 combined_instructions += f"\n\n{step_context}"
              if field_instructions and field_instructions not in combined_instructions:
-                 combined_instructions += (
-                     f"\n\nAdditional Instructions: {field_instructions}"
-                 )
+                 if combined_instructions:
+                     combined_instructions += f"\n\n{field_instructions}"
+                 else:
+                     combined_instructions = field_instructions
+ 
+             # Add state to instructions if available
+             if current_state is not None:
+                 state_str = convert_to_text(current_state, show_defaults=False)
+                 if state_str:
+                     combined_instructions += f"\n\nState: {state_str}"
 
              # Get verbose/debug flags and language model kwargs from the node
              verbose = getattr(self, "_verbose", self.settings.verbose)
@@ -144,17 +408,40 @@ Execution Guidelines:
              end_tool = getattr(self, "_end_tool", self.settings.end_tool)
 
              if self.settings.tools or self.settings.instructions:
+                 # Get model from settings, then language_model_kwargs, then default
+                 model = self.settings.model or language_model_kwargs.get(
+                     "model", "openai/gpt-4o-mini"
+                 )
+ 
+                 # Remove parameters that will be passed explicitly to avoid duplicates
+                 filtered_kwargs = {
+                     k: v
+                     for k, v in language_model_kwargs.items()
+                     if k
+                     not in [
+                         "model",
+                         "name",
+                         "instructions",
+                         "tools",
+                         "max_steps",
+                         "end_strategy",
+                         "end_tool",
+                         "verbose",
+                         "debug",
+                     ]
+                 }
+ 
                  agent = Agent(
                      name=self.settings.name or self.action_name,
                      instructions=self.settings.instructions or combined_instructions,
-                     model=self.settings.model or "openai/gpt-4o-mini",
+                     model=model,
                      tools=self.settings.tools,
                      max_steps=max_steps,
                      end_strategy=end_strategy,
                      end_tool=end_tool,
                      verbose=verbose,
                      debug=debug,
-                     **language_model_kwargs,
+                     **filtered_kwargs,
                  )
                  # Pass history to context if available
                  history = getattr(self, "_history", None)
@@ -168,11 +455,23 @@ Execution Guidelines:
                  else:
                      result = self.action_func(enhanced_ctx, agent, **action_params)
              else:
+                 # Get model from settings, then language_model_kwargs, then default
+                 model = self.settings.model or language_model_kwargs.get(
+                     "model", "openai/gpt-4o-mini"
+                 )
+ 
+                 # Remove parameters that will be passed explicitly to avoid duplicates
+                 filtered_kwargs = {
+                     k: v
+                     for k, v in language_model_kwargs.items()
+                     if k not in ["model", "verbose", "debug"]
+                 }
+ 
                  language_model = LanguageModel(
-                     model=self.settings.model or "openai/gpt-4o-mini",
+                     model=model,
                      verbose=verbose,
                      debug=debug,
-                     **language_model_kwargs,
+                     **filtered_kwargs,
                  )
                  # Pass history to context if available
                  history = getattr(self, "_history", None)
@@ -189,42 +488,37 @@ Execution Guidelines:
                  )
          else:
              # New mode: framework handles language model internally
-             # Build the user message from the action parameters with clear context
+             # Build the user message from the action parameters
              user_message = ""
              if action_params:
                  if len(action_params) == 1:
-                     # Single parameter - use its value directly with context
+                     # Single parameter - use its value directly
                      param_value = list(action_params.values())[0]
-                     user_message = f"Process the following input for step '{self.action_name}':\n\n{param_value}"
+                     user_message = str(param_value)
                  else:
                      # Multiple parameters - format them clearly
                      param_list = "\n".join(
-                         f"- {k}: {v}" for k, v in action_params.items()
+                         f"{k}: {v}" for k, v in action_params.items()
                      )
-                     user_message = f"Execute step '{self.action_name}' with the following parameters:\n\n{param_list}"
+                     user_message = param_list
              else:
-                 # No parameters - provide clear step instruction
-                 user_message = f"Execute the '{self.action_name}' step of the workflow."
+                 # No parameters - check if we have previous conversation history
+                 # If we do, don't add an empty user message
+                 user_message = ""
 
-             # Combine global system prompt with step context and field-level instructions
+             # Combine global system prompt with field-level instructions and state
              combined_instructions = global_system_prompt
-             if step_context:
-                 combined_instructions += f"\n\n{step_context}"
              if field_instructions and field_instructions not in combined_instructions:
-                 combined_instructions += (
-                     f"\n\nAdditional Instructions: {field_instructions}"
-                 )
+                 if combined_instructions:
+                     combined_instructions += f"\n\n{field_instructions}"
+                 else:
+                     combined_instructions = field_instructions
 
-             # Add execution guidelines for framework mode
-             execution_guidelines = """
- 
- Execution Guidelines:
- - Provide a clear, direct response that addresses the step's objective
- - Your output will be used as input for subsequent workflow steps
- - Be concise but comprehensive in your response
- - If making decisions or analysis, show your reasoning process
- """
-             combined_instructions += execution_guidelines
+             # Add state to instructions if available
+             if current_state is not None:
+                 state_str = convert_to_text(current_state, show_defaults=False)
+                 if state_str:
+                     combined_instructions += f"\n\nContext: {state_str}"
 
              # Get verbose/debug flags and language model kwargs from the node
              verbose = getattr(self, "_verbose", self.settings.verbose)
@@ -239,59 +533,144 @@ Execution Guidelines:
              # Determine if we need to use Agent or LanguageModel
              if self.settings.tools or self.settings.instructions:
                  # Use Agent for complex operations with tools/instructions
+                 # Get model from settings, then language_model_kwargs, then default
+                 model = self.settings.model or language_model_kwargs.get(
+                     "model", "openai/gpt-4o-mini"
+                 )
+ 
+                 # Remove parameters that will be passed explicitly to avoid duplicates
+                 filtered_kwargs = {
+                     k: v
+                     for k, v in language_model_kwargs.items()
+                     if k
+                     not in [
+                         "model",
+                         "name",
+                         "instructions",
+                         "tools",
+                         "max_steps",
+                         "end_strategy",
+                         "end_tool",
+                         "verbose",
+                         "debug",
+                     ]
+                 }
+ 
                  agent = Agent(
                      name=self.settings.name or self.action_name,
                      instructions=self.settings.instructions or combined_instructions,
-                     model=self.settings.model or "openai/gpt-4o-mini",
+                     model=model,
                      tools=self.settings.tools,
                      max_steps=max_steps,
                      end_strategy=end_strategy,
                      end_tool=end_tool,
                      verbose=verbose,
                      debug=debug,
-                     **language_model_kwargs,
+                     **filtered_kwargs,
                  )
 
                  # Get history if available
                  history = getattr(self, "_history", None)
 
+                 # Check if we have previous conversation history from the graph execution
+                 previous_messages = getattr(self, "_graph_messages", [])
+ 
+                 # Store the current user message for history building
+                 if user_message:
+                     self._current_user_message = user_message
+ 
                  # Run the agent with the user message and history
                  if history:
                      # If history is provided, we need to combine it with the user message
                      # The history should be the conversation context, and user_message is the new input
                      combined_messages = parse_messages_input(history)
-                     combined_messages.append({"role": "user", "content": user_message})
+                     combined_messages.extend(previous_messages)
+                     if user_message:  # Only add non-empty user messages
+                         combined_messages.append(
+                             {"role": "user", "content": user_message}
+                         )
+                     agent_result = await agent.async_run(combined_messages)
+                 elif previous_messages:
+                     # If we have previous messages from the graph, use them
+                     combined_messages = previous_messages.copy()
+                     if user_message:  # Only add non-empty user messages
+                         combined_messages.append(
+                             {"role": "user", "content": user_message}
+                         )
                      agent_result = await agent.async_run(combined_messages)
                  else:
-                     agent_result = await agent.async_run(user_message)
+                     # Only run with user message if it's not empty
+                     if user_message:
+                         agent_result = await agent.async_run(user_message)
+                     else:
+                         # If no user message and no history, we can't run the agent
+                         raise ValueError(
+                             "No user message or history provided for agent execution"
+                         )
                  result = agent_result.output
              else:
                  # Use LanguageModel for simple operations
+                 # Get model from settings, then language_model_kwargs, then default
+                 model = self.settings.model or language_model_kwargs.get(
+                     "model", "openai/gpt-4o-mini"
+                 )
+ 
+                 # Remove parameters that will be passed explicitly to avoid duplicates
+                 filtered_kwargs = {
+                     k: v
+                     for k, v in language_model_kwargs.items()
+                     if k not in ["model", "verbose", "debug"]
+                 }
+ 
                  language_model = LanguageModel(
-                     model=self.settings.model or "openai/gpt-4o-mini",
+                     model=model,
                      verbose=verbose,
                      debug=debug,
-                     **language_model_kwargs,
+                     **filtered_kwargs,
                  )
 
                  # Get history if available
                  history = getattr(self, "_history", None)
 
+                 # Check if we have previous conversation history from the graph execution
+                 previous_messages = getattr(self, "_graph_messages", [])
+ 
                  # Create messages using the language model utils
                  if history:
                      # If history is provided, use it as the base messages
                      messages = parse_messages_input(
                          history, instructions=combined_instructions
                      )
+                     # Add any previous graph messages
+                     messages.extend(previous_messages)
                      # Then add the user message from action parameters
-                     messages.append({"role": "user", "content": user_message})
-                 else:
-                     # Otherwise, use the user message
+                     if user_message:  # Only add non-empty user messages
+                         messages.append({"role": "user", "content": user_message})
+                 elif previous_messages:
+                     # If we have previous messages from the graph, use them
                      messages = parse_messages_input(
-                         user_message, instructions=combined_instructions
+                         "", instructions=combined_instructions
                      )
+                     messages.extend(previous_messages)
+                     if user_message:  # Only add non-empty user messages
+                         messages.append({"role": "user", "content": user_message})
+                 else:
+                     # Otherwise, use the user message (if not empty)
+                     if user_message:
+                         messages = parse_messages_input(
+                             user_message, instructions=combined_instructions
+                         )
+                     else:
+                         # If no user message and no history, just use instructions
+                         messages = parse_messages_input(
+                             "", instructions=combined_instructions
+                         )
                  messages = consolidate_system_messages(messages)
 
+                 # Store the current user message for history building
+                 if user_message:
+                     self._current_user_message = user_message
+ 
                  # Run the language model with the consolidated messages
                  lm_result = await language_model.async_run(messages)
                  result = lm_result.output
@@ -310,9 +689,83 @@ Execution Guidelines:
          elif self.settings.terminates:
              return End(result)
          else:
-             # For non-terminating actions that don't return a node, continue to next
-             # This would be more sophisticated in a real implementation with routing
-             return End(result)
+             # Check if there's a next action defined
+             if self.settings.next:
+                 # Handle different types of next specifications
+                 next_action_name = None
+ 
+                 if isinstance(self.settings.next, str):
+                     # Simple string case
+                     next_action_name = self.settings.next
+                 elif isinstance(self.settings.next, list):
+                     # List case - for now, just pick the first one
+                     # In the future, this could execute all in parallel
+                     if self.settings.next:
+                         next_action_name = self.settings.next[0]
+                 elif isinstance(self.settings.next, SelectionStrategy):
+                     # Selection strategy case - use the strategy to pick an action
+                     context = {
+                         "result": result,
+                         "state": getattr(self, "_state", None),
+                         "messages": getattr(self, "_graph_messages", []),
+                     }
+                     # If using all actions, pass them in the context
+                     if self.settings.next._use_all_actions and hasattr(
+                         self, "_graph_action_nodes"
+                     ):
+                         context["all_actions"] = list(self._graph_action_nodes.keys())
+                     next_action_name = self.settings.next.select(context)
+                 else:
+                     # Invalid type for next
+                     return End(result)
+ 
+                 # Find the next node class from the graph's action nodes
+                 if hasattr(self, "_graph_action_nodes") and next_action_name:
+                     next_node_class = self._graph_action_nodes.get(next_action_name)
+                     if next_node_class:
+                         # Create the next node instance
+                         # For graph flow, we don't pass the result as a parameter
+                         # The conversation history will contain the context
+                         next_node = next_node_class()
+ 
+                         # Copy over any graph-specific attributes
+                         for attr in [
+                             "_graph_docstring",
+                             "_verbose",
+                             "_debug",
+                             "_language_model_kwargs",
+                             "_history",
+                             "_state",
+                             "_graph_action_nodes",
+                             "_execution_tracker",
+                         ]:
+                             if hasattr(self, attr):
+                                 setattr(next_node, attr, getattr(self, attr))
+ 
+                         # Build up the conversation history for the next node
+                         current_messages = getattr(self, "_graph_messages", [])
+                         # Add the current interaction to the conversation history
+                         # Only add the user message if it was actually provided (not empty)
+                         if (
+                             hasattr(self, "_current_user_message")
+                             and self._current_user_message
+                         ):
+                             current_messages.append(
+                                 {"role": "user", "content": self._current_user_message}
+                             )
+                         # Add the assistant response from this node
+                         current_messages.append(
+                             {"role": "assistant", "content": str(result)}
+                         )
+                         next_node._graph_messages = current_messages
+ 
+                         return next_node
+ 
+                 # If we can't find any valid next node, terminate
+                 return End(result)
+             else:
+                 # No next action defined, terminate
+                 return End(result)
 
 
  class ActionDecorator:
@@ -333,7 +786,7 @@ class ActionDecorator:
          start: bool = False,
          terminates: bool = False,
          xml: Optional[str] = None,
-         next: Optional[Union[str, List[str]]] = None,
+         next: Optional[Union[str, List[str], SelectionStrategy]] = None,
          read_history: bool = False,
          persist_history: bool = False,
          condition: Optional[str] = None,
@@ -373,8 +826,31 @@ class ActionDecorator:
 
          def decorator(f: Callable) -> Callable:
              action_name = name or f.__name__
+ 
+             # Check if action name is reserved
+             reserved_names = {
+                 'run', 'async_run', 'iter', 'async_iter',
+                 'visualize', 'builder', 'as_a2a',
+                 '_initialize', '_collect_state_class', '_collect_actions',
+                 '_create_pydantic_graph', '_get_start_action_signature'
+             }
+             if action_name in reserved_names:
+                 raise ValueError(
+                     f"Action name '{action_name}' is reserved and cannot be used. "
+                     f"Reserved names include: {', '.join(sorted(reserved_names))}. "
+                     "Please choose a different name for your action."
+                 )
+ 
+             # Check that the action has at least one parameter besides 'self'
+             sig = inspect.signature(f)
+             params = [p for p in sig.parameters if p != 'self']
+             if not params:
+                 raise ValueError(
+                     f"Action '{action_name}' must have at least one parameter besides 'self'. "
+                     "Actions need input parameters to process."
+                 )
 
-             # Create a dynamic ActionNode class for this specific action
+             # Create a dynamic ActionNode class for this specific action with unique name
              class DynamicActionNode(ActionNode[StateT]):
                  def __init__(self, **action_params):
                      super().__init__(
@@ -384,6 +860,11 @@ class ActionDecorator:
                          **action_params,
                      )
 
+                 @classmethod
+                 def get_node_id(cls):
+                     """Override to provide unique node ID based on action name."""
+                     return f"DynamicActionNode_{action_name}"
+ 
              # Store the action
              self._actions[action_name] = DynamicActionNode
              if start:
@@ -460,10 +941,44 @@ class GraphBuilder(Generic[StateT, T]):
  class BaseGraph(Generic[StateT, T]):
      """Base class for graphs that provides action decorator support on top of pydantic-graph."""
 
-     def __init__(self, state: Optional[StateT] = None):
-         self._plugins: List[BasePlugin] = []
-         self._global_model: Optional[LanguageModelName] = None
-         self._global_settings: Dict[str, Any] = {}
+     def __init__(
+         self,
+         state: Optional[StateT] = None,
+         *,
+         model: Optional[LanguageModelName | str] = "openai/gpt-4.1-nano",
+         temperature: Optional[float] = None,
+         max_tokens: Optional[int] = None,
+         tools: Optional[List[Callable]] = None,
+         verbose: bool = False,
+         debug: bool = False,
+         max_steps: Optional[int] = None,
+         end_strategy: Optional[Literal["tool"]] = None,
+         end_tool: Optional[Callable] = None,
+         summarize_tools: bool = True,
+         summarize_tools_with_model: bool = False,
+         plugins: Optional[List[BasePlugin]] = None,
+         **kwargs: Any,
+     ):
+         self._plugins: List[BasePlugin] = plugins or []
+         self._global_model: Optional[LanguageModelName] = model
+         self._global_settings: Dict[str, Any] = {
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+             "tools": tools,
+             "verbose": verbose,
+             "debug": debug,
+             "max_steps": max_steps,
+             "end_strategy": end_strategy,
+             "end_tool": end_tool,
+             "summarize_tools": summarize_tools,
+             "summarize_tools_with_model": summarize_tools_with_model,
+             **kwargs,
+         }
+         # Remove None values from settings
+         self._global_settings = {
+             k: v for k, v in self._global_settings.items() if v is not None
+         }
+ 
          self._pydantic_graph: Optional[PydanticGraph] = None
          self._action_nodes: Dict[str, Type[ActionNode]] = {}
          self._start_action_name: Optional[str] = None
@@ -503,6 +1018,8 @@ class BaseGraph(Generic[StateT, T]):
      def _collect_actions(self) -> None:
          """Collect all actions defined in the graph class."""
          actions_found = []
+         start_action = None
+         end_action = None
 
          # Get the graph class docstring for global system prompt
          graph_docstring = self.__class__.__doc__ or ""
@@ -523,6 +1040,14 @@ class BaseGraph(Generic[StateT, T]):
                  )
                  self._start_action_name = action_name
                  self._start_action_func = attr
+                 start_action = attr
+ 
+                 # Check if this is an end action (terminates=True)
+                 if (
+                     hasattr(attr, "_action_settings")
+                     and attr._action_settings.terminates
+                 ):
+                     end_action = attr
 
          # If no explicit start action was defined and we have exactly one action,
          # automatically make it the start action
@@ -531,6 +1056,13 @@ class BaseGraph(Generic[StateT, T]):
              self._start_action_name = action_name
              self._start_action_func = action_func
 
+         # Special case: If we have exactly 2 actions (start -> end), automatically set up routing
+         if len(actions_found) == 2 and start_action and end_action:
+             # Check if the start action doesn't already have a 'next' defined
+             if start_action._action_settings.next is None:
+                 # Automatically set the start action to route to the end action
+                 start_action._action_settings.next = end_action._action_name
+ 
          # Store the graph docstring in all action nodes for access during execution
          for action_node_class in self._action_nodes.values():
              # We'll add this to the action node instances when they're created
@@ -648,20 +1180,38 @@ class BaseGraph(Generic[StateT, T]):
          start_node = start_node_class(**bound_args.arguments)
          # Pass the graph docstring to the node for global system prompt
          start_node._graph_docstring = self.__class__.__doc__ or ""
-         # Pass verbose/debug flags and language model kwargs
-         start_node._verbose = verbose
-         start_node._debug = debug
-         start_node._language_model_kwargs = language_model_kwargs
+ 
+         # Merge global settings with provided kwargs
+         merged_settings = self._global_settings.copy()
+         merged_settings.update(language_model_kwargs)
+ 
+         # Include the global model if it's set and not overridden
+         if self._global_model and "model" not in merged_settings:
+             merged_settings["model"] = self._global_model
+ 
+         # Pass verbose/debug flags (prefer explicit params over global settings)
+         start_node._verbose = (
+             verbose if verbose else merged_settings.get("verbose", False)
+         )
+         start_node._debug = debug if debug else merged_settings.get("debug", False)
+         start_node._language_model_kwargs = merged_settings
+ 
          # Pass history if provided
          start_node._history = history
+         # Pass the graph's action nodes for routing
+         start_node._graph_action_nodes = self._action_nodes
 
-         # Pass end strategy parameters if provided
-         if "max_steps" in language_model_kwargs:
-             start_node._max_steps = language_model_kwargs["max_steps"]
-         if "end_strategy" in language_model_kwargs:
-             start_node._end_strategy = language_model_kwargs["end_strategy"]
-         if "end_tool" in language_model_kwargs:
-             start_node._end_tool = language_model_kwargs["end_tool"]
+         # Initialize execution tracking
+         self._execution_tracker = []
+         start_node._execution_tracker = self._execution_tracker
+ 
+         # Pass end strategy parameters (from merged settings)
+         if "max_steps" in merged_settings:
+             start_node._max_steps = merged_settings["max_steps"]
+         if "end_strategy" in merged_settings:
+             start_node._end_strategy = merged_settings["end_strategy"]
+         if "end_tool" in merged_settings:
+             start_node._end_tool = merged_settings["end_tool"]
 
          # Run the pydantic graph
          if not self._pydantic_graph:
@@ -669,6 +1219,8 @@ class BaseGraph(Generic[StateT, T]):
 
          # Use the provided state or the graph's state
          execution_state = state if state is not None else self._state
+         # Pass state to the node
+         start_node._state = execution_state
 
          # Execute the graph using pydantic-graph
          try:
@@ -683,6 +1235,13 @@ class BaseGraph(Generic[StateT, T]):
              else:
                  output = str(result)
 
+             # Get nodes executed from the execution tracker
+             nodes_executed = getattr(self, "_execution_tracker", [])
+ 
+             # If no nodes tracked, at least include the start node
+             if not nodes_executed:
+                 nodes_executed = [self._start_action_name]
+ 
              # Create our response object
              return GraphResponse(
                  type="graph",
@@ -693,7 +1252,7 @@ class BaseGraph(Generic[StateT, T]):
                  state=execution_state,
                  history=[],  # Would be populated from pydantic-graph execution
                  start_node=self._start_action_name,
-                 nodes_executed=[self._start_action_name],  # Would track from execution
+                 nodes_executed=nodes_executed,
                  metadata={},
              )
 
@@ -794,23 +1353,44 @@ class BaseGraph(Generic[StateT, T]):
          start_node = start_node_class(**bound_args.arguments)
          # Pass the graph docstring to the node for global system prompt
          start_node._graph_docstring = self.__class__.__doc__ or ""
-         # Pass verbose/debug flags and language model kwargs
-         start_node._verbose = verbose
-         start_node._debug = debug
-         start_node._language_model_kwargs = language_model_kwargs
+ 
+         # Merge global settings with provided kwargs
+         merged_settings = self._global_settings.copy()
+         merged_settings.update(language_model_kwargs)
+ 
+         # Include the global model if it's set and not overridden
+         if self._global_model and "model" not in merged_settings:
+             merged_settings["model"] = self._global_model
+ 
+         # Pass verbose/debug flags (prefer explicit params over global settings)
+         start_node._verbose = (
+             verbose if verbose else merged_settings.get("verbose", False)
+         )
+         start_node._debug = debug if debug else merged_settings.get("debug", False)
+         start_node._language_model_kwargs = merged_settings
+ 
          # Pass history if provided
          start_node._history = history
+         # Pass the graph's action nodes for routing
+         start_node._graph_action_nodes = self._action_nodes
 
-         # Pass end strategy parameters if provided
-         if max_steps is not None:
-             start_node._max_steps = max_steps
-         if end_strategy is not None:
-             start_node._end_strategy = end_strategy
-         if end_tool is not None:
-             start_node._end_tool = end_tool
+         # Pass end strategy parameters (prefer explicit params over merged settings)
+         start_node._max_steps = (
+             max_steps if max_steps is not None else merged_settings.get("max_steps")
+         )
+         start_node._end_strategy = (
+             end_strategy
+             if end_strategy is not None
+             else merged_settings.get("end_strategy")
+         )
+         start_node._end_tool = (
+             end_tool if end_tool is not None else merged_settings.get("end_tool")
+         )
 
          # Use the provided state or the graph's state
          execution_state = state if state is not None else self._state
+         # Pass state to the node
+         start_node._state = execution_state
 
          # Create and return GraphStream
          return GraphStream(
@@ -914,20 +1494,43 @@ class BaseGraph(Generic[StateT, T]):
          start_node = start_node_class(**bound_args.arguments)
          # Pass the graph docstring to the node for global system prompt
          start_node._graph_docstring = self.__class__.__doc__ or ""
-         # Pass verbose/debug flags and language model kwargs
-         start_node._verbose = verbose
-         start_node._debug = debug
-         start_node._language_model_kwargs = language_model_kwargs
+ 
+         # Merge global settings with provided kwargs
+         merged_settings = self._global_settings.copy()
+         merged_settings.update(language_model_kwargs)
+ 
+         # Include the global model if it's set and not overridden
+         if self._global_model and "model" not in merged_settings:
+             merged_settings["model"] = self._global_model
+ 
+         # Pass verbose/debug flags (prefer explicit params over global settings)
+         start_node._verbose = (
+             verbose if verbose else merged_settings.get("verbose", False)
+         )
+         start_node._debug = debug if debug else merged_settings.get("debug", False)
+         start_node._language_model_kwargs = merged_settings
+ 
          # Pass history if provided
          start_node._history = history
+         # Pass the graph's action nodes for routing
+         start_node._graph_action_nodes = self._action_nodes
 
-         # Pass end strategy parameters if provided
-         if max_steps is not None:
-             start_node._max_steps = max_steps
-         if end_strategy is not None:
-             start_node._end_strategy = end_strategy
-         if end_tool is not None:
-             start_node._end_tool = end_tool
+         # Initialize execution tracking
+         self._execution_tracker = []
+         start_node._execution_tracker = self._execution_tracker
+ 
+         # Pass end strategy parameters (prefer explicit params over merged settings)
+         start_node._max_steps = (
+             max_steps if max_steps is not None else merged_settings.get("max_steps")
+         )
+         start_node._end_strategy = (
+             end_strategy
+             if end_strategy is not None
+             else merged_settings.get("end_strategy")
+         )
+         start_node._end_tool = (
+             end_tool if end_tool is not None else merged_settings.get("end_tool")
+         )
 
          # Run the pydantic graph asynchronously
          if not self._pydantic_graph:
@@ -935,6 +1538,8 @@ class BaseGraph(Generic[StateT, T]):
 
          # Use the provided state or the graph's state
          execution_state = state if state is not None else self._state
+         # Pass state to the node
+         start_node._state = execution_state
 
          try:
              # Execute the graph using pydantic-graph async
@@ -948,6 +1553,13 @@ class BaseGraph(Generic[StateT, T]):
              else:
                  output = str(result)
 
+             # Get nodes executed from the execution tracker
+             nodes_executed = getattr(self, "_execution_tracker", [])
+ 
+             # If no nodes tracked, at least include the start node
+             if not nodes_executed:
+                 nodes_executed = [self._start_action_name]
+ 
              # Create our response object
              return GraphResponse(
                  type="graph",
@@ -958,7 +1570,7 @@ class BaseGraph(Generic[StateT, T]):
                  state=execution_state,
                  history=[],  # Would be populated from pydantic-graph execution
                  start_node=self._start_action_name,
-                 nodes_executed=[self._start_action_name],  # Would track from execution
+                 nodes_executed=nodes_executed,
                  metadata={},
              )
 
@@ -1054,23 +1666,44 @@ class BaseGraph(Generic[StateT, T]):
          start_node = start_node_class(**bound_args.arguments)
          # Pass the graph docstring to the node for global system prompt
          start_node._graph_docstring = self.__class__.__doc__ or ""
-         # Pass verbose/debug flags and language model kwargs
-         start_node._verbose = verbose
-         start_node._debug = debug
-         start_node._language_model_kwargs = language_model_kwargs
+ 
+         # Merge global settings with provided kwargs
+         merged_settings = self._global_settings.copy()
+         merged_settings.update(language_model_kwargs)
+ 
+         # Include the global model if it's set and not overridden
+         if self._global_model and "model" not in merged_settings:
+             merged_settings["model"] = self._global_model
+ 
+         # Pass verbose/debug flags (prefer explicit params over global settings)
+         start_node._verbose = (
+             verbose if verbose else merged_settings.get("verbose", False)
+         )
+         start_node._debug = debug if debug else merged_settings.get("debug", False)
+         start_node._language_model_kwargs = merged_settings
+ 
          # Pass history if provided
          start_node._history = history
+         # Pass the graph's action nodes for routing
+         start_node._graph_action_nodes = self._action_nodes
 
-         # Pass end strategy parameters if provided
-         if max_steps is not None:
-             start_node._max_steps = max_steps
-         if end_strategy is not None:
-             start_node._end_strategy = end_strategy
-         if end_tool is not None:
-             start_node._end_tool = end_tool
+         # Pass end strategy parameters (prefer explicit params over merged settings)
+         start_node._max_steps = (
+             max_steps if max_steps is not None else merged_settings.get("max_steps")
+         )
+         start_node._end_strategy = (
+             end_strategy
+             if end_strategy is not None
+             else merged_settings.get("end_strategy")
+         )
+         start_node._end_tool = (
+             end_tool if end_tool is not None else merged_settings.get("end_tool")
+         )
 
          # Use the provided state or the graph's state
          execution_state = state if state is not None else self._state
+         # Pass state to the node
+         start_node._state = execution_state
 
          # Create and return GraphStream
          return GraphStream(
@@ -1085,19 +1718,111 @@ class BaseGraph(Generic[StateT, T]):
              **language_model_kwargs,
          )
 
-     def visualize(self, filename: str) -> None:
-         """Generate a visualization of the graph using pydantic-graph's mermaid support."""
-         if self._pydantic_graph and self._start_action_name:
-             start_node_class = self._action_nodes.get(self._start_action_name)
-             if start_node_class:
-                 # Use pydantic-graph's built-in mermaid generation
-                 mermaid_code = self._pydantic_graph.mermaid_code(
-                     start_node=start_node_class
-                 )
-                 with open(filename, "w") as f:
-                     f.write(mermaid_code)
- 
      @classmethod
      def builder(cls) -> GraphBuilder[StateT, T]:
          """Create a builder for this graph."""
          return GraphBuilder(cls)
+ 
+     def as_a2a(
+         self,
+         *,
+         # Worker configuration
+         state: Optional[StateT] = None,
+         # Storage and broker configuration
+         storage: Optional[Any] = None,
+         broker: Optional[Any] = None,
+         # Server configuration
+         host: str = "0.0.0.0",
+         port: int = 8000,
+         reload: bool = False,
+         workers: int = 1,
+         log_level: str = "info",
+         # A2A configuration
+         name: Optional[str] = None,
+         url: Optional[str] = None,
+         version: str = "1.0.0",
+         description: Optional[str] = None,
+         # Advanced configuration
+         lifespan_timeout: int = 30,
+         **uvicorn_kwargs: Any,
+     ) -> "FastA2A":  # type: ignore
+         """
+         Convert this graph to an A2A server application.
+ 
+         This method creates a FastA2A server that can handle A2A requests
+         for this graph instance. It sets up the necessary Worker, Storage,
+         and Broker components automatically.
+ 
+         Args:
+             state: Initial state for the graph (overrides instance state)
+             storage: Custom storage backend (defaults to InMemoryStorage)
+             broker: Custom broker backend (defaults to InMemoryBroker)
+             host: Host to bind the server to
+             port: Port to bind the server to
+             reload: Enable auto-reload for development
+             workers: Number of worker processes
+             log_level: Logging level
+             name: Graph name for the A2A server
+             url: URL where the graph is hosted
+             version: API version
+             description: API description for the A2A server
+             lifespan_timeout: Timeout for lifespan events
+             **uvicorn_kwargs: Additional arguments passed to uvicorn
+ 
+         Returns:
+             FastA2A application instance that can be run with uvicorn
+ 
+         Examples:
+             Convert graph to A2A server:
+             ```python
+             class MyGraph(BaseGraph):
+                 @action.start()
+                 def process(self, message: str) -> str:
+                     return f"Processed: {message}"
+ 
+             graph = MyGraph()
+             app = graph.as_a2a(port=8080)
+ 
+             # Run with uvicorn
+             import uvicorn
+             uvicorn.run(app, host="0.0.0.0", port=8080)
+             ```
+ 
+             Or use the CLI:
+             ```bash
+             uvicorn mymodule:graph.as_a2a() --reload
+             ```
+         """
+         from ..a2a import as_a2a_app
+ 
+         return as_a2a_app(
+             self,
+             state=state if state is not None else self._state,
+             storage=storage,
+             broker=broker,
+             host=host,
+             port=port,
+             reload=reload,
+             workers=workers,
+             log_level=log_level,
+             name=name or self.__class__.__name__,
+             url=url,
+             version=version,
+             description=description or self.__class__.__doc__,
+             lifespan_timeout=lifespan_timeout,
+             **uvicorn_kwargs,
+         )
+ 
+     def visualize(self, filename: str) -> None:
+         """Visualize the graph as mermaid.
+ 
+         Args:
+             filename: The filename to save the visualization to.
+ 
+                 Ex: 'graph.png' / 'graph.mmd'
+ 
+         Returns:
+             None
+ 
+         """
+         visualize_base_graph(self, filename)
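
Taken together, the additions in this release introduce LLM-driven routing between actions (`select` / `SelectionStrategy`), graph-level model and settings defaults on the `BaseGraph` constructor, an `as_a2a()` server adapter, and a `visualize()` method backed by `visualize_base_graph`. The sketch below assembles those pieces into a minimal usage example based on the docstrings and signatures in the diff above. It is illustrative only, not the package's documented quickstart: the top-level import path `hammad.genai.graphs`, the `start=True` decorator usage, and the `.output` attribute on the run result are assumptions inferred from this diff rather than verified against the released 0.0.26 wheel.

```python
# Illustrative sketch only; import path and result attribute are assumptions
# inferred from this diff, not confirmed against the released package.
from hammad.genai.graphs import BaseGraph, action, select


class PoetryGraph(BaseGraph):
    """You are a concise, helpful poetry assistant."""  # class docstring becomes the global system prompt

    # LLM-based routing (new in this release): after `reasoning` runs, a small
    # selector model picks the next action from the named candidates.
    # Note: actions must declare at least one parameter besides `self`,
    # per the new validation in ActionDecorator.
    @action(start=True, next=select("poem", "response"))
    def reasoning(self, message: str) -> str: ...

    @action(terminates=True)
    def poem(self, message: str) -> str: ...

    @action(terminates=True)
    def response(self, message: str) -> str: ...


# Constructor-level defaults (model, verbose, etc.) are also new in 0.0.26.
graph = PoetryGraph(model="openai/gpt-4o-mini", verbose=True)
result = graph.run("Write me a haiku about rain")
print(result.output)  # assumed attribute, mirroring `agent_result.output` above

graph.visualize("graph.mmd")   # now delegates to visualize_base_graph
app = graph.as_a2a(port=8080)  # A2A server app, runnable with uvicorn
```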