quantalogic 0.50.29__py3-none-any.whl → 0.52.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/flow/__init__.py +17 -0
- quantalogic/flow/flow.py +9 -7
- quantalogic/flow/flow_extractor.py +32 -100
- quantalogic/flow/flow_generator.py +10 -3
- quantalogic/flow/flow_manager.py +88 -33
- quantalogic/flow/flow_manager_schema.py +3 -4
- quantalogic/flow/flow_mermaid.py +240 -0
- quantalogic/flow/flow_validator.py +335 -0
- quantalogic/flow/flow_yaml.md +393 -322
- quantalogic/tools/__init__.py +3 -2
- quantalogic/tools/tool.py +129 -3
- quantalogic-0.52.0.dist-info/METADATA +787 -0
- {quantalogic-0.50.29.dist-info → quantalogic-0.52.0.dist-info}/RECORD +16 -14
- quantalogic-0.50.29.dist-info/METADATA +0 -554
- {quantalogic-0.50.29.dist-info → quantalogic-0.52.0.dist-info}/LICENSE +0 -0
- {quantalogic-0.50.29.dist-info → quantalogic-0.52.0.dist-info}/WHEEL +0 -0
- {quantalogic-0.50.29.dist-info → quantalogic-0.52.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,240 @@
|
|
1
|
+
import re
|
2
|
+
from typing import Dict, List, Optional, Set, Tuple
|
3
|
+
|
4
|
+
from quantalogic.flow.flow_manager import WorkflowManager
|
5
|
+
from quantalogic.flow.flow_manager_schema import NodeDefinition, WorkflowDefinition
|
6
|
+
|
7
|
+
|
8
|
+
def get_node_label_and_type(node_name: str, node_def: Optional[NodeDefinition], has_conditions: bool) -> Tuple[str, str, str]:
    """
    Derive the display label, style key, and shape for a workflow node.

    Args:
        node_name: The name of the node.
        node_def: The NodeDefinition object from the workflow, or None if undefined.
        has_conditions: True if the node has outgoing transitions with conditions.

    Returns:
        A tuple of (display label, type key for styling, shape identifier).
    """
    # Quotes must be escaped so the name can sit inside a Mermaid label string.
    safe_name = node_name.replace('"', '\\"')

    # Decision points (conditional transitions) render as diamonds.
    node_shape = "diamond" if has_conditions else "rect"

    if not node_def:
        return f"{safe_name} (unknown)", "unknown", node_shape

    # Classify by whichever configuration the node definition carries.
    if node_def.function:
        return f"{safe_name} (function)", "function", node_shape
    if node_def.llm_config:
        if node_def.llm_config.response_model:
            return f"{safe_name} (structured LLM)", "structured_llm", node_shape
        return f"{safe_name} (LLM)", "llm", node_shape
    if node_def.sub_workflow:
        return f"{safe_name} (Sub-Workflow)", "sub_workflow", node_shape

    # Defined but carries none of the known configurations.
    return f"{safe_name} (unknown)", "unknown", node_shape
|
38
|
+
|
39
|
+
|
40
|
+
def generate_mermaid_diagram(
    workflow_def: WorkflowDefinition,
    include_subgraphs: bool = False,
    title: Optional[str] = None,
    include_legend: bool = True
) -> str:
    """
    Generate a Mermaid flowchart diagram from a WorkflowDefinition with pastel colors and optimal UX.

    Args:
        workflow_def: The workflow definition to visualize.
        include_subgraphs: If True, nests sub-workflows in Mermaid subgraphs.
        title: Optional title for the diagram.
        include_legend: If True, adds a comment-based legend explaining node types.

    Returns:
        A string containing the Mermaid syntax for the flowchart.

    Raises:
        ValueError: If node names contain invalid Mermaid characters.
    """
    # Pastel color scheme for a soft, user-friendly look
    node_styles: Dict[str, str] = {
        "function": "fill:#90CAF9,stroke:#42A5F5,stroke-width:2px",  # Pastel Blue
        "structured_llm": "fill:#A5D6A7,stroke:#66BB6A,stroke-width:2px",  # Pastel Green
        "llm": "fill:#CE93D8,stroke:#AB47BC,stroke-width:2px",  # Pastel Purple
        "sub_workflow": "fill:#FFCCBC,stroke:#FF7043,stroke-width:2px",  # Pastel Orange
        "unknown": "fill:#CFD8DC,stroke:#B0BEC5,stroke-width:2px"  # Pastel Grey
    }

    # Shape mappings for Mermaid syntax
    shape_syntax: Dict[str, Tuple[str, str]] = {
        "rect": ("[", "]"),  # Rectangle for standard nodes
        "diamond": ("{{", "}}")  # Diamond for decision points
    }

    # Mermaid node identifiers must be alphanumeric, underscore, or hyphen.
    invalid_chars = r'[^a-zA-Z0-9_-]'
    all_nodes: Set[str] = set()

    def _check_and_add(node_name: str) -> None:
        # Shared validation/collection step; the original repeated this
        # check-and-add sequence in four separate places.
        if re.search(invalid_chars, node_name):
            raise ValueError(f"Invalid node name '{node_name}' for Mermaid")
        all_nodes.add(node_name)

    if workflow_def.workflow.start:
        _check_and_add(workflow_def.workflow.start)
    for trans in workflow_def.workflow.transitions:
        _check_and_add(trans.from_node)
        if isinstance(trans.to_node, str):
            _check_and_add(trans.to_node)
        else:
            for to_node in trans.to_node:
                _check_and_add(to_node)

    # Determine which nodes have conditional transitions (single-target only,
    # matching the original: conditions on parallel fan-outs are not marked)
    conditional_nodes: Set[str] = set()
    for trans in workflow_def.workflow.transitions:
        if trans.condition and isinstance(trans.to_node, str):
            conditional_nodes.add(trans.from_node)

    # Generate node definitions and track types/shapes
    node_defs: List[str] = []
    node_types: Dict[str, str] = {}
    node_shapes: Dict[str, str] = {}
    for node in all_nodes:
        node_def = workflow_def.nodes.get(node)
        has_conditions = node in conditional_nodes
        label, node_type, shape = get_node_label_and_type(node, node_def, has_conditions)
        start_shape, end_shape = shape_syntax[shape]
        node_defs.append(f'{node}{start_shape}"{label}"{end_shape}')
        node_types[node] = node_type
        node_shapes[node] = shape

    # Generate arrows for transitions (all solid lines)
    arrows: List[str] = []
    for trans in workflow_def.workflow.transitions:
        from_node = trans.from_node
        if isinstance(trans.to_node, str):
            to_node = trans.to_node
            condition = trans.condition
            if condition:
                # Fix: truncate the RAW condition first, then escape quotes.
                # The original sliced the escaped text while deciding the
                # ellipsis from the raw length, which could cut an escape
                # sequence (\") in half and leave a dangling backslash.
                display = condition[:30] + ("..." if len(condition) > 30 else "")
                cond = display.replace('"', '\\"')
                arrows.append(f'{from_node} -->|"{cond}"| {to_node}')  # Solid arrow with condition
            else:
                arrows.append(f'{from_node} --> {to_node}')
        else:
            for to_node in trans.to_node:
                arrows.append(f'{from_node} --> {to_node}')  # Solid arrow for parallel

    # Assemble the Mermaid syntax
    mermaid_code = "```mermaid\n"
    mermaid_code += "graph TD\n"  # Top-down layout
    if title:
        mermaid_code += f" %% Diagram: {title}\n"

    # Optional legend for UX
    if include_legend:
        mermaid_code += " %% Legend:\n"
        mermaid_code += " %% - Rectangle: Process Step\n"
        mermaid_code += " %% - Diamond: Decision Point\n"
        mermaid_code += " %% - Colors: Blue (Function), Green (Structured LLM), Purple (LLM), Orange (Sub-Workflow), Grey (Unknown)\n"

    # Add node definitions
    for node_def in node_defs:
        mermaid_code += f" {node_def}\n"

    # Add transition arrows
    for arrow in arrows:
        mermaid_code += f" {arrow}\n"

    # Add styles for node types (no stroke-dasharray for solid appearance)
    for node, node_type in node_types.items():
        if node_type in node_styles:
            mermaid_code += f" style {node} {node_styles[node_type]}\n"

    # Highlight the start node with a thicker border (Mermaid merges the two
    # style statements; only stroke-width is overridden)
    if workflow_def.workflow.start and workflow_def.workflow.start in node_types:
        mermaid_code += f" style {workflow_def.workflow.start} stroke-width:4px\n"

    # Optional: Subgraphs for sub-workflows
    if include_subgraphs:
        for node, node_def in workflow_def.nodes.items():
            if node_def and node_def.sub_workflow:
                mermaid_code += f" subgraph {node}_sub[Sub-Workflow: {node}]\n"
                sub_nodes = {node_def.sub_workflow.start} if node_def.sub_workflow.start else set()
                for trans in node_def.sub_workflow.transitions:
                    sub_nodes.add(trans.from_node)
                    if isinstance(trans.to_node, str):
                        sub_nodes.add(trans.to_node)
                    else:
                        sub_nodes.update(trans.to_node)
                for sub_node in sub_nodes:
                    mermaid_code += f" {sub_node}[[{sub_node}]]\n"
                mermaid_code += " end\n"

    mermaid_code += "```\n"
    return mermaid_code
|
180
|
+
|
181
|
+
|
182
|
+
def main() -> None:
    """
    Create a complex workflow and print its improved Mermaid diagram representation.
    """
    manager = WorkflowManager()

    # Register every embedded function from a (name, source) table.
    embedded_sources = [
        ("analyze_sentiment", "async def analyze_sentiment(summary: str) -> str:\n return 'positive' if 'good' in summary.lower() else 'negative'"),
        ("extract_keywords", "async def extract_keywords(summary: str) -> str:\n return 'key1, key2'"),
        ("publish_content", "async def publish_content(summary: str, sentiment: str, keywords: str) -> str:\n return 'Published'"),
        ("revise_content", "async def revise_content(summary: str) -> str:\n return 'Revised summary'"),
    ]
    for func_name, func_code in embedded_sources:
        manager.add_function(name=func_name, type_="embedded", code=func_code)

    # LLM-backed summarization node
    llm_config = {
        "model": "grok/xai",
        "system_prompt": "You are a concise summarizer.",
        "prompt_template": "Summarize the following text: {{ input_text }}",
        "temperature": "0.5",
        "max_tokens": "150",
    }
    manager.add_node(name="summarize_text", llm_config=llm_config, output="summary")

    # Function-backed nodes: (node name, function name, output key)
    function_nodes = [
        ("sentiment_analysis", "analyze_sentiment", "sentiment"),
        ("keyword_extraction", "extract_keywords", "keywords"),
        ("publish", "publish_content", "status"),
        ("revise", "revise_content", "revised_summary"),
    ]
    for node_name, func_name, output_key in function_nodes:
        manager.add_node(name=node_name, function=func_name, output=output_key)

    # Wire the workflow: fan out after summarization, then branch on sentiment
    manager.set_start_node("summarize_text")
    manager.add_transition(from_node="summarize_text", to_node=["sentiment_analysis", "keyword_extraction"])
    manager.add_transition(from_node="sentiment_analysis", to_node="publish", condition="ctx['sentiment'] == 'positive'")
    manager.add_transition(from_node="sentiment_analysis", to_node="revise", condition="ctx['sentiment'] == 'negative'")
    manager.add_transition(from_node="keyword_extraction", to_node="publish")

    # Render and display the diagram
    workflow_def = manager.workflow
    diagram = generate_mermaid_diagram(workflow_def, include_subgraphs=False, title="Content Processing Workflow")
    print(diagram)


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,335 @@
|
|
1
|
+
import ast
|
2
|
+
import re
|
3
|
+
from collections import defaultdict
|
4
|
+
from typing import Dict, List, Set, Optional
|
5
|
+
|
6
|
+
from pydantic import ValidationError, BaseModel
|
7
|
+
|
8
|
+
from quantalogic.flow.flow_manager import WorkflowManager
|
9
|
+
from quantalogic.flow.flow_manager_schema import (
|
10
|
+
FunctionDefinition,
|
11
|
+
LLMConfig,
|
12
|
+
NodeDefinition,
|
13
|
+
TransitionDefinition,
|
14
|
+
WorkflowDefinition,
|
15
|
+
WorkflowStructure,
|
16
|
+
)
|
17
|
+
|
18
|
+
|
19
|
+
class NodeError(BaseModel):
    """Represents an error associated with a specific node or workflow component."""
    node_name: Optional[str] = None  # None if the error isn't tied to a specific node
    # Human-readable explanation of the validation problem.
    description: str
|
23
|
+
|
24
|
+
|
25
|
+
def get_function_params(code: str, func_name: str) -> List[str]:
    """Extract parameter names from an embedded function's code.

    Args:
        code: Python source text containing the function definition.
        func_name: Name of the function to look up.

    Returns:
        The positional parameter names in declaration order.

    Raises:
        ValueError: If the code does not parse, or no function with the
            given name is defined in it.
    """
    try:
        tree = ast.parse(code)
    except SyntaxError as e:
        # Chain the original error so callers see the parse failure context.
        raise ValueError(f"Invalid syntax in code: {e}") from e
    for node in ast.walk(tree):
        # Fix: embedded workflow functions are frequently declared `async def`
        # (see the flow examples in this package); the original only matched
        # ast.FunctionDef and wrongly reported async functions as missing.
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)) and node.name == func_name:
            return [arg.arg for arg in node.args.args]
    raise ValueError(f"Function '{func_name}' not found in code")
|
35
|
+
|
36
|
+
|
37
|
+
def _extract_template_inputs(prompt_template: str) -> Set[str]:
    """Collect base variable names referenced by a Jinja2-style template.

    Strips simple arithmetic so "{{ x + 1 }}" yields "x"; non-identifier
    expressions are ignored.
    """
    input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", prompt_template))
    cleaned_inputs: Set[str] = set()
    for var in input_vars:
        base_var = re.split(r"\s*[\+\-\*/]\s*", var.strip())[0].strip()
        if base_var.isidentifier():
            cleaned_inputs.add(base_var)
    return cleaned_inputs


def validate_workflow_definition(workflow_def: WorkflowDefinition) -> List[NodeError]:
    """Validate a workflow definition and return a list of NodeError objects.

    Checks function definitions, node wiring, sub-workflow structure, LLM
    configuration, unconditional cycles, observers, and whether each node's
    required inputs are produced by some ancestor in the transition graph.
    """
    issues: List[NodeError] = []
    output_names: Set[str] = set()

    # --- Function definitions -------------------------------------------
    for name, func_def in workflow_def.functions.items():
        if func_def.type == "embedded" and not func_def.code:
            issues.append(NodeError(node_name=None, description=f"Embedded function '{name}' is missing 'code'"))
        elif func_def.type == "external" and (not func_def.module or not func_def.function):
            issues.append(NodeError(node_name=None, description=f"External function '{name}' is missing 'module' or 'function'"))

    # --- Node definitions -----------------------------------------------
    for name, node_def in workflow_def.nodes.items():
        if node_def.function and node_def.function not in workflow_def.functions:
            issues.append(NodeError(node_name=name, description=f"References undefined function '{node_def.function}'"))

        if node_def.output:
            if not node_def.output.isidentifier():
                issues.append(NodeError(node_name=name, description=f"Has invalid output name '{node_def.output}'"))
            elif node_def.output in output_names:
                issues.append(NodeError(node_name=name, description=f"Has duplicate output name '{node_def.output}'"))
            output_names.add(node_def.output)

        if node_def.sub_workflow:
            # Namespace sub-workflow issues as "parent/child" for readability.
            sub_issues = validate_workflow_structure(node_def.sub_workflow, workflow_def.nodes)
            issues.extend(
                NodeError(node_name=f"{name}/{issue.node_name}" if issue.node_name else name, description=issue.description)
                for issue in sub_issues
            )

        if node_def.llm_config:
            llm = node_def.llm_config
            if not llm.model:
                issues.append(NodeError(node_name=name, description="Missing 'model' in llm_config"))
            if not llm.prompt_template:
                issues.append(NodeError(node_name=name, description="Missing 'prompt_template' in llm_config"))
            if llm.temperature < 0 or llm.temperature > 1:
                issues.append(NodeError(node_name=name, description=f"Has invalid temperature: {llm.temperature}"))

    issues.extend(validate_workflow_structure(workflow_def.workflow, workflow_def.nodes, is_main=True))
    issues.extend(check_circular_transitions(workflow_def))

    # --- Unified transition graph (main + namespaced sub-workflows) ------
    successors = defaultdict(list)
    predecessors = defaultdict(list)
    all_nodes = set(workflow_def.nodes.keys())

    for trans in workflow_def.workflow.transitions:
        from_node = trans.from_node
        to_nodes = [trans.to_node] if isinstance(trans.to_node, str) else trans.to_node
        for to_node in to_nodes:
            successors[from_node].append(to_node)
            predecessors[to_node].append(from_node)

    for parent_name, node_def in workflow_def.nodes.items():
        if node_def.sub_workflow:
            for trans in node_def.sub_workflow.transitions:
                from_node = f"{parent_name}/{trans.from_node}"
                to_nodes = [trans.to_node] if isinstance(trans.to_node, str) else trans.to_node
                namespaced_to_nodes = [f"{parent_name}/{to_node}" for to_node in to_nodes]
                all_nodes.add(from_node)
                all_nodes.update(namespaced_to_nodes)
                successors[from_node].extend(namespaced_to_nodes)
                for to_node in namespaced_to_nodes:
                    predecessors[to_node].append(from_node)

    def get_ancestors(node: str, visited: Optional[Set[str]] = None) -> Set[str]:
        """Return all transitive predecessors of *node*, tolerating cycles."""
        # Fix: the default was annotated as Set[str] while actually being None.
        if visited is None:
            visited = set()
        if node in visited or node not in all_nodes:
            return set()
        visited.add(node)
        ancestors = set(predecessors[node])
        for pred in predecessors[node]:
            ancestors.update(get_ancestors(pred, visited.copy()))
        return ancestors

    # --- Output-name -> producing-node map -------------------------------
    output_to_node = {}
    for node_name, node_def in workflow_def.nodes.items():
        if node_def.output:
            output_to_node[node_def.output] = node_name
        if node_def.sub_workflow:
            # NOTE(review): WorkflowStructure may not define a "nodes" attribute,
            # in which case __dict__.get returns {} and this loop is a no-op —
            # confirm against the schema.
            for sub_node_name in node_def.sub_workflow.__dict__.get("nodes", {}):
                sub_node_def = workflow_def.nodes.get(sub_node_name)
                if sub_node_def and sub_node_def.output:
                    output_to_node[sub_node_def.output] = f"{node_name}/{sub_node_name}"

    # --- Check each node's inputs against ancestors' outputs -------------
    # (The original contained a second, unreachable copy of this check after
    # a pair of complementary `continue`s; the dead code has been removed.)
    for node_name, node_def in workflow_def.nodes.items():
        required_inputs: Set[str] = set()
        full_node_name = node_name

        if node_def.function:
            func_def = workflow_def.functions.get(node_def.function)
            if func_def and func_def.type == "embedded" and func_def.code:
                try:
                    required_inputs = set(get_function_params(func_def.code, node_def.function))
                except ValueError as e:
                    issues.append(NodeError(node_name=node_name, description=f"Failed to parse function '{node_def.function}': {e}"))
        elif node_def.llm_config:
            required_inputs = _extract_template_inputs(node_def.llm_config.prompt_template)
        elif node_def.sub_workflow:
            for sub_node_name in node_def.sub_workflow.__dict__.get("nodes", {}):
                sub_node_def = workflow_def.nodes.get(sub_node_name)
                if sub_node_def:
                    full_node_name = f"{node_name}/{sub_node_name}"
                    if sub_node_def.function:
                        func_def = workflow_def.functions.get(sub_node_def.function)
                        if func_def and func_def.type == "embedded" and func_def.code:
                            try:
                                required_inputs = set(get_function_params(func_def.code, sub_node_def.function))
                            except ValueError as e:
                                issues.append(NodeError(node_name=full_node_name, description=f"Failed to parse function '{sub_node_def.function}': {e}"))
                    elif sub_node_def.llm_config:
                        required_inputs = _extract_template_inputs(sub_node_def.llm_config.prompt_template)

        if not required_inputs:
            continue

        ancestors = get_ancestors(full_node_name)
        for input_name in required_inputs:
            producer_node = output_to_node.get(input_name)
            if producer_node is None or producer_node not in ancestors:
                issues.append(NodeError(node_name=full_node_name, description=f"Requires input '{input_name}', but it is not produced by any ancestor"))

    # --- Observers -------------------------------------------------------
    for observer in workflow_def.observers:
        if observer not in workflow_def.functions:
            issues.append(NodeError(node_name=None, description=f"Observer '{observer}' references undefined function"))

    return issues
|
196
|
+
|
197
|
+
|
198
|
+
def validate_workflow_structure(structure: WorkflowStructure, nodes: Dict[str, NodeDefinition],
                                is_main: bool = False) -> List[NodeError]:
    """Validate a WorkflowStructure for consistency.

    Verifies the start node (required for the main workflow), that every
    transition endpoint is a defined node, and that transition conditions
    are syntactically valid Python expressions.
    """
    problems: List[NodeError] = []

    # Start-node checks: the main workflow must declare one, and any declared
    # start must refer to a known node.
    if is_main and not structure.start:
        problems.append(NodeError(node_name=None, description="Main workflow is missing a start node"))
    elif structure.start and structure.start not in nodes:
        problems.append(NodeError(node_name=structure.start, description="Start node is not defined in nodes"))

    for transition in structure.transitions:
        if transition.from_node not in nodes:
            problems.append(NodeError(node_name=transition.from_node, description="Transition from undefined node"))

        # A transition may target a single node or fan out to several.
        targets = [transition.to_node] if isinstance(transition.to_node, str) else transition.to_node
        problems.extend(
            NodeError(node_name=target, description=f"Transition to undefined node from '{transition.from_node}'")
            for target in targets
            if target not in nodes
        )

        # Conditions are evaluated with eval() at runtime, so a compile check
        # catches syntax errors early.
        if transition.condition:
            try:
                compile(transition.condition, "<string>", "eval")
            except SyntaxError:
                problems.append(NodeError(node_name=transition.from_node, description=f"Invalid condition syntax in transition: {transition.condition}"))

    return problems
|
222
|
+
|
223
|
+
|
224
|
+
def check_circular_transitions(workflow_def: WorkflowDefinition) -> List[NodeError]:
    """Detect circular transitions in the workflow using DFS, allowing cycles with conditions.

    A cycle is only reported when every transition along it is unconditional;
    conditional loops (e.g. retry-until patterns) are considered legitimate.

    Args:
        workflow_def: The workflow definition to inspect.

    Returns:
        A list of NodeError entries, one per unconditional cycle found.
    """
    issues: List[NodeError] = []

    def dfs(node: str, visited: Set[str], path: List[str], transitions: List[TransitionDefinition], path_transitions: List[TransitionDefinition]) -> None:
        if node in path:
            # `path` is an ordered list (the original used a set, whose
            # iteration order is arbitrary) so the reported cycle string is
            # deterministic and reflects the actual traversal order.
            cycle_nodes = path[path.index(node):] + [node]
            cycle = " -> ".join(cycle_nodes)
            # Fix: the original filter parsed as `(A and B) or C` due to
            # operator precedence, so a list-valued transition into the cycle
            # was counted even when its source lay outside the cycle. The
            # intended grouping is `A and (B or C)`.
            cycle_transitions = [
                t for t in path_transitions
                if t.from_node in cycle_nodes and (
                    (isinstance(t.to_node, str) and t.to_node in cycle_nodes)
                    or (isinstance(t.to_node, list) and any(to in cycle_nodes for to in t.to_node))
                )
            ]
            if all(t.condition is None for t in cycle_transitions):
                issues.append(NodeError(node_name=None, description=f"Unconditional circular transition detected: {cycle}"))
            return
        if node in visited or node not in workflow_def.nodes:
            return

        visited.add(node)
        path.append(node)

        for trans in transitions:
            if trans.from_node == node:
                path_transitions.append(trans)
                to_nodes = [trans.to_node] if isinstance(trans.to_node, str) else trans.to_node
                for next_node in to_nodes:
                    dfs(next_node, visited, path, transitions, path_transitions)
                path_transitions.pop()

        path.pop()

    # Check the main workflow, then each sub-workflow independently.
    if workflow_def.workflow.start:
        dfs(workflow_def.workflow.start, set(), [], workflow_def.workflow.transitions, [])

    for node_name, node_def in workflow_def.nodes.items():
        if node_def.sub_workflow and node_def.sub_workflow.start:
            dfs(node_def.sub_workflow.start, set(), [], node_def.sub_workflow.transitions, [])

    return issues
|
265
|
+
|
266
|
+
|
267
|
+
def main():
    """Build a sample workflow using WorkflowManager and validate it."""
    manager = WorkflowManager()

    # Function registry: one embedded, one external.
    manager.add_function(
        name="say_hello",
        type_="embedded",
        code="def say_hello():\n return 'Hello, World!'",
    )
    manager.add_function(
        name="say_goodbye",
        type_="external",
        module="external_module",
        function="goodbye_func",
    )

    # Main-workflow nodes; "outro" deliberately references a missing function.
    manager.add_node(name="start", function="say_hello", output="result")
    manager.add_node(name="outro", function="non_existent")  # Intentional: undefined function

    # LLM node with a temperature inside the valid [0, 1] range.
    manager.add_node(
        name="ai_node",
        llm_config={
            "model": "gpt-3.5-turbo",
            "prompt_template": "{{input}}",
            "temperature": 0.7,
        },
    )

    # Sub-workflow containing a deliberate unconditional cycle.
    manager.add_node(name="nested_start", function="say_hello", output="greeting")
    manager.add_node(name="nested_end", function="say_goodbye")
    sub_workflow = WorkflowStructure(start="nested_start")
    sub_workflow.transitions.extend([
        TransitionDefinition(from_node="nested_start", to_node="nested_end"),
        TransitionDefinition(from_node="nested_end", to_node="nested_start"),  # Intentional: circular
    ])
    manager.add_node(name="nested", sub_workflow=sub_workflow)

    # Main workflow wiring, including an intentional cycle and a dangling target.
    manager.set_start_node("start")
    manager.add_transition(from_node="start", to_node="outro")
    manager.add_transition(from_node="outro", to_node="start")  # Intentional: circular
    manager.add_transition(from_node="start", to_node="missing_node", strict=False)  # Intentional: undefined node

    # Register an observer that references no known function; swallow the
    # manager's rejection so validation itself gets to report it.
    try:
        manager.add_observer("undefined_observer")  # Intentional: undefined observer
    except ValueError:
        pass  # Allow validation to proceed

    # Validate the constructed workflow and report.
    issues = validate_workflow_definition(manager.workflow)

    if not issues:
        print("No issues found in workflow definition.")
        return

    print("Issues found in workflow definition:")
    for issue in sorted(issues, key=lambda item: (item.node_name or '', item.description)):
        node_part = f"Node '{issue.node_name}'" if issue.node_name else "Workflow"
        print(f"- {node_part}: {issue.description}")


if __name__ == "__main__":
    main()
|