quantalogic 0.52.0__py3-none-any.whl → 0.53.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/agent.py +1 -1
- quantalogic/flow/flow.py +48 -26
- quantalogic/flow/flow_extractor.py +11 -12
- quantalogic/flow/flow_generator.py +164 -31
- quantalogic/flow/flow_manager.py +39 -5
- quantalogic/flow/flow_manager_schema.py +22 -2
- quantalogic/flow/flow_mermaid.py +117 -75
- quantalogic/flow/flow_validator.py +41 -24
- quantalogic/flow/flow_yaml.md +76 -2
- quantalogic/prompts/system_prompt.j2 +35 -39
- {quantalogic-0.52.0.dist-info → quantalogic-0.53.0.dist-info}/METADATA +3 -1
- {quantalogic-0.52.0.dist-info → quantalogic-0.53.0.dist-info}/RECORD +15 -15
- {quantalogic-0.52.0.dist-info → quantalogic-0.53.0.dist-info}/LICENSE +0 -0
- {quantalogic-0.52.0.dist-info → quantalogic-0.53.0.dist-info}/WHEEL +0 -0
- {quantalogic-0.52.0.dist-info → quantalogic-0.53.0.dist-info}/entry_points.txt +0 -0
quantalogic/flow/flow_mermaid.py
CHANGED
@@ -41,23 +41,28 @@ def generate_mermaid_diagram(
|
|
41
41
|
workflow_def: WorkflowDefinition,
|
42
42
|
include_subgraphs: bool = False,
|
43
43
|
title: Optional[str] = None,
|
44
|
-
include_legend: bool = True
|
44
|
+
include_legend: bool = True,
|
45
|
+
diagram_type: str = "flowchart"
|
45
46
|
) -> str:
|
46
47
|
"""
|
47
|
-
Generate a Mermaid flowchart
|
48
|
+
Generate a Mermaid diagram (flowchart or stateDiagram) from a WorkflowDefinition with pastel colors and optimal UX.
|
48
49
|
|
49
50
|
Args:
|
50
51
|
workflow_def: The workflow definition to visualize.
|
51
|
-
include_subgraphs: If True, nests sub-workflows in Mermaid subgraphs.
|
52
|
+
include_subgraphs: If True, nests sub-workflows in Mermaid subgraphs (flowchart only).
|
52
53
|
title: Optional title for the diagram.
|
53
54
|
include_legend: If True, adds a comment-based legend explaining node types.
|
55
|
+
diagram_type: Type of diagram to generate: "flowchart" (default) or "stateDiagram".
|
54
56
|
|
55
57
|
Returns:
|
56
|
-
A string containing the Mermaid syntax for the
|
58
|
+
A string containing the Mermaid syntax for the diagram.
|
57
59
|
|
58
60
|
Raises:
|
59
|
-
ValueError: If node names contain invalid Mermaid characters.
|
61
|
+
ValueError: If node names contain invalid Mermaid characters or diagram_type is invalid.
|
60
62
|
"""
|
63
|
+
if diagram_type not in ("flowchart", "stateDiagram"):
|
64
|
+
raise ValueError(f"Invalid diagram_type '{diagram_type}'; must be 'flowchart' or 'stateDiagram'")
|
65
|
+
|
61
66
|
# Pastel color scheme for a soft, user-friendly look
|
62
67
|
node_styles: Dict[str, str] = {
|
63
68
|
"function": "fill:#90CAF9,stroke:#42A5F5,stroke-width:2px", # Pastel Blue
|
@@ -67,7 +72,7 @@ def generate_mermaid_diagram(
|
|
67
72
|
"unknown": "fill:#CFD8DC,stroke:#B0BEC5,stroke-width:2px" # Pastel Grey
|
68
73
|
}
|
69
74
|
|
70
|
-
# Shape mappings for
|
75
|
+
# Shape mappings for flowchart syntax
|
71
76
|
shape_syntax: Dict[str, Tuple[str, str]] = {
|
72
77
|
"rect": ("[", "]"), # Rectangle for standard nodes
|
73
78
|
"diamond": ("{{", "}}") # Diamond for decision points
|
@@ -100,89 +105,124 @@ def generate_mermaid_diagram(
|
|
100
105
|
if trans.condition and isinstance(trans.to_node, str):
|
101
106
|
conditional_nodes.add(trans.from_node)
|
102
107
|
|
103
|
-
#
|
104
|
-
node_defs: List[str] = []
|
108
|
+
# Shared node definitions and types
|
105
109
|
node_types: Dict[str, str] = {}
|
106
|
-
node_shapes: Dict[str, str] = {}
|
107
|
-
for node in all_nodes:
|
108
|
-
node_def = workflow_def.nodes.get(node)
|
109
|
-
has_conditions = node in conditional_nodes
|
110
|
-
label, node_type, shape = get_node_label_and_type(node, node_def, has_conditions)
|
111
|
-
start_shape, end_shape = shape_syntax[shape]
|
112
|
-
node_defs.append(f'{node}{start_shape}"{label}"{end_shape}')
|
113
|
-
node_types[node] = node_type
|
114
|
-
node_shapes[node] = shape
|
115
|
-
|
116
|
-
# Generate arrows for transitions (all solid lines)
|
117
|
-
arrows: List[str] = []
|
118
|
-
for trans in workflow_def.workflow.transitions:
|
119
|
-
from_node = trans.from_node
|
120
|
-
if isinstance(trans.to_node, str):
|
121
|
-
to_node = trans.to_node
|
122
|
-
condition = trans.condition
|
123
|
-
if condition:
|
124
|
-
cond = condition.replace('"', '\\"')[:30] + ("..." if len(condition) > 30 else "")
|
125
|
-
arrows.append(f'{from_node} -->|"{cond}"| {to_node}') # Solid arrow with condition
|
126
|
-
else:
|
127
|
-
arrows.append(f'{from_node} --> {to_node}')
|
128
|
-
else:
|
129
|
-
for to_node in trans.to_node:
|
130
|
-
arrows.append(f'{from_node} --> {to_node}') # Solid arrow for parallel
|
110
|
+
node_shapes: Dict[str, str] = {} # Only used for flowchart
|
131
111
|
|
132
112
|
# Assemble the Mermaid syntax
|
133
113
|
mermaid_code = "```mermaid\n"
|
134
|
-
|
114
|
+
if diagram_type == "flowchart":
|
115
|
+
mermaid_code += "graph TD\n" # Top-down layout
|
116
|
+
else: # stateDiagram
|
117
|
+
mermaid_code += "stateDiagram-v2\n"
|
118
|
+
|
135
119
|
if title:
|
136
120
|
mermaid_code += f" %% Diagram: {title}\n"
|
137
121
|
|
138
122
|
# Optional legend for UX
|
139
123
|
if include_legend:
|
140
124
|
mermaid_code += " %% Legend:\n"
|
141
|
-
|
142
|
-
|
125
|
+
if diagram_type == "flowchart":
|
126
|
+
mermaid_code += " %% - Rectangle: Process Step\n"
|
127
|
+
mermaid_code += " %% - Diamond: Decision Point\n"
|
143
128
|
mermaid_code += " %% - Colors: Blue (Function), Green (Structured LLM), Purple (LLM), Orange (Sub-Workflow), Grey (Unknown)\n"
|
144
129
|
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
130
|
+
if diagram_type == "flowchart":
|
131
|
+
# Flowchart-specific: Generate node definitions with shapes
|
132
|
+
node_defs: List[str] = []
|
133
|
+
for node in all_nodes:
|
134
|
+
node_def_flow: Optional[NodeDefinition] = workflow_def.nodes.get(node)
|
135
|
+
has_conditions = node in conditional_nodes
|
136
|
+
label, node_type, shape = get_node_label_and_type(node, node_def_flow, has_conditions)
|
137
|
+
start_shape, end_shape = shape_syntax[shape]
|
138
|
+
node_defs.append(f'{node}{start_shape}"{label}"{end_shape}')
|
139
|
+
node_types[node] = node_type
|
140
|
+
node_shapes[node] = shape
|
141
|
+
|
142
|
+
# Add node definitions
|
143
|
+
for node_def_str in node_defs:
|
144
|
+
mermaid_code += f" {node_def_str}\n"
|
145
|
+
|
146
|
+
# Generate arrows for transitions (all solid lines)
|
147
|
+
for trans in workflow_def.workflow.transitions:
|
148
|
+
from_node = trans.from_node
|
149
|
+
if isinstance(trans.to_node, str):
|
150
|
+
to_node = trans.to_node
|
151
|
+
condition = trans.condition
|
152
|
+
if condition:
|
153
|
+
cond = condition.replace('"', '\\"')[:30] + ("..." if len(condition) > 30 else "")
|
154
|
+
mermaid_code += f' {from_node} -->|"{cond}"| {to_node}\n'
|
155
|
+
else:
|
156
|
+
mermaid_code += f' {from_node} --> {to_node}\n'
|
157
|
+
else:
|
158
|
+
for to_node in trans.to_node:
|
159
|
+
mermaid_code += f' {from_node} --> {to_node}\n'
|
160
|
+
|
161
|
+
# Add styles for node types
|
162
|
+
for node, node_type in node_types.items():
|
163
|
+
if node_type in node_styles:
|
164
|
+
mermaid_code += f" style {node} {node_styles[node_type]}\n"
|
165
|
+
|
166
|
+
# Highlight the start node
|
167
|
+
if workflow_def.workflow.start and workflow_def.workflow.start in node_types:
|
168
|
+
mermaid_code += f" style {workflow_def.workflow.start} stroke-width:4px\n"
|
169
|
+
|
170
|
+
# Optional: Subgraphs for sub-workflows
|
171
|
+
if include_subgraphs:
|
172
|
+
for node, node_def_entry in workflow_def.nodes.items():
|
173
|
+
if node_def_entry and node_def_entry.sub_workflow:
|
174
|
+
mermaid_code += f" subgraph {node}_sub[Sub-Workflow: {node}]\n"
|
175
|
+
sub_nodes: Set[str] = {node_def_entry.sub_workflow.start} if node_def_entry.sub_workflow.start else set()
|
176
|
+
for trans in node_def_entry.sub_workflow.transitions:
|
177
|
+
sub_nodes.add(trans.from_node)
|
178
|
+
if isinstance(trans.to_node, str):
|
179
|
+
sub_nodes.add(trans.to_node)
|
180
|
+
else:
|
181
|
+
sub_nodes.update(trans.to_node)
|
182
|
+
for sub_node in sub_nodes:
|
183
|
+
mermaid_code += f" {sub_node}[[{sub_node}]]\n"
|
184
|
+
mermaid_code += " end\n"
|
185
|
+
|
186
|
+
else: # stateDiagram
|
187
|
+
# StateDiagram-specific: Define states
|
188
|
+
for node in all_nodes:
|
189
|
+
node_def_state: Optional[NodeDefinition] = workflow_def.nodes.get(node)
|
190
|
+
has_conditions = node in conditional_nodes
|
191
|
+
label, node_type, _ = get_node_label_and_type(node, node_def_state, has_conditions) # Shape unused
|
192
|
+
mermaid_code += f" state \"{label}\" as {node}\n"
|
193
|
+
node_types[node] = node_type
|
194
|
+
|
195
|
+
# Start state
|
196
|
+
if workflow_def.workflow.start:
|
197
|
+
mermaid_code += f" [*] --> {workflow_def.workflow.start}\n"
|
198
|
+
|
199
|
+
# Transitions
|
200
|
+
for trans in workflow_def.workflow.transitions:
|
201
|
+
from_node = trans.from_node
|
202
|
+
if isinstance(trans.to_node, str):
|
203
|
+
to_node = trans.to_node
|
204
|
+
condition = trans.condition
|
205
|
+
if condition:
|
206
|
+
cond = condition.replace('"', '\\"')[:30] + ("..." if len(condition) > 30 else "")
|
207
|
+
mermaid_code += f" {from_node} --> {to_node} : {cond}\n"
|
208
|
+
else:
|
209
|
+
mermaid_code += f" {from_node} --> {to_node}\n"
|
210
|
+
else:
|
211
|
+
# Parallel transitions approximated with a note
|
212
|
+
for to_node in trans.to_node:
|
213
|
+
mermaid_code += f" {from_node} --> {to_node} : parallel\n"
|
214
|
+
|
215
|
+
# Add styles for node types
|
216
|
+
for node, node_type in node_types.items():
|
217
|
+
if node_type in node_styles:
|
218
|
+
mermaid_code += f" style {node} {node_styles[node_type]}\n"
|
177
219
|
|
178
220
|
mermaid_code += "```\n"
|
179
221
|
return mermaid_code
|
180
222
|
|
181
223
|
|
182
224
|
def main() -> None:
|
183
|
-
"""
|
184
|
-
Create a complex workflow and print its improved Mermaid diagram representation.
|
185
|
-
"""
|
225
|
+
"""Create a complex workflow and print its improved Mermaid diagram representation."""
|
186
226
|
manager = WorkflowManager()
|
187
227
|
|
188
228
|
# Add functions
|
@@ -230,11 +270,13 @@ def main() -> None:
|
|
230
270
|
manager.add_transition(from_node="sentiment_analysis", to_node="revise", condition="ctx['sentiment'] == 'negative'")
|
231
271
|
manager.add_transition(from_node="keyword_extraction", to_node="publish")
|
232
272
|
|
233
|
-
# Generate and print
|
273
|
+
# Generate and print both diagrams
|
234
274
|
workflow_def = manager.workflow
|
235
|
-
|
236
|
-
print(
|
237
|
-
|
275
|
+
print("Flowchart (default):")
|
276
|
+
print(generate_mermaid_diagram(workflow_def, include_subgraphs=False, title="Content Processing Workflow"))
|
277
|
+
print("\nState Diagram:")
|
278
|
+
print(generate_mermaid_diagram(workflow_def, diagram_type="stateDiagram", title="Content Processing Workflow"))
|
279
|
+
|
238
280
|
|
239
281
|
if __name__ == "__main__":
|
240
282
|
main()
|
@@ -1,14 +1,12 @@
|
|
1
1
|
import ast
|
2
2
|
import re
|
3
3
|
from collections import defaultdict
|
4
|
-
from typing import Dict, List,
|
4
|
+
from typing import Dict, List, Optional, Set
|
5
5
|
|
6
|
-
from pydantic import
|
6
|
+
from pydantic import BaseModel
|
7
7
|
|
8
8
|
from quantalogic.flow.flow_manager import WorkflowManager
|
9
9
|
from quantalogic.flow.flow_manager_schema import (
|
10
|
-
FunctionDefinition,
|
11
|
-
LLMConfig,
|
12
10
|
NodeDefinition,
|
13
11
|
TransitionDefinition,
|
14
12
|
WorkflowDefinition,
|
@@ -102,9 +100,7 @@ def validate_workflow_definition(workflow_def: WorkflowDefinition) -> List[NodeE
|
|
102
100
|
predecessors[to_node].append(from_node)
|
103
101
|
|
104
102
|
# Define function to get ancestors, handling cycles with a visited set
|
105
|
-
def get_ancestors(node: str, visited: Set[str] =
|
106
|
-
if visited is None:
|
107
|
-
visited = set()
|
103
|
+
def get_ancestors(node: str, visited: Set[str] = set()) -> Set[str]:
|
108
104
|
if node in visited or node not in all_nodes:
|
109
105
|
return set()
|
110
106
|
visited.add(node)
|
@@ -130,15 +126,20 @@ def validate_workflow_definition(workflow_def: WorkflowDefinition) -> List[NodeE
|
|
130
126
|
full_node_name = node_name
|
131
127
|
|
132
128
|
if node_def.function:
|
133
|
-
|
134
|
-
if
|
135
|
-
|
136
|
-
|
137
|
-
|
138
|
-
|
139
|
-
issues.append(NodeError(node_name=node_name, description=f"Failed to parse function '{node_def.function}': {e}"))
|
129
|
+
maybe_func_def = workflow_def.functions.get(node_def.function)
|
130
|
+
if maybe_func_def is None:
|
131
|
+
issues.append(NodeError(
|
132
|
+
node_name=node_name,
|
133
|
+
description=f"Function '{node_def.function}' not found in workflow functions"
|
134
|
+
))
|
140
135
|
else:
|
141
|
-
|
136
|
+
func_def = maybe_func_def # Type is now definitely FunctionDefinition
|
137
|
+
if func_def.type == "embedded" and func_def.code:
|
138
|
+
try:
|
139
|
+
params = get_function_params(func_def.code, node_def.function)
|
140
|
+
required_inputs = set(params)
|
141
|
+
except ValueError as e:
|
142
|
+
issues.append(NodeError(node_name=node_name, description=f"Failed to parse function '{node_def.function}': {e}"))
|
142
143
|
elif node_def.llm_config:
|
143
144
|
prompt_template = node_def.llm_config.prompt_template
|
144
145
|
input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", prompt_template))
|
@@ -154,13 +155,23 @@ def validate_workflow_definition(workflow_def: WorkflowDefinition) -> List[NodeE
|
|
154
155
|
if sub_node_def:
|
155
156
|
full_node_name = f"{node_name}/{sub_node_name}"
|
156
157
|
if sub_node_def.function:
|
157
|
-
|
158
|
-
if
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
158
|
+
maybe_func_def = workflow_def.functions.get(sub_node_def.function)
|
159
|
+
if maybe_func_def is None:
|
160
|
+
issues.append(NodeError(
|
161
|
+
node_name=full_node_name,
|
162
|
+
description=f"Function '{sub_node_def.function}' not found in workflow functions"
|
163
|
+
))
|
164
|
+
else:
|
165
|
+
func_def = maybe_func_def # Type is now definitely FunctionDefinition
|
166
|
+
if func_def.type == "embedded" and func_def.code:
|
167
|
+
try:
|
168
|
+
params = get_function_params(func_def.code, sub_node_def.function)
|
169
|
+
required_inputs = set(params)
|
170
|
+
except ValueError as e:
|
171
|
+
issues.append(NodeError(
|
172
|
+
node_name=full_node_name,
|
173
|
+
description=f"Failed to parse function '{sub_node_def.function}': {e}"
|
174
|
+
))
|
164
175
|
elif sub_node_def.llm_config:
|
165
176
|
prompt_template = sub_node_def.llm_config.prompt_template
|
166
177
|
input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", prompt_template))
|
@@ -176,7 +187,10 @@ def validate_workflow_definition(workflow_def: WorkflowDefinition) -> List[NodeE
|
|
176
187
|
for input_name in required_inputs:
|
177
188
|
producer_node = output_to_node.get(input_name)
|
178
189
|
if producer_node is None or producer_node not in ancestors:
|
179
|
-
issues.append(NodeError(
|
190
|
+
issues.append(NodeError(
|
191
|
+
node_name=full_node_name,
|
192
|
+
description=f"Requires input '{input_name}', but it is not produced by any ancestor"
|
193
|
+
))
|
180
194
|
continue
|
181
195
|
|
182
196
|
if not required_inputs:
|
@@ -186,7 +200,10 @@ def validate_workflow_definition(workflow_def: WorkflowDefinition) -> List[NodeE
|
|
186
200
|
for input_name in required_inputs:
|
187
201
|
producer_node = output_to_node.get(input_name)
|
188
202
|
if producer_node is None or producer_node not in ancestors:
|
189
|
-
issues.append(NodeError(
|
203
|
+
issues.append(NodeError(
|
204
|
+
node_name=full_node_name,
|
205
|
+
description=f"Requires input '{input_name}', but it is not produced by any ancestor"
|
206
|
+
))
|
190
207
|
|
191
208
|
for observer in workflow_def.observers:
|
192
209
|
if observer not in workflow_def.functions:
|
quantalogic/flow/flow_yaml.md
CHANGED
@@ -32,11 +32,12 @@ graph TD
|
|
32
32
|
|
33
33
|
## 2. Workflow Structure 🗺️
|
34
34
|
|
35
|
-
A workflow YAML file is divided into
|
35
|
+
A workflow YAML file is divided into four core sections:
|
36
36
|
|
37
37
|
- **`functions`**: Python code definitions.
|
38
38
|
- **`nodes`**: Task specifications.
|
39
39
|
- **`workflow`**: Flow orchestration.
|
40
|
+
- **`dependencies`**: Python module dependencies.
|
40
41
|
|
41
42
|
Here’s the skeleton:
|
42
43
|
|
@@ -47,6 +48,8 @@ nodes:
|
|
47
48
|
# Tasks 🎯
|
48
49
|
workflow:
|
49
50
|
# Flow control 🚦
|
51
|
+
dependencies:
|
52
|
+
# Python module dependencies (optional)
|
50
53
|
observers:
|
51
54
|
# Event watchers 👀 (optional)
|
52
55
|
```
|
@@ -275,7 +278,33 @@ graph TD
|
|
275
278
|
|
276
279
|
---
|
277
280
|
|
278
|
-
|
281
|
+
---
|
282
|
+
|
283
|
+
## 5. Dependencies 🐍
|
284
|
+
|
285
|
+
The `dependencies` section lists Python modules required by the workflow.
|
286
|
+
|
287
|
+
### Fields 📋
|
288
|
+
|
289
|
+
- `dependencies` (list, optional): A list of Python module dependencies. Each dependency can be a:
|
290
|
+
- PyPI package name (e.g., `requests>=2.28.0`).
|
291
|
+
- Local file path (e.g., `/path/to/module.py`).
|
292
|
+
- Remote URL (e.g., `https://example.com/module.py`).
|
293
|
+
|
294
|
+
These dependencies are processed during workflow instantiation, ensuring that all required modules are available before the workflow starts.
|
295
|
+
|
296
|
+
### Example 🌈
|
297
|
+
|
298
|
+
```yaml
|
299
|
+
dependencies:
|
300
|
+
- requests>=2.28.0
|
301
|
+
- /path/to/my_custom_module.py
|
302
|
+
- https://example.com/another_module.py
|
303
|
+
```
|
304
|
+
|
305
|
+
---
|
306
|
+
|
307
|
+
## 6. Nodes 🧩
|
279
308
|
|
280
309
|
Nodes are the tasks, powered by functions, sub-workflows, or LLMs.
|
281
310
|
|
@@ -288,6 +317,28 @@ Nodes are the tasks, powered by functions, sub-workflows, or LLMs.
|
|
288
317
|
- `model` (string, default: `"gpt-3.5-turbo"`)
|
289
318
|
- `system_prompt` (string, optional)
|
290
319
|
- `prompt_template` (string, default: `"{{ input }}"`)
|
320
|
+
- `prompt_file` (string, optional): Path to an external Jinja2 template file. If provided, the template file will be loaded and rendered with the available context.
|
321
|
+
|
322
|
+
To leverage the power of Jinja2 templating directly within your Quantalogic Flow YAML DSL, you can embed Jinja2 syntax within the `prompt_template` field of your `llm_config`. This allows you to dynamically generate prompts based on variables passed from previous nodes or defined within the flow itself. Simply enclose your Jinja2 expressions within `{{ ... }}`. Ensure that the variables you reference are accessible within the scope of the node execution.
|
323
|
+
|
324
|
+
Here's an example:
|
325
|
+
|
326
|
+
```yaml
|
327
|
+
nodes:
|
328
|
+
- id: generate_email
|
329
|
+
type: llm
|
330
|
+
config:
|
331
|
+
llm_config:
|
332
|
+
model: "gpt-4"
|
333
|
+
prompt_template: "Write an email to {{ recipient }} about the upcoming {{ event }}."
|
334
|
+
temperature: 0.7
|
335
|
+
inputs:
|
336
|
+
recipient: ${get_user_details.outputs.email}
|
337
|
+
event: "Company Picnic"
|
338
|
+
```
|
339
|
+
|
340
|
+
In this example, the `prompt_template` will dynamically generate an email prompt using the `recipient` variable (fetched from the output of the `get_user_details` node) and the `event` variable, which is a hardcoded string in this case. The LLM will then use the generated prompt to compose the email.
|
341
|
+
|
291
342
|
- `temperature` (float, default: `0.7`)
|
292
343
|
- `max_tokens` (int, optional)
|
293
344
|
- `top_p` (float, default: `1.0`)
|
@@ -305,6 +356,29 @@ Nodes are the tasks, powered by functions, sub-workflows, or LLMs.
|
|
305
356
|
- LLM inputs come from `prompt_template`.
|
306
357
|
|
307
358
|
### Examples 🌈
|
359
|
+
To use an external Jinja2 template file for your `prompt_template` within a Quantalogic Flow YAML DSL node's `llm_config`, specify the path to your template file using the `prompt_file` field. The Flow will then load and render this template using Jinja2 with the available context variables during execution. This promotes cleaner YAML and allows for easier template reuse and maintenance.
|
360
|
+
|
361
|
+
Here's an example `llm_config` in your YAML:
|
362
|
+
|
363
|
+
```yaml
|
364
|
+
llm_config:
|
365
|
+
model: "gpt-3.5-turbo"
|
366
|
+
prompt_file: "templates/my_prompt.j2"
|
367
|
+
temperature: 0.7
|
368
|
+
```
|
369
|
+
|
370
|
+
And here's a corresponding example of the external Jinja2 template file (`templates/my_prompt.j2`):
|
371
|
+
|
372
|
+
```jinja2
|
373
|
+
You are a helpful assistant. The user has asked the following:
|
374
|
+
|
375
|
+
{{ user_query }}
|
376
|
+
|
377
|
+
Please provide a concise and accurate answer.
|
378
|
+
```
|
379
|
+
|
380
|
+
In this example, `{{ user_query }}` will be replaced by the value of the `user_query` variable available in the Flow's context when the template is rendered. Remember to ensure the path specified in `prompt_file` is relative to the Flow's execution directory or an absolute path.
|
381
|
+
|
308
382
|
From the story generator:
|
309
383
|
```yaml
|
310
384
|
nodes:
|
@@ -1,5 +1,5 @@
|
|
1
1
|
### Agent Identity: QuantaLogic {{ version }}
|
2
|
-
Expert ReAct AI Agent implementing OODA (Observe-Orient-Decide-Act) loop with
|
2
|
+
Expert ReAct AI Agent implementing OODA (Observe-Orient-Decide-Act) loop with concise, efficient problem-solving.
|
3
3
|
|
4
4
|
### Domain Expertise
|
5
5
|
{{ expertise }}
|
@@ -8,65 +8,61 @@ Expert ReAct AI Agent implementing OODA (Observe-Orient-Decide-Act) loop with ad
|
|
8
8
|
Task Format: <task>task_description</task>
|
9
9
|
|
10
10
|
### Cognitive Framework
|
11
|
-
1. 🔍 OBSERVE
|
12
|
-
2. 🧭 ORIENT
|
13
|
-
3. 🎯 DECIDE
|
14
|
-
4. ⚡ ACT
|
11
|
+
1. 🔍 **OBSERVE**: Gather essential data
|
12
|
+
2. 🧭 **ORIENT**: Analyze context briefly
|
13
|
+
3. 🎯 **DECIDE**: Select optimal action
|
14
|
+
4. ⚡ **ACT**: Execute precise operations
|
15
15
|
|
16
16
|
### Response Schema [MANDATORY TWO-BLOCK FORMAT]
|
17
17
|
|
18
|
-
1. 🧠 Analysis Block:
|
18
|
+
#### 1. 🧠 Analysis Block:
|
19
19
|
```xml
|
20
20
|
<thinking>
|
21
|
-
<!-- COGNITIVE PROCESSING
|
21
|
+
<!-- CONCISE COGNITIVE PROCESSING -->
|
22
|
+
<!-- VERY IMPORTANT: Write the plan using draft, emojis and symbols to be as concise as possible, avoid full sentence and full words -->
|
22
23
|
|
23
24
|
<!-- INITIAL TASK ANALYSIS - INCLUDE ONLY IF NO MESSAGE HISTORY EXISTS -->
|
24
25
|
<context_analysis when="no_history">
|
25
|
-
• 📋 Task Decomposition
|
26
|
-
• 🎯 Success Metrics: Quantifiable
|
27
|
-
• 🛠️
|
28
|
-
• ⚠️
|
26
|
+
• 📋 Task Decomposition: Key steps, dependencies
|
27
|
+
• 🎯 Success Metrics: Quantifiable outcomes
|
28
|
+
• 🛠️ Resources: Essential tools, data, variables
|
29
|
+
• ⚠️ Risks: Potential failures, mitigations
|
29
30
|
</context_analysis>
|
30
31
|
|
31
32
|
<!-- ALWAYS INCLUDE FOR ONGOING OPERATIONS -->
|
32
33
|
<execution_analysis>
|
33
|
-
|
34
|
-
•
|
35
|
-
•
|
36
|
-
•
|
37
|
-
• 📈 Performance Metrics: Speed, Quality, Resource Usage
|
34
|
+
• 🔄 Last Operation: Result, impact
|
35
|
+
• 📊 Progress: Completed%, remaining%
|
36
|
+
• 💾 Variables: $var: brief content
|
37
|
+
• 📈 Metrics: Speed, quality, resource use
|
38
38
|
</execution_analysis>
|
39
39
|
|
40
40
|
<decision_matrix>
|
41
|
-
|
42
|
-
•
|
43
|
-
•
|
44
|
-
•
|
45
|
-
• ✅ Exit Criteria: Completion Conditions
|
41
|
+
• 🎯 Next Action: Tool + rationale
|
42
|
+
• 📥 Parameters: Values + $var$
|
43
|
+
• 🔄 Fallback: Alternative approach
|
44
|
+
• ✅ Exit Criteria: Completion conditions
|
46
45
|
</decision_matrix>
|
47
46
|
|
48
47
|
<memory_pad>
|
49
|
-
<!-- OPERATIONAL NOTES -->
|
50
48
|
• 📝 Key Observations
|
51
|
-
• ⚡ Quick
|
49
|
+
• ⚡ Quick Data
|
52
50
|
</memory_pad>
|
53
51
|
</thinking>
|
54
52
|
```
|
55
53
|
|
56
|
-
2. ⚡ Action Block:
|
54
|
+
#### 2. ⚡ Action Block:
|
57
55
|
```xml
|
58
56
|
<action>
|
59
57
|
<tool_name>
|
60
|
-
<!--
|
61
|
-
<
|
62
|
-
<param2>value2</param2> <!-- Keep parameters minimal but sufficient -->
|
58
|
+
<param1>value1</param1> <!-- Use $var$ for efficiency -->
|
59
|
+
<param2>value2</param2> <!-- Minimal but sufficient -->
|
63
60
|
</tool_name>
|
64
61
|
</action>
|
65
62
|
```
|
66
63
|
|
67
64
|
### Example Usage
|
68
|
-
|
69
|
-
✅ Completion:
|
65
|
+
#### ✅ Completion:
|
70
66
|
```xml
|
71
67
|
<action>
|
72
68
|
<task_complete>
|
@@ -76,15 +72,15 @@ Task Format: <task>task_description</task>
|
|
76
72
|
```
|
77
73
|
|
78
74
|
### Operational Parameters
|
79
|
-
🛠️ Tools
|
80
|
-
🌐 Environment
|
75
|
+
- 🛠️ **Tools**: {{ tools }}
|
76
|
+
- 🌐 **Environment**: {{ environment }}
|
81
77
|
|
82
78
|
### Execution Guidelines
|
83
|
-
|
84
|
-
2. 📊 Use data-driven
|
85
|
-
3. 🔄
|
86
|
-
4. ⚡ Maximize efficiency
|
87
|
-
5. 🔍
|
88
|
-
6. 🛑
|
89
|
-
7. ✅ Verify completion
|
90
|
-
8. ✅ Return complete,
|
79
|
+
1. 🎯 Focus on task objectives
|
80
|
+
2. 📊 Use data-driven decisions
|
81
|
+
3. 🔄 Optimize with feedback loops
|
82
|
+
4. ⚡ Maximize efficiency via interpolation
|
83
|
+
5. 🔍 Validate each action's impact
|
84
|
+
6. 🛑 Adapt quickly to blockers
|
85
|
+
7. ✅ Verify completion rigorously
|
86
|
+
8. ✅ Return complete, usable results
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: quantalogic
|
3
|
-
Version: 0.
|
3
|
+
Version: 0.53.0
|
4
4
|
Summary: QuantaLogic ReAct Agents
|
5
5
|
Author: Raphaël MANSUY
|
6
6
|
Author-email: raphael.mansuy@gmail.com
|
@@ -59,6 +59,8 @@ Picture this: a CLI that’s as easy as a snap, a Python API that’s pure magic
|
|
59
59
|

|
60
60
|
|
61
61
|
---
|
62
|
+
[Chinese Version](./README_CN.md)
|
63
|
+
[French Version](./README_FR.md)
|
62
64
|
|
63
65
|
## Why QuantaLogic?
|
64
66
|
|