quantalogic 0.56.0__py3-none-any.whl → 0.57.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/flow/flow.py +257 -103
- quantalogic/flow/flow_extractor.py +6 -15
- quantalogic/flow/flow_generator.py +28 -32
- quantalogic/flow/flow_manager.py +17 -3
- quantalogic/flow/flow_manager_schema.py +53 -5
- quantalogic/flow/flow_mermaid.py +2 -2
- quantalogic/flow/flow_yaml.linkedin.md +31 -0
- quantalogic/flow/flow_yaml.md +74 -56
- quantalogic/flow/templates/prompt_check_inventory.j2 +1 -0
- quantalogic/flow/templates/system_check_inventory.j2 +1 -0
- {quantalogic-0.56.0.dist-info → quantalogic-0.57.0.dist-info}/METADATA +1 -1
- {quantalogic-0.56.0.dist-info → quantalogic-0.57.0.dist-info}/RECORD +15 -12
- {quantalogic-0.56.0.dist-info → quantalogic-0.57.0.dist-info}/LICENSE +0 -0
- {quantalogic-0.56.0.dist-info → quantalogic-0.57.0.dist-info}/WHEEL +0 -0
- {quantalogic-0.56.0.dist-info → quantalogic-0.57.0.dist-info}/entry_points.txt +0 -0
quantalogic/flow/flow_extractor.py CHANGED

```diff
@@ -3,8 +3,8 @@ import os
 
 from loguru import logger
 
-from quantalogic.flow.flow_generator import generate_executable_script
-from quantalogic.flow.flow_manager import WorkflowManager
+from quantalogic.flow.flow_generator import generate_executable_script
+from quantalogic.flow.flow_manager import WorkflowManager
 from quantalogic.flow.flow_manager_schema import (
     BranchCondition,
     FunctionDefinition,
```
```diff
@@ -137,6 +137,7 @@ class WorkflowExtractor(ast.NodeVisitor):
             if key in [
                 "model",
                 "system_prompt",
+                "system_prompt_file",
                 "prompt_template",
                 "prompt_file",
                 "temperature",
```
```diff
@@ -170,6 +171,7 @@ class WorkflowExtractor(ast.NodeVisitor):
             if key in [
                 "model",
                 "system_prompt",
+                "system_prompt_file",
                 "prompt_template",
                 "prompt_file",
                 "temperature",
```
```diff
@@ -281,6 +283,7 @@ class WorkflowExtractor(ast.NodeVisitor):
             if key in [
                 "model",
                 "system_prompt",
+                "system_prompt_file",
                 "prompt_template",
                 "prompt_file",
                 "temperature",
```
```diff
@@ -314,6 +317,7 @@ class WorkflowExtractor(ast.NodeVisitor):
             if key in [
                 "model",
                 "system_prompt",
+                "system_prompt_file",
                 "prompt_template",
                 "prompt_file",
                 "temperature",
```
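The four hunks above make the same one-line change in four places: "system_prompt_file" joins the keyword lists the extractor matches when parsing LLM-node decorators. A minimal sketch of a node the updated extractor should now capture in full; the node name and body are illustrative, and only the decorator keywords come from the diff (the `.j2` path is one of the template files added in this release):

```python
from quantalogic.flow.flow import Nodes

# Hypothetical node: system_prompt_file is the keyword the extractor
# now recognizes alongside system_prompt.
@Nodes.llm_node(
    model="gpt-3.5-turbo",
    system_prompt_file="quantalogic/flow/templates/system_check_inventory.j2",
    prompt_template="{{ input }}",
    output="inventory_report",
)
async def check_inventory(input: str) -> str:
    pass  # The LLM node supplies the behavior at runtime.
```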
```diff
@@ -597,19 +601,6 @@ def extract_workflow_from_file(file_path):
         parallel=False,
     )
 
-    # Optional: Deduplicate transitions (uncomment if desired)
-    # seen = set()
-    # unique_transitions = []
-    # for t in extractor.transitions:
-    #     key = (t.from_node, str(t.to_node), t.condition)
-    #     if key not in seen:
-    #         seen.add(key)
-    #         unique_transitions.append(t)
-    # workflow_structure = WorkflowStructure(
-    #     start=extractor.start_node,
-    #     transitions=unique_transitions,
-    #     convergence_nodes=extractor.convergence_nodes,
-    # )
     workflow_structure = WorkflowStructure(
         start=extractor.start_node,
         transitions=extractor.transitions,
```
quantalogic/flow/flow_generator.py CHANGED

```diff
@@ -3,8 +3,8 @@ import os
 import re
 from typing import Dict, Optional
 
-from quantalogic.flow.flow import Nodes
-from quantalogic.flow.flow_manager_schema import BranchCondition, WorkflowDefinition
+from quantalogic.flow.flow import Nodes
+from quantalogic.flow.flow_manager_schema import BranchCondition, WorkflowDefinition
 
 
 def generate_executable_script(
```
```diff
@@ -37,21 +37,19 @@ def generate_executable_script(
     start_node = workflow_def.workflow.start
     if start_node and start_node in workflow_def.nodes:
         node_def = workflow_def.nodes[start_node]
-        if node_def.function:
-
-
-
-
-
-
-
-
-            #
-
-
-
-            except SyntaxError:
-                pass
+        if node_def.function and node_def.function in workflow_def.functions:
+            func_def = workflow_def.functions[node_def.function]
+            if func_def.type == "embedded" and func_def.code:
+                try:
+                    tree = ast.parse(func_def.code)
+                    for node in ast.walk(tree):
+                        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
+                            inputs = [param.arg for param in node.args.args]
+                            for input_name in inputs:
+                                initial_context[input_name] = ""  # Default to empty string
+                            break
+                except SyntaxError:
+                    pass
         elif node_def.llm_config:
             prompt = node_def.llm_config.prompt_template or ""
             input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", prompt))
```
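The new block above infers the start node's initial context by parsing the embedded function's source with `ast` and seeding one empty-string entry per parameter. A standalone sketch of that inference step, using a toy function rather than anything from the package:

```python
import ast

# Toy embedded function source; in the generator this comes from func_def.code.
code = "async def greet(user_name, greeting):\n    return f'{greeting}, {user_name}!'"

initial_context = {}
tree = ast.parse(code)
for node in ast.walk(tree):
    if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
        for param in node.args.args:
            initial_context[param.arg] = ""  # Default to empty string
        break  # Only the first function found is inspected.

print(initial_context)  # {'user_name': '', 'greeting': ''}
```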
```diff
@@ -88,10 +86,9 @@ def generate_executable_script(
                         break
             except SyntaxError:
                 pass
-        # Apply inputs_mapping if present
        if node_def.inputs_mapping:
             for key, value in node_def.inputs_mapping.items():
-                if not value.startswith("lambda ctx:"):  #
+                if not value.startswith("lambda ctx:"):  # Static mappings only
                     initial_context[value] = ""
 
     with open(output_file, "w") as f:
```
```diff
@@ -126,20 +123,20 @@ def generate_executable_script(
             if node_def.function and node_def.function in workflow_def.functions:
                 func_def = workflow_def.functions[node_def.function]
                 if func_def.type == "embedded" and func_def.code:
-                    # Strip original decorator and apply new one
                     code_lines = func_def.code.split('\n')
-                    func_body = ""
-                    for line in code_lines:
-                        if line.strip().startswith('@Nodes.'):
-                            continue  # Skip original decorator
-                        func_body += line + "\n"
-                    func_body = func_body.rstrip("\n")
-
-                    # Generate new decorator based on node type
+                    func_body = "".join(
+                        line + "\n" for line in code_lines if not line.strip().startswith('@Nodes.')
+                    ).rstrip("\n")
                     decorator = ""
                     if node_def.llm_config:
-                        params = []
-                        if node_def.llm_config.system_prompt:
+                        params = []
+                        if node_def.llm_config.model.startswith("lambda ctx:"):
+                            params.append(f"model={node_def.llm_config.model}")
+                        else:
+                            params.append(f"model={repr(node_def.llm_config.model)}")
+                        if node_def.llm_config.system_prompt_file:
+                            params.append(f"system_prompt_file={repr(node_def.llm_config.system_prompt_file)}")
+                        elif node_def.llm_config.system_prompt:
                             params.append(f"system_prompt={repr(node_def.llm_config.system_prompt)}")
                         if node_def.llm_config.prompt_template:
                             params.append(f"prompt_template={repr(node_def.llm_config.prompt_template)}")
```
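In the rewrite above, the old line-by-line loop that skipped `@Nodes.` decorator lines collapses into a single generator expression. A quick sketch of the same transformation on a toy embedded function:

```python
# Toy embedded function source with its original decorator attached.
code = (
    "@Nodes.define(output='greeting_result')\n"
    "async def greet(name):\n"
    "    return f'Hello, {name}!'"
)

code_lines = code.split('\n')
func_body = "".join(
    line + "\n" for line in code_lines if not line.strip().startswith('@Nodes.')
).rstrip("\n")

print(func_body)  # The bare function body; a fresh decorator is prepended later.
```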
```diff
@@ -160,10 +157,9 @@ def generate_executable_script(
                         decorator = f"@Nodes.template_node({', '.join(params)})\n"
                     else:
                         decorator = f"@Nodes.define(output={repr(node_def.output or f'{node_name}_result')})\n"
-                    # Write function with new decorator
                     f.write(f"{decorator}{func_body}\n\n")
 
-        # Define workflow using
+        # Define workflow using chaining syntax
         f.write("# Define the workflow with branch and converge support\n")
         f.write("workflow = (\n")
         start_node = workflow_def.workflow.start
```
quantalogic/flow/flow_manager.py CHANGED
```diff
@@ -326,7 +326,6 @@ class WorkflowManager:
             if node_def.function not in functions:
                 raise ValueError(f"Function '{node_def.function}' for node '{node_name}' not found")
             func = functions[node_def.function]
-            # Register with the node name, not the function name
             Nodes.NODE_REGISTRY[node_name] = (
                 Nodes.define(output=node_def.output)(func),
                 ["user_name"],  # Explicitly define inputs based on function signature
```
```diff
@@ -345,11 +344,25 @@ class WorkflowManager:
                 async def dummy_func(**kwargs):
                     pass
 
+                # Handle callable model if specified in inputs_mapping, else use default
+                def model_callable(ctx):
+                    return llm_config.model  # Default to string from schema
+                if node_def.inputs_mapping and "model" in node_def.inputs_mapping:
+                    model_value = node_def.inputs_mapping["model"]
+                    if isinstance(model_value, str) and model_value.startswith("lambda ctx:"):
+                        try:
+                            model_callable = eval(model_value)
+                        except Exception as e:
+                            logger.warning(f"Failed to evaluate model lambda for {node_name}: {e}")
+                            def model_callable(ctx):
+                                return model_value
+
                 if llm_config.response_model:
                     response_model = self._resolve_model(llm_config.response_model)
                     decorated_func = Nodes.structured_llm_node(
-                        model=llm_config.model,
+                        model=model_callable,
                         system_prompt=llm_config.system_prompt or "",
+                        system_prompt_file=llm_config.system_prompt_file,
                         prompt_template=llm_config.prompt_template,
                         prompt_file=llm_config.prompt_file,
                         response_model=response_model,
```
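This hunk is the heart of the dynamic-model feature: when a node's `inputs_mapping` carries a `"lambda ctx:"` string under `"model"`, `WorkflowManager` `eval()`s it into a callable and passes that instead of the static `llm_config.model`, falling back to the static value with a logged warning if evaluation fails. A sketch of a node definition exercising the new path, written as a raw dict against the schema; the concrete names and values are illustrative:

```python
# Illustrative node definition: the "model" mapping is a lambda source string,
# so WorkflowManager evaluates it and selects the model from the context per run.
node_definition = {
    "llm_config": {
        "model": "gpt-3.5-turbo",            # static default from the schema
        "prompt_template": "{{ question }}",
        "temperature": 0.7,
    },
    "inputs_mapping": {
        "model": 'lambda ctx: ctx.get("model_name", "gpt-3.5-turbo")',
    },
}
```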
```diff
@@ -363,8 +376,9 @@ class WorkflowManager:
                     )(dummy_func)
                 else:
                     decorated_func = Nodes.llm_node(
-                        model=llm_config.model,
+                        model=model_callable,
                         system_prompt=llm_config.system_prompt or "",
+                        system_prompt_file=llm_config.system_prompt_file,
                         prompt_template=llm_config.prompt_template,
                         prompt_file=llm_config.prompt_file,
                         output=node_def.output or f"{node_name}_result",
```
quantalogic/flow/flow_manager_schema.py CHANGED

```diff
@@ -30,7 +30,17 @@ class FunctionDefinition(BaseModel):
     @model_validator(mode="before")
     @classmethod
     def check_function_source(cls, data: Any) -> Any:
-        """Ensure the function definition is valid based on its type."""
+        """Ensure the function definition is valid based on its type.
+
+        Args:
+            data: Raw data to validate.
+
+        Returns:
+            Validated data.
+
+        Raises:
+            ValueError: If the function source configuration is invalid.
+        """
         type_ = data.get("type")
         if type_ == "embedded":
             if not data.get("code"):
```
```diff
@@ -50,9 +60,17 @@ class FunctionDefinition(BaseModel):
 class LLMConfig(BaseModel):
     """Configuration for LLM-based nodes."""
     model: str = Field(
-        default="gpt-3.5-turbo",
+        default="gpt-3.5-turbo",
+        description=(
+            "The LLM model to use. Can be a static model name (e.g., 'gpt-3.5-turbo', 'gemini/gemini-2.0-flash') "
+            "or a lambda expression (e.g., 'lambda ctx: ctx.get(\"model_name\")') for dynamic selection."
+        ),
     )
     system_prompt: Optional[str] = Field(None, description="System prompt defining the LLM's role or context.")
+    system_prompt_file: Optional[str] = Field(
+        None,
+        description="Path to an external Jinja2 template file for the system prompt. Takes precedence over system_prompt."
+    )
     prompt_template: str = Field(
         default="{{ input }}", description="Jinja2 template for the user prompt. Ignored if prompt_file is set."
     )
```
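Together with the new `system_prompt_file` field, the `model` field's description now documents lambda-string support. A short sketch instantiating the extended schema; the field names come straight from the diff, while the concrete values are illustrative (the `.j2` path is one of the template files added in this release):

```python
from quantalogic.flow.flow_manager_schema import LLMConfig

# Hypothetical config: an external Jinja2 system prompt takes precedence
# over an inline system_prompt, per the new field's description.
config = LLMConfig(
    model="gemini/gemini-2.0-flash",
    system_prompt_file="quantalogic/flow/templates/system_check_inventory.j2",
    prompt_template="{{ items | join(', ') }}",
)
```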
```diff
@@ -80,7 +98,17 @@ class LLMConfig(BaseModel):
     @model_validator(mode="before")
     @classmethod
     def check_prompt_source(cls, data: Any) -> Any:
-        """Ensure prompt_file and prompt_template are used appropriately."""
+        """Ensure prompt_file and prompt_template are used appropriately.
+
+        Args:
+            data: Raw data to validate.
+
+        Returns:
+            Validated data.
+
+        Raises:
+            ValueError: If prompt configuration is invalid.
+        """
         prompt_file = data.get("prompt_file")
         if prompt_file and not isinstance(prompt_file, str):
             raise ValueError("prompt_file must be a string path to a Jinja2 template file")
```
```diff
@@ -99,7 +127,17 @@ class TemplateConfig(BaseModel):
     @model_validator(mode="before")
     @classmethod
     def check_template_source(cls, data: Any) -> Any:
-        """Ensure template_file and template are used appropriately."""
+        """Ensure template_file and template are used appropriately.
+
+        Args:
+            data: Raw data to validate.
+
+        Returns:
+            Validated data.
+
+        Raises:
+            ValueError: If template configuration is invalid.
+        """
         template_file = data.get("template_file")
         template = data.get("template")
         if not template and not template_file:
```
```diff
@@ -137,7 +175,17 @@ class NodeDefinition(BaseModel):
     @model_validator(mode="before")
     @classmethod
     def check_function_or_sub_workflow_or_llm_or_template(cls, data: Any) -> Any:
-        """Ensure a node has exactly one of 'function', 'sub_workflow', 'llm_config', or 'template_config'."""
+        """Ensure a node has exactly one of 'function', 'sub_workflow', 'llm_config', or 'template_config'.
+
+        Args:
+            data: Raw data to validate.
+
+        Returns:
+            Validated data.
+
+        Raises:
+            ValueError: If node type configuration is invalid.
+        """
         func = data.get("function")
         sub_wf = data.get("sub_workflow")
         llm = data.get("llm_config")
```
quantalogic/flow/flow_mermaid.py CHANGED
```diff
@@ -87,7 +87,7 @@ def generate_mermaid_diagram(
         "function": "fill:#90CAF9,stroke:#42A5F5,stroke-width:2px",  # Pastel Blue
         "structured_llm": "fill:#A5D6A7,stroke:#66BB6A,stroke-width:2px",  # Pastel Green
         "llm": "fill:#CE93D8,stroke:#AB47BC,stroke-width:2px",  # Pastel Purple
-        "template": "fill:#FCE4EC,stroke:#F06292,stroke-width:2px",  # Pastel Pink
+        "template": "fill:#FCE4EC,stroke:#F06292,stroke-width:2px",  # Pastel Pink
         "sub_workflow": "fill:#FFCCBC,stroke:#FF7043,stroke-width:2px",  # Pastel Orange
         "unknown": "fill:#CFD8DC,stroke:#B0BEC5,stroke-width:2px"  # Pastel Grey
     }
```
```diff
@@ -148,7 +148,7 @@ def generate_mermaid_diagram(
     if title:
         mermaid_code += f" %% Diagram: {title}\n"
 
-    # Optional legend for UX
+    # Optional legend for UX
     if include_legend:
         mermaid_code += " %% Legend:\n"
         if diagram_type == "flowchart":
```
quantalogic/flow/flow_yaml.linkedin.md ADDED

```diff
@@ -0,0 +1,31 @@
+Feeling like you're duct-taping AI components instead of building real solutions? 😩 We've ALL been there.
+
+Introducing **QuantaLogic Flow** 🧩: your new (free & open-source!) workflow architect.
+
+Think of it as an AI LEGO set:
+
+✅ Build pipelines blazingly fast.
+✅ Configure complex workflows in simple YAML.
+✅ Get LLMs collaborating effectively.
+✅ Automate tasks from paper analysis to story generation.
+
+Why are engineers loving it?
+
+⚡️ Branching logic that *doesn't* induce stress.
+⚡️ Validation that catches errors early.
+⚡️ Python ↔️ YAML conversion that feels magical.
+⚡️ Input mapping so clean, Marie Kondo would approve.
+
+So, what workflow automation feature would save *you* the most time?
+
+1️⃣ Dynamic LLM content?
+2️⃣ Visual branching?
+3️⃣ Declarative YAML config?
+
+Comment below with your pick! I'll share pro tips for the most requested. 👇
+
+P.S. Know a dev drowning in manual pipeline work? Share this post and be their hero!
+
+Link in comments.
+
+#AIEngineering #WorkflowAutomation
```