quantalogic 0.61.3__py3-none-any.whl → 0.80__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/agent.py +0 -1
- quantalogic/codeact/TODO.md +14 -0
- quantalogic/codeact/agent.py +400 -421
- quantalogic/codeact/cli.py +42 -224
- quantalogic/codeact/cli_commands/__init__.py +0 -0
- quantalogic/codeact/cli_commands/create_toolbox.py +45 -0
- quantalogic/codeact/cli_commands/install_toolbox.py +20 -0
- quantalogic/codeact/cli_commands/list_executor.py +15 -0
- quantalogic/codeact/cli_commands/list_reasoners.py +15 -0
- quantalogic/codeact/cli_commands/list_toolboxes.py +47 -0
- quantalogic/codeact/cli_commands/task.py +215 -0
- quantalogic/codeact/cli_commands/tool_info.py +24 -0
- quantalogic/codeact/cli_commands/uninstall_toolbox.py +43 -0
- quantalogic/codeact/config.yaml +21 -0
- quantalogic/codeact/constants.py +1 -1
- quantalogic/codeact/events.py +12 -5
- quantalogic/codeact/examples/README.md +342 -0
- quantalogic/codeact/examples/agent_sample.yaml +29 -0
- quantalogic/codeact/executor.py +186 -0
- quantalogic/codeact/history_manager.py +94 -0
- quantalogic/codeact/llm_util.py +3 -22
- quantalogic/codeact/plugin_manager.py +92 -0
- quantalogic/codeact/prompts/generate_action.j2 +65 -14
- quantalogic/codeact/prompts/generate_program.j2 +32 -19
- quantalogic/codeact/react_agent.py +318 -0
- quantalogic/codeact/reasoner.py +185 -0
- quantalogic/codeact/templates/toolbox/README.md.j2 +10 -0
- quantalogic/codeact/templates/toolbox/pyproject.toml.j2 +16 -0
- quantalogic/codeact/templates/toolbox/tools.py.j2 +6 -0
- quantalogic/codeact/templates.py +7 -0
- quantalogic/codeact/tools_manager.py +242 -119
- quantalogic/codeact/utils.py +16 -89
- quantalogic/codeact/xml_utils.py +126 -0
- quantalogic/flow/flow.py +151 -41
- quantalogic/flow/flow_extractor.py +61 -1
- quantalogic/flow/flow_generator.py +34 -6
- quantalogic/flow/flow_manager.py +64 -25
- quantalogic/flow/flow_manager_schema.py +32 -0
- quantalogic/tools/action_gen.py +1 -1
- quantalogic/tools/tool.py +531 -109
- {quantalogic-0.61.3.dist-info → quantalogic-0.80.dist-info}/METADATA +3 -3
- {quantalogic-0.61.3.dist-info → quantalogic-0.80.dist-info}/RECORD +45 -22
- {quantalogic-0.61.3.dist-info → quantalogic-0.80.dist-info}/WHEEL +1 -1
- quantalogic-0.80.dist-info/entry_points.txt +3 -0
- quantalogic-0.61.3.dist-info/entry_points.txt +0 -6
- {quantalogic-0.61.3.dist-info → quantalogic-0.80.dist-info}/LICENSE +0 -0
quantalogic/flow/flow.py
CHANGED
@@ -17,7 +17,7 @@ import os
 from dataclasses import dataclass
 from enum import Enum
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Tuple, Type
+from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union

 import instructor
 from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound
@@ -253,6 +253,10 @@ class Workflow:
         self._observers: List[WorkflowObserver] = []
         self._register_node(start_node)
         self.current_node = start_node
+        # Loop-specific attributes
+        self.in_loop = False
+        self.loop_nodes = []
+        self.loop_entry_node = None

     def _register_node(self, name: str):
         """Register a node without modifying the current node."""
@@ -274,6 +278,8 @@ class Workflow:
             Self for method chaining.
         """
         self._register_node(name)
+        if self.in_loop:
+            self.loop_nodes.append(name)
         if inputs_mapping:
             self.node_input_mappings[name] = inputs_mapping
             logger.debug(f"Added inputs mapping for node {name}: {inputs_mapping}")
@@ -422,6 +428,66 @@ class Workflow:
         logger.debug(f"Added sub-workflow {name} with inputs {inputs} and output {output}")
         return self

+    def start_loop(self):
+        """Begin defining a loop in the workflow.
+
+        Raises:
+            ValueError: If called without a current node.
+
+        Returns:
+            Self for method chaining.
+        """
+        if self.current_node is None:
+            raise ValueError("Cannot start loop without a current node")
+        self.loop_entry_node = self.current_node
+        self.in_loop = True
+        self.loop_nodes = []
+        return self
+
+    def end_loop(self, condition: Callable[[Dict[str, Any]], bool], next_node: str):
+        """End the loop, setting up transitions based on the condition.
+
+        Args:
+            condition: Callable taking context and returning True when the loop should exit.
+            next_node: Name of the node to transition to after the loop exits.
+
+        Raises:
+            ValueError: If no loop nodes are defined.
+
+        Returns:
+            Self for method chaining.
+        """
+        if not self.in_loop or not self.loop_nodes:
+            raise ValueError("No loop nodes defined")
+
+        first_node = self.loop_nodes[0]
+        last_node = self.loop_nodes[-1]
+
+        # Transition from the node before the loop to the first loop node
+        self.transitions.setdefault(self.loop_entry_node, []).append((first_node, None))
+
+        # Transitions within the loop
+        for i in range(len(self.loop_nodes) - 1):
+            self.transitions.setdefault(self.loop_nodes[i], []).append((self.loop_nodes[i + 1], None))
+
+        # Conditional transitions from the last loop node
+        # If condition is False, loop back to the first node
+        self.transitions.setdefault(last_node, []).append((first_node, lambda ctx: not condition(ctx)))
+        # If condition is True, exit to the next node
+        self.transitions.setdefault(last_node, []).append((next_node, condition))
+
+        # Register the next_node if not already present
+        if next_node not in self.nodes:
+            self._register_node(next_node)
+
+        # Update state
+        self.current_node = next_node
+        self.in_loop = False
+        self.loop_nodes = []
+        self.loop_entry_node = None
+
+        return self
+
     def build(self, parent_engine: Optional["WorkflowEngine"] = None) -> WorkflowEngine:
         """Build and return a WorkflowEngine instance with registered observers.

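The new `start_loop()`/`end_loop()` pair lets a workflow repeat a chain of nodes until a context predicate returns True. A minimal sketch of the chaining style this enables, assuming `init`, `process_batch`, `evaluate`, and `finalize` are nodes already registered through the `Nodes` decorators (the names are illustrative, not part of the package):

```python
from quantalogic.flow.flow import Workflow

workflow = (
    Workflow("init")                 # 'init' becomes the loop entry node
    .start_loop()                    # start collecting loop nodes
    .node("process_batch")           # first node inside the loop
    .node("evaluate")                # last node inside the loop; sets ctx["done"]
    .end_loop(
        condition=lambda ctx: ctx.get("done", False),  # True -> exit the loop
        next_node="finalize",
    )
)
engine = workflow.build()
```

`end_loop` wires `evaluate` back to `process_batch` while the condition is False, routes it on to `finalize` once the condition is True, and then clears the loop state so further chaining continues from `finalize`.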
@@ -575,7 +641,7 @@ class Nodes:
         top_p: float = 1.0,
         presence_penalty: float = 0.0,
         frequency_penalty: float = 0.0,
-        model: Callable[[Dict[str, Any]], str] = lambda ctx: "gpt-3.5-turbo",
+        model: Union[Callable[[Dict[str, Any]], str], str] = lambda ctx: "gpt-3.5-turbo",
         **kwargs,
     ):
         """Decorator for creating LLM nodes with plain text output, supporting dynamic parameters.
@@ -598,27 +664,45 @@ class Nodes:
             Decorator function wrapping the LLM logic.
         """
         def decorator(func: Callable) -> Callable:
-
-
-
-
+            # Store all decorator parameters in a config dictionary
+            config = {
+                "system_prompt": system_prompt,
+                "system_prompt_file": system_prompt_file,
+                "prompt_template": prompt_template,
+                "prompt_file": prompt_file,
+                "temperature": temperature,
+                "max_tokens": max_tokens,
+                "top_p": top_p,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "model": model,
+                **kwargs,
+            }
+
+            async def wrapped_func(**func_kwargs):
+                # Use func_kwargs to override config values if provided, otherwise use config defaults
+                system_prompt_to_use = func_kwargs.pop("system_prompt", config["system_prompt"])
+                system_prompt_file_to_use = func_kwargs.pop("system_prompt_file", config["system_prompt_file"])
+                prompt_template_to_use = func_kwargs.pop("prompt_template", config["prompt_template"])
+                prompt_file_to_use = func_kwargs.pop("prompt_file", config["prompt_file"])
+                temperature_to_use = func_kwargs.pop("temperature", config["temperature"])
+                max_tokens_to_use = func_kwargs.pop("max_tokens", config["max_tokens"])
+                top_p_to_use = func_kwargs.pop("top_p", config["top_p"])
+                presence_penalty_to_use = func_kwargs.pop("presence_penalty", config["presence_penalty"])
+                frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", config["frequency_penalty"])
+                model_to_use = func_kwargs.pop("model", config["model"])
+
+                # Handle callable model parameter
+                if callable(model_to_use):
+                    model_to_use = model_to_use(func_kwargs)
+
+                # Load system prompt from file if specified
                 if system_prompt_file_to_use:
                     system_content = cls._load_prompt_from_file(system_prompt_file_to_use, func_kwargs)
                 else:
                     system_content = system_prompt_to_use
-
-                prompt_template_to_use = func_kwargs.pop("prompt_template", prompt_template)
-                prompt_file_to_use = func_kwargs.pop("prompt_file", prompt_file)
-                temperature_to_use = func_kwargs.pop("temperature", temperature)
-                max_tokens_to_use = func_kwargs.pop("max_tokens", max_tokens)
-                top_p_to_use = func_kwargs.pop("top_p", top_p)
-                presence_penalty_to_use = func_kwargs.pop("presence_penalty", presence_penalty)
-                frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", frequency_penalty)
-
-                # Prioritize model from func_kwargs (workflow mapping), then model_param, then default
-                model_to_use = func_kwargs.get("model", model_param if model_param is not None else model(func_kwargs))
-                logger.debug(f"Selected model for {func.__name__}: {model_to_use}")

+                # Prepare template variables and render prompt
                 sig = inspect.signature(func)
                 template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
                 prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
@@ -626,12 +710,14 @@ class Nodes:
                     {"role": "system", "content": system_content},
                     {"role": "user", "content": prompt},
                 ]
-
+
+                # Logging for debugging
                 truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
                 logger.info(f"LLM node {func.__name__} using model: {model_to_use}")
                 logger.debug(f"System prompt: {system_content[:100]}...")
                 logger.debug(f"User prompt preview: {truncated_prompt}")
-
+
+                # Call the acompletion function with the resolved model
                 try:
                     response = await acompletion(
                         model=model_to_use,
@@ -656,8 +742,10 @@
                 except Exception as e:
                     logger.error(f"Error in LLM node {func.__name__}: {e}")
                     raise
+
+            # Register the node with its inputs and output
             sig = inspect.signature(func)
-            inputs = [
+            inputs = [param.name for param in sig.parameters.values()]
             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
             return wrapped_func
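Because `model` is now `Union[Callable[[Dict[str, Any]], str], str]` and every decorator parameter can be overridden through `func_kwargs`, a node can pin a model as a plain string or resolve it from the node's keyword arguments at call time. A hedged sketch of both styles (node names, prompts, and model ids are illustrative; the decorated bodies stay empty because the decorator performs the completion call, and `output` is assumed to be the decorator's output key used in the registration above):

```python
from quantalogic.flow.flow import Nodes

# Static model: a plain string is used as-is, since callable() is False for it.
@Nodes.llm_node(model="gpt-4o-mini", output="summary",
                prompt_template="Summarize this text: {{ text }}")
async def summarize(text: str):
    pass

# Dynamic model: the callable receives the node's kwargs and returns a model id.
@Nodes.llm_node(model=lambda kwargs: "gpt-4o" if kwargs.get("hard") else "gpt-3.5-turbo",
                output="answer",
                prompt_template="Answer briefly: {{ question }}")
async def answer(question: str, hard: bool = False):
    pass
```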
@@ -677,7 +765,7 @@ class Nodes:
         top_p: float = 1.0,
         presence_penalty: float = 0.0,
         frequency_penalty: float = 0.0,
-        model: Callable[[Dict[str, Any]], str] = lambda ctx: "gpt-3.5-turbo",
+        model: Union[Callable[[Dict[str, Any]], str], str] = lambda ctx: "gpt-3.5-turbo",
         **kwargs,
     ):
         """Decorator for creating LLM nodes with structured output, supporting dynamic parameters.
@@ -707,27 +795,45 @@
             raise ImportError("Instructor is required for structured_llm_node")

         def decorator(func: Callable) -> Callable:
-
-
-
-
+            # Store all decorator parameters in a config dictionary
+            config = {
+                "system_prompt": system_prompt,
+                "system_prompt_file": system_prompt_file,
+                "prompt_template": prompt_template,
+                "prompt_file": prompt_file,
+                "temperature": temperature,
+                "max_tokens": max_tokens,
+                "top_p": top_p,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "model": model,
+                **kwargs,
+            }
+
+            async def wrapped_func(**func_kwargs):
+                # Resolve parameters, prioritizing func_kwargs over config defaults
+                system_prompt_to_use = func_kwargs.pop("system_prompt", config["system_prompt"])
+                system_prompt_file_to_use = func_kwargs.pop("system_prompt_file", config["system_prompt_file"])
+                prompt_template_to_use = func_kwargs.pop("prompt_template", config["prompt_template"])
+                prompt_file_to_use = func_kwargs.pop("prompt_file", config["prompt_file"])
+                temperature_to_use = func_kwargs.pop("temperature", config["temperature"])
+                max_tokens_to_use = func_kwargs.pop("max_tokens", config["max_tokens"])
+                top_p_to_use = func_kwargs.pop("top_p", config["top_p"])
+                presence_penalty_to_use = func_kwargs.pop("presence_penalty", config["presence_penalty"])
+                frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", config["frequency_penalty"])
+                model_to_use = func_kwargs.pop("model", config["model"])
+
+                # Handle callable model parameter
+                if callable(model_to_use):
+                    model_to_use = model_to_use(func_kwargs)
+
+                # Load system prompt from file if specified
                 if system_prompt_file_to_use:
                     system_content = cls._load_prompt_from_file(system_prompt_file_to_use, func_kwargs)
                 else:
                     system_content = system_prompt_to_use
-
-                prompt_template_to_use = func_kwargs.pop("prompt_template", prompt_template)
-                prompt_file_to_use = func_kwargs.pop("prompt_file", prompt_file)
-                temperature_to_use = func_kwargs.pop("temperature", temperature)
-                max_tokens_to_use = func_kwargs.pop("max_tokens", max_tokens)
-                top_p_to_use = func_kwargs.pop("top_p", top_p)
-                presence_penalty_to_use = func_kwargs.pop("presence_penalty", presence_penalty)
-                frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", frequency_penalty)
-
-                # Prioritize model from func_kwargs (workflow mapping), then model_param, then default
-                model_to_use = func_kwargs.get("model", model_param if model_param is not None else model(func_kwargs))
-                logger.debug(f"Selected model for {func.__name__}: {model_to_use}")

+                # Render prompt using template variables
                 sig = inspect.signature(func)
                 template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
                 prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
@@ -735,13 +841,15 @@
                     {"role": "system", "content": system_content},
                     {"role": "user", "content": prompt},
                 ]
-
+
+                # Logging for debugging
                 truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
                 logger.info(f"Structured LLM node {func.__name__} using model: {model_to_use}")
                 logger.debug(f"System prompt: {system_content[:100]}...")
                 logger.debug(f"User prompt preview: {truncated_prompt}")
                 logger.debug(f"Expected response model: {response_model.__name__}")
-
+
+                # Generate structured response
                 try:
                     structured_response, raw_response = await client.chat.completions.create_with_completion(
                         model=model_to_use,
@@ -769,8 +877,10 @@
                 except Exception as e:
                     logger.error(f"Error in structured LLM node {func.__name__}: {e}")
                     raise
+
+            # Register the node
             sig = inspect.signature(func)
-            inputs = [
+            inputs = [param.name for param in sig.parameters.values()]
             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
             return wrapped_func
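`structured_llm_node` follows the same config-and-override pattern but parses the completion into a typed object via `instructor`; the `response_model` referenced above is the target class. A hedged sketch of how such a node might be declared (the Pydantic model, parameter names, and values are examples inferred from the diff, not shipped defaults):

```python
from pydantic import BaseModel
from quantalogic.flow.flow import Nodes

class Person(BaseModel):
    name: str
    age: int

@Nodes.structured_llm_node(
    model="gpt-4o-mini",              # plain string models are now accepted
    response_model=Person,            # instructor validates the reply into this type
    output="person",
    prompt_template="Extract the person mentioned in: {{ text }}",
)
async def extract_person(text: str):
    pass  # the decorator builds the messages and performs the structured call
```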
quantalogic/flow/flow_extractor.py
CHANGED
@@ -21,7 +21,7 @@ class WorkflowExtractor(ast.NodeVisitor):
     AST visitor to extract workflow nodes and structure from a Python file.

     This class parses Python source code to identify workflow components defined with Nodes decorators
-    and Workflow construction, including branch and
+    and Workflow construction, including branch, converge, and loop patterns, building a WorkflowDefinition
     compatible with WorkflowManager. Fully supports input mappings and template nodes.
     """

@@ -34,6 +34,10 @@ class WorkflowExtractor(ast.NodeVisitor):
         self.global_vars = {}  # Tracks global variable assignments (e.g., DEFAULT_LLM_PARAMS)
         self.observers = []  # List of observer function names
         self.convergence_nodes = []  # List of convergence nodes
+        # Added for loop support
+        self.in_loop = False  # Flag indicating if we're inside a loop
+        self.loop_nodes = []  # List of nodes within the current loop
+        self.loop_entry_node = None  # Node before the loop starts

     def visit_Module(self, node):
         """Log and explicitly process top-level statements in the module."""
@@ -468,6 +472,10 @@ class WorkflowExtractor(ast.NodeVisitor):
            if previous_node:
                self.transitions.append(TransitionDefinition(from_node=previous_node, to_node=node_name))
                logger.debug(f"Added node transition: {previous_node} -> {node_name}")
+            # Add node to loop_nodes if inside a loop
+            if self.in_loop:
+                self.loop_nodes.append(node_name)
+                logger.debug(f"Added '{node_name}' to loop_nodes in '{var_name}'")
            return node_name

        elif method_name == "add_sub_workflow":
@@ -519,6 +527,58 @@ class WorkflowExtractor(ast.NodeVisitor):
                logger.warning(f"Unsupported observer argument in 'add_observer' for '{var_name}'")
            return previous_node

+        elif method_name == "start_loop":
+            if previous_node is None:
+                logger.warning(f"start_loop called without a previous node in '{var_name}'")
+                return None
+            self.in_loop = True
+            self.loop_entry_node = previous_node
+            self.loop_nodes = []
+            logger.debug(f"Started loop after node '{previous_node}' in '{var_name}'")
+            return previous_node
+
+        elif method_name == "end_loop":
+            cond = None
+            next_node = None
+            for keyword in expr.keywords:
+                if keyword.arg == "condition":
+                    cond = ast.unparse(keyword.value)
+                elif keyword.arg == "next_node":
+                    next_node = (keyword.value.value
+                                 if isinstance(keyword.value, ast.Constant)
+                                 else ast.unparse(keyword.value))
+            if not cond or not next_node:
+                logger.warning(f"end_loop in '{var_name}' missing condition or next_node")
+                return None
+            if not self.loop_nodes:
+                logger.warning(f"end_loop called without loop nodes in '{var_name}'")
+                return None
+            first_loop_node = self.loop_nodes[0]
+            last_loop_node = self.loop_nodes[-1]
+            # Loop-back transition: last node to first node when condition is false
+            negated_cond = f"not ({cond})"
+            self.transitions.append(
+                TransitionDefinition(
+                    from_node=last_loop_node,
+                    to_node=first_loop_node,
+                    condition=negated_cond
+                )
+            )
+            # Exit transition: last node to next_node when condition is true
+            self.transitions.append(
+                TransitionDefinition(
+                    from_node=last_loop_node,
+                    to_node=next_node,
+                    condition=cond
+                )
+            )
+            logger.debug(f"Added loop transitions: '{last_loop_node}' -> '{first_loop_node}' "
+                         f"(not {cond}), '{last_loop_node}' -> '{next_node}' ({cond})")
+            self.in_loop = False
+            self.loop_nodes = []
+            self.loop_entry_node = None
+            return next_node
+
        else:
            logger.warning(f"Unsupported Workflow method '{method_name}' in variable '{var_name}'")
            return None
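The `end_loop` branch above leans on two small `ast` behaviours: `ast.unparse` to recover the condition expression as source text, and `Constant.value` to read a literal `next_node` name. A standalone illustration of those calls (Python 3.9+ for `ast.unparse`; the expression is made up):

```python
import ast

# Parse a chained call the extractor might encounter in a workflow script.
call = ast.parse(
    'wf.end_loop(condition=lambda ctx: ctx["done"], next_node="finalize")',
    mode="eval",
).body

for keyword in call.keywords:
    if keyword.arg == "condition":
        print(ast.unparse(keyword.value))   # lambda ctx: ctx['done']
    elif keyword.arg == "next_node":
        print(keyword.value.value)          # finalize
```

The extractor stores the unparsed condition on the exit transition and `not (<condition>)` on the loop-back transition.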
quantalogic/flow/flow_generator.py
CHANGED
@@ -3,7 +3,6 @@ import os
 import re
 from typing import Dict, Optional

-from quantalogic.flow.flow import Nodes
 from quantalogic.flow.flow_manager_schema import BranchCondition, WorkflowDefinition


@@ -27,7 +26,7 @@ def generate_executable_script(
     - Metadata specifying the required Python version and dependencies.
     - Global variables from the original script.
     - Functions defined with appropriate Nodes decorators (e.g., @Nodes.define, @Nodes.llm_node).
-    - Workflow instantiation using direct chaining syntax with function names, including branch and
+    - Workflow instantiation using direct chaining syntax with function names, including branch, converge, and loop support.
     - Support for input mappings and template nodes via workflow configuration and decorators.
     - A default initial_context inferred from the workflow with customization guidance.
     """
@@ -91,6 +90,22 @@ def generate_executable_script(
            if not value.startswith("lambda ctx:"):  # Static mappings only
                initial_context[value] = ""

+    # Detect loops
+    loop_nodes = []
+    loop_condition = None
+    loop_exit_node = None
+    for trans in workflow_def.workflow.transitions:
+        if isinstance(trans.to_node, str) and trans.condition:
+            # Check for loop-back transition
+            if any(t.from_node == trans.to_node and t.to_node == trans.from_node for t in workflow_def.workflow.transitions):
+                loop_nodes.append(trans.from_node)
+                loop_nodes.append(trans.to_node)
+                loop_condition = trans.condition
+            # Check for exit transition
+            elif loop_nodes and trans.from_node == loop_nodes[-1] and f"not ({loop_condition})" in trans.condition:
+                loop_exit_node = trans.to_node
+    loop_nodes = list(dict.fromkeys(loop_nodes))  # Remove duplicates, preserve order
+
     with open(output_file, "w") as f:
         # Shebang and metadata
         f.write("#!/usr/bin/env -S uv run\n")
@@ -159,15 +174,20 @@ def generate_executable_script(
            decorator = f"@Nodes.define(output={repr(node_def.output or f'{node_name}_result')})\n"
            f.write(f"{decorator}{func_body}\n\n")

-        # Define workflow using chaining syntax
-        f.write("# Define the workflow with branch and
+        # Define workflow using chaining syntax with loop support
+        f.write("# Define the workflow with branch, converge, and loop support\n")
         f.write("workflow = (\n")
         start_node = workflow_def.workflow.start
         start_func = workflow_def.nodes[start_node].function if start_node in workflow_def.nodes and workflow_def.nodes[start_node].function else start_node
         f.write(f'    Workflow("{start_func}")\n')

+        added_nodes = set()
         for node_name, node_def in workflow_def.nodes.items():
+            if node_name in added_nodes:
+                continue
             func_name = node_def.function if node_def.function else node_name
+            if loop_nodes and node_name == loop_nodes[0]:
+                f.write("    .start_loop()\n")
             if node_def.sub_workflow:
                 sub_start = node_def.sub_workflow.start or f"{node_name}_start"
                 sub_start_func = workflow_def.nodes[sub_start].function if sub_start in workflow_def.nodes and workflow_def.nodes[sub_start].function else sub_start
@@ -208,15 +228,23 @@ def generate_executable_script(
                f.write(f'    .node("{func_name}", inputs_mapping={inputs_mapping_str})\n')
            else:
                f.write(f'    .node("{func_name}")\n')
+            added_nodes.add(node_name)

         for trans in workflow_def.workflow.transitions:
             from_node = trans.from_node
-
+            _from_func = workflow_def.nodes[from_node].function if from_node in workflow_def.nodes and workflow_def.nodes[from_node].function else from_node
+            if from_node not in added_nodes:
+                continue  # Skip if already added via .node()
             to_node = trans.to_node
             if isinstance(to_node, str):
+                if loop_nodes and from_node in loop_nodes and to_node in loop_nodes:
+                    continue  # Skip loop-back transition, handled by end_loop
                 to_func = workflow_def.nodes[to_node].function if to_node in workflow_def.nodes and workflow_def.nodes[to_node].function else to_node
                 condition = f"lambda ctx: {trans.condition}" if trans.condition else "None"
-
+                if loop_nodes and from_node == loop_nodes[-1] and to_node == loop_exit_node:
+                    f.write(f'    .end_loop(condition=lambda ctx: {loop_condition}, next_node="{to_func}")\n')
+                else:
+                    f.write(f'    .then("{to_func}", condition={condition})\n')
             elif all(isinstance(tn, str) for tn in to_node):
                 to_funcs = [workflow_def.nodes[tn].function if tn in workflow_def.nodes and workflow_def.nodes[tn].function else tn for tn in to_node]
                 f.write(f'    .parallel({", ".join(f"{n!r}" for n in to_funcs)})\n')
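With loop detection in place, the generator emits a detected cycle in chaining form instead of a pair of raw conditional `.then()` calls. Roughly, for a loop whose first node maps to the function `check_length`, whose last node maps to `append_char`, and which exits to `farewell`, the written script would contain something like the following (illustrative names; the exact condition expression is whatever string was stored on the detected loop-back transition):

```python
# Define the workflow with branch, converge, and loop support
workflow = (
    Workflow("greet")
    .start_loop()
    .node("check_length")
    .node("append_char")
    .end_loop(condition=lambda ctx: ctx.get('continue_loop', False), next_node="farewell")
)
```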
quantalogic/flow/flow_manager.py
CHANGED
@@ -195,6 +195,27 @@ class WorkflowManager:
         )
         self.workflow.workflow.transitions.append(transition)

+    def add_loop(self, loop_nodes: List[str], condition: str, exit_node: str) -> None:
+        """Add a loop construct to the workflow.
+
+        Args:
+            loop_nodes: List of node names to execute in the loop.
+            condition: Python expression using 'ctx' that, when True, keeps the loop running.
+            exit_node: Node to transition to when the loop condition is False.
+        """
+        if not loop_nodes:
+            raise ValueError("Loop must contain at least one node")
+        for node in loop_nodes + [exit_node]:
+            if node not in self.workflow.nodes:
+                raise ValueError(f"Node '{node}' does not exist")
+        # Add transitions between loop nodes
+        for i in range(len(loop_nodes) - 1):
+            self.add_transition(from_node=loop_nodes[i], to_node=loop_nodes[i + 1])
+        # Add loop-back transition
+        self.add_transition(from_node=loop_nodes[-1], to_node=loop_nodes[0], condition=condition)
+        # Add exit transition
+        self.add_transition(from_node=loop_nodes[-1], to_node=exit_node, condition=f"not ({condition})")
+
     def set_start_node(self, name: str) -> None:
         """Set the start node of the workflow."""
         if name not in self.workflow.nodes:
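`add_loop` is purely a convenience over `add_transition`: it chains the loop body, adds a conditional loop-back edge, and adds a negated-condition exit edge. A hedged usage sketch, assuming the named nodes were added beforehand:

```python
from quantalogic.flow.flow_manager import WorkflowManager

manager = WorkflowManager()
# ... add_function / add_node calls for "check", "append", and "end" go here ...
manager.add_loop(
    loop_nodes=["check", "append"],
    condition="ctx.get('continue_loop', False)",  # True -> run the body again
    exit_node="end",
)
# Equivalent transitions created:
#   check  -> append                                       (unconditional)
#   append -> check   if ctx.get('continue_loop', False)
#   append -> end     if not (ctx.get('continue_loop', False))
```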
@@ -288,7 +309,7 @@ class WorkflowManager:
         )

     def instantiate_workflow(self) -> Workflow:
-        """Instantiate a Workflow object with full support for template_node and
+        """Instantiate a Workflow object with full support for template_node, inputs_mapping, and loops."""
         self._ensure_dependencies()

         functions: Dict[str, Callable] = {}
@@ -471,10 +492,30 @@
            else:
                wf.node(node_name, inputs_mapping=inputs_mapping if inputs_mapping else None)

+        # Detect loops by finding cycles with conditions
+        loop_nodes = []
+        loop_condition = None
+        loop_exit_node = None
+        for trans in self.workflow.workflow.transitions:
+            if isinstance(trans.to_node, str) and trans.condition:
+                if trans.to_node in loop_nodes and trans.from_node in loop_nodes:
+                    continue  # Already identified as part of loop
+                if any(t.from_node == trans.to_node and t.to_node == trans.from_node for t in self.workflow.workflow.transitions):
+                    # Found a potential loop
+                    loop_nodes.append(trans.from_node)
+                    loop_nodes.append(trans.to_node)
+                    loop_condition = trans.condition
+                elif trans.from_node in loop_nodes:
+                    # Check for exit transition
+                    if f"not ({loop_condition})" in trans.condition:
+                        loop_exit_node = trans.to_node
+
         added_nodes = set()
         for trans in self.workflow.workflow.transitions:
             from_node = trans.from_node
             if from_node not in added_nodes and from_node not in sub_workflows:
+                if loop_nodes and from_node == loop_nodes[0]:  # Start of loop
+                    wf.start_loop()
                 wf.node(from_node)
                 added_nodes.add(from_node)
             if isinstance(trans.to_node, str):
@@ -483,7 +524,10 @@
                if to_nodes[0] not in added_nodes and to_nodes[0] not in sub_workflows:
                    wf.node(to_nodes[0])
                    added_nodes.add(to_nodes[0])
-
+                if loop_nodes and to_nodes[0] == loop_exit_node and loop_condition:  # End of loop
+                    wf.end_loop(condition=eval(f"lambda ctx: {loop_condition}"), next_node=to_nodes[0])
+                else:
+                    wf.then(to_nodes[0], condition=condition)
            elif all(isinstance(tn, str) for tn in trans.to_node):
                to_nodes = trans.to_node
                for to_node in to_nodes:
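The `end_loop` call above has to turn the stored condition string back into a callable, which is what `eval(f"lambda ctx: {loop_condition}")` does. A minimal illustration of that round-trip (the condition string is an example; the `eval` assumes the workflow definition is trusted):

```python
loop_condition = "ctx.get('continue_loop', False)"
predicate = eval(f"lambda ctx: {loop_condition}")  # build the callable the engine expects

assert predicate({"continue_loop": True}) is True
assert predicate({}) is False
```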
@@ -544,7 +588,7 @@


 async def test_workflow():
-    """Test the workflow execution."""
+    """Test the workflow execution with a loop."""
     manager = WorkflowManager()
     manager.workflow.dependencies = ["requests>=2.28.0"]
     manager.add_function(
@@ -553,9 +597,14 @@ async def test_workflow():
         code="def greet(user_name): return f'Hello, {user_name}!'",
     )
     manager.add_function(
-        name="
+        name="check_length",
         type_="embedded",
-        code="def
+        code="def check_length(user_name): return len(user_name) < 5",
+    )
+    manager.add_function(
+        name="append_char",
+        type_="embedded",
+        code="def append_char(user_name): return user_name + 'x'",
     )
     manager.add_function(
         name="farewell",
@@ -578,19 +627,16 @@ async def test_workflow():
         inputs_mapping={"user_name": "name_input"},
     )
     manager.add_node(
-        name="
-
-        inputs_mapping={"user_name": "name_input", "date": "lambda ctx: '2025-03-06'"},
-    )
-    manager.add_node(
-        name="branch_true",
-        function="check_condition",
+        name="check",
+        function="check_length",
         inputs_mapping={"user_name": "name_input"},
+        output="continue_loop"
     )
     manager.add_node(
-        name="
-        function="
+        name="append",
+        function="append_char",
         inputs_mapping={"user_name": "name_input"},
+        output="name_input"
     )
     manager.add_node(
         name="end",
@@ -598,18 +644,11 @@
     inputs_mapping={"user_name": "name_input"},
     )
     manager.set_start_node("start")
-    manager.
-
-
-
-    manager.add_transition(
-        from_node="format_greeting",
-        to_node=[
-            BranchCondition(to_node="branch_true", condition="ctx.get('user_name') == 'Alice'"),
-            BranchCondition(to_node="branch_false", condition="ctx.get('user_name') != 'Alice'")
-        ]
+    manager.add_loop(
+        loop_nodes=["start", "check", "append"],
+        condition="ctx.get('continue_loop', False)",
+        exit_node="end"
     )
-    manager.add_convergence_node("end")
     manager.add_observer("monitor")
     manager.save_to_yaml("workflow.yaml")

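For reference, the rewritten test wires `greet` -> `check_length` -> `append_char` into a loop that keeps appending `'x'` to `name_input` until the length check fails, then falls through to the `farewell` node. A plain-Python rendition of what that loop computes, assuming an initial context of `{"name_input": "Al"}` (the greeting/farewell outputs are omitted):

```python
ctx = {"name_input": "Al"}
while True:
    ctx["continue_loop"] = len(ctx["name_input"]) < 5   # check node (check_length)
    ctx["name_input"] += "x"                             # append node (append_char)
    if not ctx["continue_loop"]:                         # exit edge: not (condition)
        break
print(ctx["name_input"])  # "Alxxxx" -- the loop exits once the checked length reaches 5
```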