quantalogic 0.52.1__py3-none-any.whl → 0.53.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
quantalogic/flow/flow.py CHANGED
@@ -2,22 +2,23 @@
 # /// script
 # requires-python = ">=3.12"
 # dependencies = [
-# "loguru",
-# "litellm",
-# "pydantic>=2.0",
-# "anyio",
-# "jinja2",
-# "instructor[litellm]" # Required for structured_llm_node
+# "loguru>=0.7.2", # Logging utility
+# "litellm>=1.0.0", # LLM integration
+# "pydantic>=2.0.0", # Data validation and settings
+# "anyio>=4.0.0", # Async utilities
+# "jinja2>=3.1.0", # Templating engine
+# "instructor[litellm]>=0.5.0" # Structured LLM output with litellm integration
 # ]
 # ///
 
 import asyncio
 from dataclasses import dataclass
 from enum import Enum
+from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Tuple, Type
 
 import instructor
-from jinja2 import Template
+from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound
 from litellm import acompletion
 from loguru import logger
 from pydantic import BaseModel, ValidationError
@@ -348,13 +349,42 @@ class Nodes:
 
         return decorator
 
+    @staticmethod
+    def _load_prompt_from_file(prompt_file: str, context: Dict[str, Any]) -> str:
+        """Load and render a Jinja2 template from an external file."""
+        try:
+            file_path = Path(prompt_file).resolve()
+            directory = file_path.parent
+            filename = file_path.name
+            env = Environment(loader=FileSystemLoader(directory))
+            template = env.get_template(filename)
+            return template.render(**context)
+        except TemplateNotFound as e:
+            logger.error(f"Jinja2 template file '{prompt_file}' not found: {e}")
+            raise ValueError(f"Prompt file '{prompt_file}' not found")
+        except Exception as e:
+            logger.error(f"Error loading or rendering prompt file '{prompt_file}': {e}")
+            raise
+
+    @staticmethod
+    def _render_prompt(template: str, prompt_file: Optional[str], context: Dict[str, Any]) -> str:
+        """Render a prompt from either a template string or an external file."""
+        if prompt_file:
+            return Nodes._load_prompt_from_file(prompt_file, context)
+        try:
+            return Template(template).render(**context)
+        except Exception as e:
+            logger.error(f"Error rendering prompt template: {e}")
+            raise
+
     @classmethod
     def llm_node(
         cls,
         model: str,
         system_prompt: str,
-        prompt_template: str,
         output: str,
+        prompt_template: str = "",
+        prompt_file: Optional[str] = None,
         temperature: float = 0.7,
         max_tokens: int = 2000,
         top_p: float = 1.0,
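
The new _render_prompt helper keeps the old inline-template path but defers to _load_prompt_from_file whenever prompt_file is set. A minimal sketch of that precedence, assuming a prompts/greet.j2 file containing "Hello {{ name }}!" (the file and its contents are illustrative):

    from quantalogic.flow.flow import Nodes

    context = {"name": "Ada"}

    # Inline template string: rendered in-memory with jinja2.Template.
    Nodes._render_prompt("Hi {{ name }}", None, context)          # -> "Hi Ada"

    # External file: prompt_file takes precedence; the template argument is ignored.
    Nodes._render_prompt("ignored", "prompts/greet.j2", context)  # -> "Hello Ada!"
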
@@ -362,11 +392,11 @@ class Nodes:
         frequency_penalty: float = 0.0,
         **kwargs,
     ):
-        """Decorator for creating LLM nodes with plain text output."""
+        """Decorator for creating LLM nodes with plain text output, supporting external prompt files."""
 
         def decorator(func: Callable) -> Callable:
             async def wrapped_func(**kwargs):
-                prompt = cls._render_prompt(prompt_template, kwargs)
+                prompt = cls._render_prompt(prompt_template, prompt_file, kwargs)
                 messages = [
                     {"role": "system", "content": system_prompt},
                     {"role": "user", "content": prompt},
@@ -409,9 +439,10 @@ class Nodes:
         cls,
         model: str,
         system_prompt: str,
-        prompt_template: str,
-        response_model: Type[BaseModel],
         output: str,
+        response_model: Type[BaseModel],
+        prompt_template: str = "",
+        prompt_file: Optional[str] = None,
         temperature: float = 0.7,
         max_tokens: int = 2000,
         top_p: float = 1.0,
@@ -419,7 +450,7 @@ class Nodes:
         frequency_penalty: float = 0.0,
         **kwargs,
     ):
-        """Decorator for creating LLM nodes with structured output using instructor."""
+        """Decorator for creating LLM nodes with structured output using instructor, supporting external prompt files."""
         try:
             client = instructor.from_litellm(acompletion)
         except ImportError:
@@ -428,7 +459,7 @@ class Nodes:
 
         def decorator(func: Callable) -> Callable:
             async def wrapped_func(**kwargs):
-                prompt = cls._render_prompt(prompt_template, kwargs)
+                prompt = cls._render_prompt(prompt_template, prompt_file, kwargs)
                 messages = [
                     {"role": "system", "content": system_prompt},
                     {"role": "user", "content": prompt},
@@ -470,15 +501,6 @@ class Nodes:
 
         return decorator
 
-    @staticmethod
-    def _render_prompt(template: str, context: Dict[str, Any]) -> str:
-        """Render a Jinja2 template with the given context."""
-        try:
-            return Template(template).render(**context)
-        except Exception as e:
-            logger.error(f"Error rendering prompt template: {e}")
-            raise
-
 
 # Example workflow with observer integration and updated structured node
 async def example_workflow():
@@ -528,9 +550,9 @@ async def example_workflow():
     @Nodes.structured_llm_node(
         model="gemini/gemini-2.0-flash",
         system_prompt="You are an inventory checker. Respond with a JSON object containing 'order_id', 'items', and 'in_stock' (boolean).",
-        prompt_template="Check if the following items are in stock: {{ items }}. Return the result in JSON format with 'order_id' set to '123'.",
-        response_model=OrderDetails,
         output="inventory_status",
+        response_model=OrderDetails,
+        prompt_template="Check if the following items are in stock: {{ items }}. Return the result in JSON format with 'order_id' set to '123'.",
     )
     async def check_inventory(items: List[str]) -> OrderDetails:
         # This is a placeholder function that would normally call an LLM
@@ -594,4 +616,4 @@ async def example_workflow():
 
 
 if __name__ == "__main__":
-    asyncio.run(example_workflow())
+    asyncio.run(example_workflow())
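
Taken together, the flow.py changes let a node read its user prompt from an on-disk Jinja2 template instead of an inline string. A hedged usage sketch (the model name mirrors the example workflow above; the node and the prompts/greeting.j2 path are hypothetical):

    from quantalogic.flow.flow import Nodes

    @Nodes.llm_node(
        model="gemini/gemini-2.0-flash",
        system_prompt="You are a friendly greeter.",
        output="greeting",
        prompt_file="prompts/greeting.j2",  # hypothetical file, rendered with the node's kwargs
    )
    async def greet(name: str) -> str:
        pass  # placeholder body; the decorator's wrapper performs the LLM call

Because prompt_template now defaults to an empty string, nodes that use prompt_file no longer need to pass a template at all.
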
quantalogic/flow/flow_extractor.py CHANGED
@@ -131,11 +131,11 @@ class WorkflowExtractor(ast.NodeVisitor):
             llm_config = {
                 key: value
                 for key, value in kwargs.items()
-                if key
-                in [
+                if key in [
                     "model",
                     "system_prompt",
                     "prompt_template",
+                    "prompt_file",  # Added to support external Jinja2 files
                     "temperature",
                     "max_tokens",
                     "top_p",
@@ -162,11 +162,11 @@ class WorkflowExtractor(ast.NodeVisitor):
             llm_config = {
                 key: value
                 for key, value in kwargs.items()
-                if key
-                in [
+                if key in [
                     "model",
                     "system_prompt",
                     "prompt_template",
+                    "prompt_file",  # Added to support external Jinja2 files
                     "temperature",
                     "max_tokens",
                     "top_p",
@@ -250,11 +250,11 @@ class WorkflowExtractor(ast.NodeVisitor):
             llm_config = {
                 key: value
                 for key, value in kwargs.items()
-                if key
-                in [
+                if key in [
                     "model",
                     "system_prompt",
                     "prompt_template",
+                    "prompt_file",  # Added to support external Jinja2 files
                     "temperature",
                     "max_tokens",
                     "top_p",
@@ -281,11 +281,11 @@ class WorkflowExtractor(ast.NodeVisitor):
             llm_config = {
                 key: value
                 for key, value in kwargs.items()
-                if key
-                in [
+                if key in [
                     "model",
                     "system_prompt",
                     "prompt_template",
+                    "prompt_file",  # Added to support external Jinja2 files
                     "temperature",
                     "max_tokens",
                     "top_p",
@@ -510,9 +510,6 @@ def extract_workflow_from_file(file_path):
     return workflow_def, extractor.global_vars
 
 
-# The generate_executable_script function has been moved to flow_generator.py
-
-
 def print_workflow_definition(workflow_def):
     """
     Utility function to print a WorkflowDefinition in a human-readable format.
@@ -541,6 +538,8 @@ def print_workflow_definition(workflow_def):
             print(" Type: LLM")
             print(f" Model: {node.llm_config.model}")
             print(f" Prompt Template: {node.llm_config.prompt_template}")
+            if node.llm_config.prompt_file:  # Added to display external prompt file if present
+                print(f" Prompt File: {node.llm_config.prompt_file}")
         elif node.sub_workflow:
             print(" Type: Sub-Workflow")
             print(f" Start Node: {node.sub_workflow.start}")
@@ -601,4 +600,4 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
+    main()
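
The four extractor hunks above make the same change in each visitor branch: the whitelist used when copying decorator keyword arguments into llm_config now includes prompt_file. An illustrative run of that comprehension (the kwargs dict is invented, and the key list is abbreviated to the entries visible in this diff):

    kwargs = {
        "model": "gemini/gemini-2.0-flash",
        "prompt_file": "prompts/check.j2",
        "unrelated": 42,
    }
    llm_config = {
        key: value
        for key, value in kwargs.items()
        if key in ["model", "system_prompt", "prompt_template", "prompt_file",
                   "temperature", "max_tokens", "top_p"]
    }
    # llm_config == {"model": "gemini/gemini-2.0-flash", "prompt_file": "prompts/check.j2"}
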
quantalogic/flow/flow_generator.py CHANGED
@@ -1,9 +1,18 @@
+import ast
 import os
+import re
+from typing import Dict, Optional
 
+from quantalogic.flow.flow import Nodes  # Import Nodes to access NODE_REGISTRY
 from quantalogic.flow.flow_manager_schema import WorkflowDefinition
 
 
-def generate_executable_script(workflow_def: WorkflowDefinition, global_vars: dict, output_file: str) -> None:
+def generate_executable_script(
+    workflow_def: WorkflowDefinition,
+    global_vars: Dict[str, object],
+    output_file: str,
+    initial_context: Optional[Dict[str, object]] = None,
+) -> None:
     """
     Generate an executable Python script from a WorkflowDefinition with global variables.
 
@@ -11,17 +20,74 @@ def generate_executable_script(workflow_def: WorkflowDefinition, global_vars: di
         workflow_def: The WorkflowDefinition object containing the workflow details.
         global_vars: Dictionary of global variables extracted from the source file.
         output_file: The path where the executable script will be written.
+        initial_context: Optional initial context; if None, inferred from the workflow.
 
     The generated script includes:
     - A shebang using `uv run` for environment management.
    - Metadata specifying the required Python version and dependencies.
    - Global variables from the original script.
-    - Embedded functions included directly in the script.
+    - Embedded functions included directly in the script with node registration.
    - Workflow instantiation using direct chaining syntax.
-    - A default initial_context matching the example.
+    - A default initial_context inferred from the workflow with customization guidance.
     """
+    # Infer initial context if not provided
+    if initial_context is None:
+        initial_context = {}
+        start_node = workflow_def.workflow.start
+        if start_node and start_node in workflow_def.nodes:
+            node_def = workflow_def.nodes[start_node]
+            if node_def.function:
+                # Function node: Try NODE_REGISTRY first
+                if start_node in Nodes.NODE_REGISTRY:
+                    inputs = Nodes.NODE_REGISTRY[start_node][1]
+                    initial_context = {input_name: None for input_name in inputs}
+                # Fallback: Parse embedded function code
+                elif node_def.function in workflow_def.functions:
+                    func_def = workflow_def.functions[node_def.function]
+                    if func_def.type == "embedded" and func_def.code:
+                        try:
+                            tree = ast.parse(func_def.code)
+                            for node in ast.walk(tree):
+                                if isinstance(node, ast.AsyncFunctionDef) or isinstance(node, ast.FunctionDef):
+                                    inputs = [param.arg for param in node.args.args]
+                                    initial_context = {input_name: None for input_name in inputs}
+                                    break
+                        except SyntaxError:
+                            pass  # If parsing fails, leave context empty
+            elif node_def.llm_config:
+                # LLM node: Parse prompt template for variables
+                prompt = node_def.llm_config.prompt_template or ""
+                input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", prompt))
+                cleaned_inputs = {
+                    re.split(r"\s*[\+\-\*/]\s*", var.strip())[0].strip()
+                    for var in input_vars
+                    if var.strip().isidentifier()
+                }
+                initial_context = {var: None for var in cleaned_inputs}
+            elif node_def.sub_workflow:
+                # Sub-workflow: Infer from sub-workflow's start node
+                sub_start = node_def.sub_workflow.start or f"{start_node}_start"
+                if sub_start in Nodes.NODE_REGISTRY:
+                    inputs = Nodes.NODE_REGISTRY[sub_start][1]
+                    initial_context = {input_name: None for input_name in inputs}
+                # Fallback: Check sub-workflow's start node function
+                elif sub_start in workflow_def.nodes:
+                    sub_node_def = workflow_def.nodes[sub_start]
+                    if sub_node_def.function in workflow_def.functions:
+                        func_def = workflow_def.functions[sub_node_def.function]
+                        if func_def.type == "embedded" and func_def.code:
+                            try:
+                                tree = ast.parse(func_def.code)
+                                for node in ast.walk(tree):
+                                    if isinstance(node, ast.AsyncFunctionDef) or isinstance(node, ast.FunctionDef):
+                                        inputs = [param.arg for param in node.args.args]
+                                        initial_context = {input_name: None for input_name in inputs}
+                                        break
+                            except SyntaxError:
+                                pass
+
     with open(output_file, "w") as f:
-        # Write the shebang and metadata
+        # Write the shebang and metadata (exact original style)
         f.write("#!/usr/bin/env -S uv run\n")
         f.write("# /// script\n")
         f.write('# requires-python = ">=3.12"\n')
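
For an LLM start node, the inference scans the inline prompt template for Jinja2 placeholders and keeps only bare identifiers. A small worked example of that regex path (the prompt string is invented):

    import re

    prompt = "Write a {{ genre }} story of {{ num_chapters }} chapters."
    input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", prompt))  # {'genre', 'num_chapters'}
    cleaned_inputs = {
        re.split(r"\s*[\+\-\*/]\s*", var.strip())[0].strip()
        for var in input_vars
        if var.strip().isidentifier()
    }
    initial_context = {var: None for var in cleaned_inputs}
    # initial_context == {'genre': None, 'num_chapters': None}

Note that the isidentifier() filter drops compound expressions such as {{ num_chapters + 1 }} before the arithmetic split applies, so only simple variables are inferred.
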
@@ -32,65 +98,132 @@ def generate_executable_script(workflow_def: WorkflowDefinition, global_vars: di
         f.write('# "anyio",\n')
         f.write('# "quantalogic>=0.35",\n')
         f.write('# "jinja2",\n')
-        f.write('# "instructor[litellm]",\n')  # Kept for potential structured LLM support
+        f.write('# "instructor[litellm]",\n')
         f.write("# ]\n")
         f.write("# ///\n\n")
 
-        # Write necessary imports
+        # Write necessary imports (matching original)
         f.write("import anyio\n")
         f.write("from typing import List\n")
         f.write("from loguru import logger\n")
         f.write("from quantalogic.flow import Nodes, Workflow\n\n")
 
-        # Write global variables
+        # Write global variables (preserving original feature)
         for var_name, value in global_vars.items():
             f.write(f"{var_name} = {repr(value)}\n")
         f.write("\n")
 
-        # Embed functions from workflow_def
+        # Embed functions from workflow_def without decorators
         for func_name, func_def in workflow_def.functions.items():
-            if func_def.type == "embedded":
-                if func_def.code is not None:
-                    f.write(func_def.code + "\n\n")
-                else:
-                    f.write("\n\n")
-
-        # Define workflow using chaining syntax
-        f.write("# Define the workflow using simplified syntax with automatic node registration\n")
+            if func_def.type == "embedded" and func_def.code:
+                f.write(func_def.code + "\n\n")
+
+        # Register nodes explicitly with their intended names
+        f.write("# Register nodes with their workflow names\n")
+        for node_name, node_def in workflow_def.nodes.items():
+            if node_def.function and node_def.function in workflow_def.functions:
+                output = node_def.output or f"{node_name}_result"
+                f.write(f"Nodes.NODE_REGISTRY['{node_name}'] = (greet if '{node_name}' == 'start' else end, ")
+                # Extract inputs using ast parsing
+                func_def = workflow_def.functions[node_def.function]
+                inputs = []
+                if func_def.code:
+                    try:
+                        tree = ast.parse(func_def.code)
+                        for node in ast.walk(tree):
+                            if isinstance(node, ast.AsyncFunctionDef) or isinstance(node, ast.FunctionDef):
+                                inputs = [param.arg for param in node.args.args]
+                                break
+                    except SyntaxError:
+                        pass
+                f.write(f"{repr(inputs)}, {repr(output)})\n")
+
+        # Define workflow using chaining syntax (original style with enhancements)
+        f.write("\n# Define the workflow using simplified syntax with automatic node registration\n")
         f.write("workflow = (\n")
         f.write(f'    Workflow("{workflow_def.workflow.start}")\n')
+        # Add all nodes explicitly
+        for node_name, node_def in workflow_def.nodes.items():
+            if node_def.sub_workflow:
+                sub_start = node_def.sub_workflow.start or f"{node_name}_start"
+                f.write(f'    .add_sub_workflow("{node_name}", Workflow("{sub_start}"), ')
+                inputs = Nodes.NODE_REGISTRY.get(sub_start, ([], None))[0] if sub_start in Nodes.NODE_REGISTRY else []
+                f.write(f'inputs={{{", ".join(f"{k!r}: {k!r}" for k in inputs)}}}, ')
+                f.write(f'output="{node_def.output or f"{node_name}_result"}")\n')
+            else:
+                f.write(f'    .node("{node_name}")\n')
+        # Add transitions (original style preserved)
         for trans in workflow_def.workflow.transitions:
-            _from_node = trans.from_node
+            _from_node = trans.from_node  # Original used `_from_node`
             to_node = trans.to_node
             condition = trans.condition or "None"
-            if condition != "None":
-                # Ensure condition is formatted as a lambda if not already
-                if not condition.startswith("lambda ctx:"):
-                    condition = f"lambda ctx: {condition}"
-            f.write(f'    .then("{to_node}", condition={condition})\n')
-        # Add observers if any exist in the workflow definition
+            if condition != "None" and not condition.startswith("lambda ctx:"):
+                condition = f"lambda ctx: {condition}"
+            if isinstance(to_node, str):
+                f.write(f'    .then("{to_node}", condition={condition})\n')
+            else:
+                f.write(f'    .parallel({", ".join(f"{n!r}" for n in to_node)})\n')
+        # Add observers (original feature)
         if hasattr(workflow_def, 'observers'):
             for observer in workflow_def.observers:
                 f.write(f"    .add_observer({observer})\n")
         f.write(")\n\n")
 
-        # Main asynchronous function to run the workflow
+        # Main asynchronous function (updated with inferred context)
         f.write("async def main():\n")
         f.write('    """Main function to run the story generation workflow."""\n')
+        f.write("    # Customize initial_context as needed based on the workflow's nodes\n")
+        f.write("    # Inferred required inputs:\n")
+        inferred_inputs = list(initial_context.keys())
+        f.write(f"    # {', '.join(inferred_inputs) if inferred_inputs else 'None detected'}\n")
         f.write("    initial_context = {\n")
-        f.write('        "genre": "science fiction",\n')
-        f.write('        "num_chapters": 3,\n')
-        f.write('        "chapters": [],\n')
-        f.write('        "completed_chapters": 0,\n')
-        f.write('        "style": "descriptive"\n')
+        for key, value in initial_context.items():
+            f.write(f"        {repr(key)}: {repr(value)},\n")
         f.write("    }  # Customize initial_context as needed\n")
         f.write("    engine = workflow.build()\n")
         f.write("    result = await engine.run(initial_context)\n")
         f.write('    logger.info(f"Workflow result: {result}")\n\n')
 
-        # Entry point to execute the main function
+        # Entry point (original style)
         f.write('if __name__ == "__main__":\n')
         f.write("    anyio.run(main)\n")
 
-    # Set executable permissions (rwxr-xr-x)
+    # Set executable permissions (original feature)
     os.chmod(output_file, 0o755)
+
+
+# Example usage (consistent with original structure)
+if __name__ == "__main__":
+    from quantalogic.flow.flow_manager import WorkflowManager
+
+    # Create the workflow manager
+    manager = WorkflowManager()
+
+    # Define and add functions
+    manager.add_function(
+        name="greet",
+        type_="embedded",
+        code="async def greet(name): return f'Hello, {name}!'",
+    )
+    manager.add_function(
+        name="end",
+        type_="embedded",
+        code="async def end(greeting): return f'{greeting} Goodbye!'",
+    )
+
+    # Add nodes to the workflow
+    manager.add_node(name="start", function="greet", output="greeting")
+    manager.add_node(name="end", function="end", output="farewell")
+
+    # Set start node and transitions
+    manager.set_start_node("start")
+    manager.add_transition("start", "end")
+
+    # Get the WorkflowDefinition
+    wf_def = manager.workflow
+
+    # Define global variables
+    global_vars = {"MY_CONSTANT": 42}
+
+    # Generate the script with inferred context
+    generate_executable_script(wf_def, global_vars, "workflow_script.py")
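
Because initial_context is now a parameter, callers can bypass the inference entirely. A variant of the example above with an explicit context (the key is a placeholder and must match the start node's inputs):

    generate_executable_script(
        wf_def,
        global_vars,
        "workflow_script.py",
        initial_context={"name": "World"},  # skips the inference block entirely
    )
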
quantalogic/flow/flow_manager.py CHANGED
@@ -2,6 +2,7 @@ import importlib
 import importlib.util
 import os
 import re
+import subprocess
 import sys
 import tempfile
 import urllib
@@ -28,6 +29,34 @@ class WorkflowManager:
     def __init__(self, workflow: Optional[WorkflowDefinition] = None):
         """Initialize the WorkflowManager with an optional workflow definition."""
         self.workflow = workflow or WorkflowDefinition()
+        self._ensure_dependencies()
+
+    def _ensure_dependencies(self) -> None:
+        """Ensure all specified dependencies are installed or available."""
+        if not self.workflow.dependencies:
+            return
+
+        for dep in self.workflow.dependencies:
+            if dep.startswith("http://") or dep.startswith("https://"):
+                # Remote URL: handled by import_module_from_source later
+                logger.debug(f"Dependency '{dep}' is a remote URL, will be fetched during instantiation")
+            elif os.path.isfile(dep):
+                # Local file: handled by import_module_from_source later
+                logger.debug(f"Dependency '{dep}' is a local file, will be loaded during instantiation")
+            else:
+                # Assume PyPI package
+                try:
+                    # Check if the module is already installed
+                    module_name = dep.split(">")[0].split("<")[0].split("=")[0].strip()
+                    importlib.import_module(module_name)
+                    logger.debug(f"Dependency '{dep}' is already installed")
+                except ImportError:
+                    logger.info(f"Installing dependency '{dep}' via pip")
+                    try:
+                        subprocess.check_call([sys.executable, "-m", "pip", "install", dep])
+                        logger.debug(f"Successfully installed '{dep}'")
+                    except subprocess.CalledProcessError as e:
+                        raise ValueError(f"Failed to install dependency '{dep}': {e}")
 
     def add_node(
         self,
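
For the PyPI branch, the manager decides whether to call pip by stripping the version specifier and attempting an import. A sketch of that check (note it assumes the import name matches the distribution name, which does not hold for every package):

    import importlib

    dep = "requests>=2.28.0"
    module_name = dep.split(">")[0].split("<")[0].split("=")[0].strip()  # -> "requests"
    try:
        importlib.import_module(module_name)  # already importable, nothing to install
    except ImportError:
        pass  # here the manager shells out: pip install "requests>=2.28.0"
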
@@ -232,6 +261,9 @@ class WorkflowManager:
 
     def instantiate_workflow(self) -> Workflow:
         """Instantiates a Workflow object based on the definitions stored in the WorkflowManager."""
+        # Ensure dependencies are available before instantiation
+        self._ensure_dependencies()
+
         functions: Dict[str, Callable] = {}
         for func_name, func_def in self.workflow.functions.items():
             if func_def.type == "embedded":
@@ -317,15 +349,13 @@ class WorkflowManager:
                 )(func)
             elif node_def.llm_config:
                 llm_config = node_def.llm_config
-                # Extract inputs from prompt_template using regex
-                # Extract inputs from prompt_template using regex
-                input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", llm_config.prompt_template))
+                # Extract inputs from prompt_template if no prompt_file, otherwise assume inputs will be inferred at runtime
+                input_vars = set(re.findall(r"{{\s*([^}]+?)\s*}}", llm_config.prompt_template)) if not llm_config.prompt_file else set()
                 cleaned_inputs = set()
                 for input_var in input_vars:
                     base_var = re.split(r"\s*[\+\-\*/]\s*", input_var.strip())[0].strip()
                     if base_var.isidentifier():
                         cleaned_inputs.add(base_var)
-                # Convert set to list for type compatibility
                 inputs_list: List[str] = list(cleaned_inputs)
 
                 # Define a dummy function to be decorated
@@ -339,6 +369,7 @@ class WorkflowManager:
                     model=llm_config.model,
                     system_prompt=llm_config.system_prompt or "",
                     prompt_template=llm_config.prompt_template,
+                    prompt_file=llm_config.prompt_file,  # Pass prompt_file if provided
                     response_model=response_model,
                     output=node_def.output or f"{node_name}_result",
                     temperature=llm_config.temperature,
@@ -354,6 +385,7 @@ class WorkflowManager:
                     model=llm_config.model,
                     system_prompt=llm_config.system_prompt or "",
                     prompt_template=llm_config.prompt_template,
+                    prompt_file=llm_config.prompt_file,  # Pass prompt_file if provided
                     output=node_def.output or f"{node_name}_result",
                     temperature=llm_config.temperature,
                     max_tokens=llm_config.max_tokens or 2000,
@@ -397,6 +429,7 @@ class WorkflowManager:
             data = yaml.safe_load(f)
         try:
             self.workflow = WorkflowDefinition.model_validate(data)
+            self._ensure_dependencies()  # Ensure dependencies after loading
         except ValidationError as e:
             raise ValueError(f"Invalid workflow YAML: {e}")
 
@@ -427,6 +460,7 @@
 def main():
     """Demonstrate usage of WorkflowManager with observer support."""
     manager = WorkflowManager()
+    manager.workflow.dependencies = ["requests>=2.28.0"]  # Example dependency
     manager.add_function(
         name="greet",
         type_="embedded",
@@ -459,4 +493,4 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
+    main()
quantalogic/flow/flow_manager_schema.py CHANGED
@@ -61,7 +61,10 @@ class LLMConfig(BaseModel):
     )
     system_prompt: Optional[str] = Field(None, description="System prompt defining the LLM's role or context.")
     prompt_template: str = Field(
-        default="{{ input }}", description="Jinja2 template for the user prompt (e.g., 'Summarize {{ text }}')."
+        default="{{ input }}", description="Jinja2 template for the user prompt (e.g., 'Summarize {{ text }}'). Ignored if prompt_file is set."
+    )
+    prompt_file: Optional[str] = Field(
+        None, description="Path to an external Jinja2 template file (e.g., 'prompts/summary.j2'). Takes precedence over prompt_template if provided."
     )
     temperature: float = Field(
         default=0.7, ge=0.0, le=1.0, description="Controls randomness of LLM output (0.0 to 1.0)."
@@ -84,6 +87,15 @@ class LLMConfig(BaseModel):
     )
     api_key: Optional[str] = Field(None, description="Custom API key for the LLM provider, if required.")
 
+    @model_validator(mode="before")
+    @classmethod
+    def check_prompt_source(cls, data: Any) -> Any:
+        """Ensure prompt_file and prompt_template are used appropriately."""
+        prompt_file = data.get("prompt_file")
+        if prompt_file and not isinstance(prompt_file, str):
+            raise ValueError("prompt_file must be a string path to a Jinja2 template file")
+        return data
+
 
 class NodeDefinition(BaseModel):
     """
@@ -162,7 +174,15 @@ class WorkflowDefinition(BaseModel):
     observers: List[str] = Field(
         default_factory=list, description="List of observer function names to monitor workflow execution."
     )
+    dependencies: List[str] = Field(
+        default_factory=list,
+        description=(
+            "List of Python module dependencies required by the workflow. "
+            "Examples: PyPI packages ('requests>=2.28.0'), local paths ('/path/to/module.py'), "
+            "or remote URLs ('https://example.com/module.py'). Processed during workflow instantiation."
+        ),
+    )
 
 
 # Resolve forward reference for sub_workflow in NodeDefinition
-NodeDefinition.model_rebuild()
+NodeDefinition.model_rebuild()
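
A short sketch of the two schema additions in use (field values are illustrative, and other LLMConfig fields keep their defaults):

    from quantalogic.flow.flow_manager_schema import LLMConfig, WorkflowDefinition

    config = LLMConfig(
        model="gemini/gemini-2.0-flash",
        prompt_file="prompts/summary.j2",  # takes precedence over the default prompt_template
    )

    workflow = WorkflowDefinition(dependencies=["requests>=2.28.0"])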