quantalogic 0.53.0__py3-none-any.whl → 0.56.0__py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as published.
quantalogic/__init__.py CHANGED
@@ -1,6 +1,7 @@
  """QuantaLogic package initialization."""

  import warnings
+ from importlib.metadata import version as get_version

  # Suppress specific warnings related to Pydantic's V2 configuration changes
  warnings.filterwarnings(
@@ -10,6 +11,12 @@ warnings.filterwarnings(
      message=".*config keys have changed in V2:.*|.*'fields' config key is removed in V2.*",
  )

+ try:
+     __version__: str = get_version("quantalogic")
+ except Exception as e:
+     __version__ = "unknown"
+     print(f"Unable to retrieve version: {e}")
+
  # Import public API
  from .agent import Agent  # noqa: E402
  from .console_print_events import console_print_events  # noqa: E402
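Net effect of this change: the package now exposes a __version__ attribute resolved from the installed distribution metadata at import time. A minimal usage sketch (assuming the wheel is installed; "unknown" is the fallback when metadata cannot be found, e.g. in an uninstalled source checkout):

    import quantalogic

    # Resolved via importlib.metadata; falls back to "unknown" (and prints a
    # warning) when the distribution metadata is unavailable.
    print(quantalogic.__version__)  # e.g. "0.56.0"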
quantalogic/flow/flow.py CHANGED
@@ -7,11 +7,12 @@
  # "pydantic>=2.0.0",            # Data validation and settings
  # "anyio>=4.0.0",               # Async utilities
  # "jinja2>=3.1.0",              # Templating engine
- # "instructor[litellm]>=0.5.0"  # Structured LLM output with litellm integration
+ # "instructor"                  # Structured LLM output with litellm integration
  # ]
  # ///

  import asyncio
+ import inspect  # Added for accurate parameter detection
  from dataclasses import dataclass
  from enum import Enum
  from pathlib import Path
@@ -52,17 +53,24 @@ class WorkflowEvent:
  WorkflowObserver = Callable[[WorkflowEvent], None]


- # Define a class for sub-workflow nodes
+ # Define a class for sub-workflow nodes with updated inputs handling
  class SubWorkflowNode:
-     def __init__(self, sub_workflow: "Workflow", inputs: Dict[str, str], output: str):
-         """Initialize a sub-workflow node."""
+     def __init__(self, sub_workflow: "Workflow", inputs: Dict[str, Any], output: str):
+         """Initialize a sub-workflow node with flexible inputs mapping."""
          self.sub_workflow = sub_workflow
-         self.inputs = inputs
+         self.inputs = inputs  # Maps sub_key to main_key, callable, or value
          self.output = output

-     async def __call__(self, engine: "WorkflowEngine", **kwargs):
-         """Execute the sub-workflow with the engine's context."""
-         sub_context = {sub_key: kwargs[main_key] for main_key, sub_key in self.inputs.items()}
+     async def __call__(self, engine: "WorkflowEngine"):
+         """Execute the sub-workflow with the engine's context using inputs mapping."""
+         sub_context = {}
+         for sub_key, mapping in self.inputs.items():
+             if callable(mapping):
+                 sub_context[sub_key] = mapping(engine.context)
+             elif isinstance(mapping, str):
+                 sub_context[sub_key] = engine.context.get(mapping)
+             else:
+                 sub_context[sub_key] = mapping  # Direct value
          sub_engine = self.sub_workflow.build(parent_engine=engine)
          result = await sub_engine.run(sub_context)
          return result.get(self.output)
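The rewritten SubWorkflowNode resolves each sub-workflow input from its mapping entry: a string is looked up as a key in the parent engine's context, a callable is invoked with that context, and anything else is passed through as a literal. A hypothetical mapping showing all three forms (names invented for illustration):

    inputs = {
        "order": "customer_order",                    # str: read from parent engine.context
        "item_count": lambda ctx: len(ctx["items"]),  # callable: computed from parent context
        "max_retries": 3,                             # other values: passed through unchanged
    }
    main_workflow.add_sub_workflow("payment", payment_wf, inputs=inputs, output="receipt")

Note the asymmetry: a plain string is always treated as a context key, so a literal string value has to be wrapped, e.g. lambda ctx: "USD".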
@@ -132,7 +140,22 @@ class WorkflowEngine:
              )
              break

-         inputs = {k: self.context[k] for k in self.workflow.node_inputs[current_node] if k in self.context}
+         # Prepare inputs with mappings
+         input_mappings = self.workflow.node_input_mappings.get(current_node, {})
+         inputs = {}
+         # Add all mapped inputs
+         for key, mapping in input_mappings.items():
+             if callable(mapping):
+                 inputs[key] = mapping(self.context)
+             elif isinstance(mapping, str):
+                 inputs[key] = self.context.get(mapping)
+             else:
+                 inputs[key] = mapping  # Direct value
+         # For parameters in node_inputs that are not mapped, get from context
+         for param in self.workflow.node_inputs[current_node]:
+             if param not in inputs:
+                 inputs[param] = self.context.get(param)
+
          result = None
          exception = None

@@ -149,7 +172,7 @@ class WorkflowEngine:

          try:
              if isinstance(node_func, SubWorkflowNode):
-                 result = await node_func(self, **inputs)
+                 result = await node_func(self)  # Sub-workflow handles its own inputs
                  usage = None  # Sub-workflow usage is handled by its own nodes
              else:
                  result = await node_func(**inputs)
@@ -157,6 +180,9 @@ class WorkflowEngine:
              output_key = self.workflow.node_outputs[current_node]
              if output_key:
                  self.context[output_key] = result
+             elif isinstance(result, dict):  # Update context if node returns a dict and output is None
+                 self.context.update(result)
+                 logger.debug(f"Updated context with {result} from node {current_node}")
              await self._notify_observers(
                  WorkflowEvent(
                      event_type=WorkflowEventType.NODE_COMPLETED,
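The new elif arm gives output-less nodes a way to publish several values at once: when the node's output key is None and the result is a dict, the dict is merged into the engine context. A small sketch (hypothetical node name):

    @Nodes.define(output=None)
    async def split_order(order: dict) -> dict:
        # With no output key declared, this dict is merged into engine.context wholesale.
        return {"items": order["items"], "item_count": len(order["items"])}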
@@ -222,8 +248,9 @@ class Workflow:
          self.node_inputs: Dict[str, List[str]] = {}
          self.node_outputs: Dict[str, Optional[str]] = {}
          self.transitions: Dict[str, List[Tuple[str, Optional[Callable]]]] = {}
+         self.node_input_mappings: Dict[str, Dict[str, Any]] = {}  # Store input mappings for nodes
          self.current_node = None
-         self._observers: List[WorkflowObserver] = []  # Store observers for later propagation
+         self._observers: List[WorkflowObserver] = []
          self._register_node(start_node)  # Register the start node without setting current_node
          self.current_node = start_node  # Set current_node explicitly after registration

@@ -236,9 +263,12 @@ class Workflow:
          self.node_inputs[name] = inputs
          self.node_outputs[name] = output

-     def node(self, name: str):
-         """Add a node to the workflow chain and set it as the current node."""
+     def node(self, name: str, inputs_mapping: Optional[Dict[str, Any]] = None):
+         """Add a node to the workflow chain with an optional inputs mapping."""
          self._register_node(name)
+         if inputs_mapping:
+             self.node_input_mappings[name] = inputs_mapping
+             logger.debug(f"Added inputs mapping for node {name}: {inputs_mapping}")
          self.current_node = name
          return self
 
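With the new inputs_mapping parameter, node() can rename, compute, or hard-code a node's inputs at wiring time instead of requiring exact context-key matches. A hypothetical chain exercising the three mapping forms:

    workflow = (
        Workflow("greet_user")
        .node("greet_user", inputs_mapping={
            "name": "user_name",                                    # rename: read "user_name" from context
            "greeting": lambda ctx: ctx["greetings"][ctx["lang"]],  # computed per run
            "shout": True,                                          # constant (non-str values pass through)
        })
    )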
@@ -270,6 +300,30 @@ class Workflow:
          self.current_node = next_node
          return self

+     def branch(self, branches: List[Tuple[str, Optional[Callable]]]) -> "Workflow":
+         """Add multiple conditional branches from the current node."""
+         if not self.current_node:
+             logger.warning("No current node set for branching")
+             return self
+         for next_node, condition in branches:
+             if next_node not in self.nodes:
+                 self._register_node(next_node)
+             self.transitions.setdefault(self.current_node, []).append((next_node, condition))
+             logger.debug(f"Added branch from {self.current_node} to {next_node} with condition {condition}")
+         return self
+
+     def converge(self, convergence_node: str) -> "Workflow":
+         """Set a convergence point for all previous branches."""
+         if convergence_node not in self.nodes:
+             self._register_node(convergence_node)
+         # Find all leaf nodes (nodes with no outgoing transitions) and point them to convergence_node
+         for node in self.nodes:
+             if (node not in self.transitions or not self.transitions[node]) and node != convergence_node:
+                 self.transitions.setdefault(node, []).append((convergence_node, None))
+                 logger.debug(f"Added convergence from {node} to {convergence_node}")
+         self.current_node = convergence_node
+         return self
+
      def parallel(self, *nodes: str):
          """Add parallel nodes to execute concurrently."""
          if self.current_node:
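branch() and converge() replace chains of .then(..., condition=...) for fan-out/fan-in shapes. A minimal sketch of the intended usage (node names are illustrative):

    flow = (
        Workflow("triage")
        .branch([
            ("handle_urgent", lambda ctx: ctx["priority"] == "high"),
            ("handle_normal", lambda ctx: ctx["priority"] != "high"),
        ])
        .converge("archive")  # every transition-less leaf is routed to "archive"
    )

Because converge() targets all current leaf nodes, it is best called immediately after the branch targets are defined, before unrelated leaves exist.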
@@ -285,13 +339,14 @@ class Workflow:
          logger.debug(f"Added observer to workflow: {observer}")
          return self  # Support chaining

-     def add_sub_workflow(self, name: str, sub_workflow: "Workflow", inputs: Dict[str, str], output: str):
-         """Add a sub-workflow as a node."""
+     def add_sub_workflow(self, name: str, sub_workflow: "Workflow", inputs: Dict[str, Any], output: str):
+         """Add a sub-workflow as a node with flexible inputs mapping."""
          sub_node = SubWorkflowNode(sub_workflow, inputs, output)
          self.nodes[name] = sub_node
-         self.node_inputs[name] = list(inputs.keys())
+         self.node_inputs[name] = []  # Inputs handled internally by SubWorkflowNode
          self.node_outputs[name] = output
          self.current_node = name
+         logger.debug(f"Added sub-workflow {name} with inputs {inputs} and output {output}")
          return self

      def build(self, parent_engine: Optional["WorkflowEngine"] = None) -> WorkflowEngine:
@@ -308,32 +363,37 @@ class Nodes:
      @classmethod
      def define(cls, output: Optional[str] = None):
          """Decorator for defining simple workflow nodes."""
-
          def decorator(func: Callable) -> Callable:
              async def wrapped_func(**kwargs):
                  try:
-                     result = await func(**kwargs)
+                     if asyncio.iscoroutinefunction(func):
+                         result = await func(**kwargs)
+                     else:
+                         result = func(**kwargs)
                      logger.debug(f"Node {func.__name__} executed with result: {result}")
                      return result
                  except Exception as e:
                      logger.error(f"Error in node {func.__name__}: {e}")
                      raise

-             inputs = list(func.__annotations__.keys())
+             # Get parameter names from function signature
+             sig = inspect.signature(func)
+             inputs = [param.name for param in sig.parameters.values()]
              logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
              cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
              return wrapped_func
-
          return decorator

      @classmethod
      def validate_node(cls, output: str):
          """Decorator for nodes that validate inputs."""
-
          def decorator(func: Callable) -> Callable:
              async def wrapped_func(**kwargs):
                  try:
-                     result = await func(**kwargs)
+                     if asyncio.iscoroutinefunction(func):
+                         result = await func(**kwargs)
+                     else:
+                         result = func(**kwargs)
                      if not isinstance(result, str):
                          raise ValueError(f"Validation node {func.__name__} must return a string")
                      logger.info(f"Validation result from {func.__name__}: {result}")
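Two behavioral changes land together in these decorators: node functions may now be plain synchronous functions (the wrapper dispatches on asyncio.iscoroutinefunction), and inputs are derived from inspect.signature rather than __annotations__, so unannotated parameters are no longer missed. A sketch of a node that only registers correctly after this change:

    @Nodes.define(output="total")
    def add_totals(subtotal: float, tax):  # sync function; "tax" carries no annotation
        # Previously, "tax" was invisible to annotation-based input detection and
        # awaiting the non-coroutine result would fail; both now work.
        return subtotal + tax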
@@ -342,11 +402,41 @@ class Nodes:
                      logger.error(f"Validation error in {func.__name__}: {e}")
                      raise

-             inputs = list(func.__annotations__.keys())
+             # Get parameter names from function signature
+             sig = inspect.signature(func)
+             inputs = [param.name for param in sig.parameters.values()]
              logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
              cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
              return wrapped_func
+         return decorator

+     @classmethod
+     def transform_node(cls, output: str, transformer: Callable[[Any], Any]):
+         """Decorator for nodes that transform their inputs."""
+         def decorator(func: Callable) -> Callable:
+             async def wrapped_func(**kwargs):
+                 try:
+                     # Apply transformer to the first input value
+                     input_key = list(kwargs.keys())[0] if kwargs else None
+                     if input_key:
+                         transformed_input = transformer(kwargs[input_key])
+                         kwargs[input_key] = transformed_input
+                     if asyncio.iscoroutinefunction(func):
+                         result = await func(**kwargs)
+                     else:
+                         result = func(**kwargs)
+                     logger.debug(f"Transformed node {func.__name__} executed with result: {result}")
+                     return result
+                 except Exception as e:
+                     logger.error(f"Error in transform node {func.__name__}: {e}")
+                     raise
+
+             # Get parameter names from function signature
+             sig = inspect.signature(func)
+             inputs = [param.name for param in sig.parameters.values()]
+             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
+             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
+             return wrapped_func
          return decorator

      @staticmethod
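transform_node applies the transformer to the node's first keyword argument before the wrapped function runs. A minimal sketch:

    @Nodes.transform_node(output="clean_text", transformer=lambda s: s.strip().lower())
    def normalize(text: str) -> str:
        return text  # already stripped and lower-cased by the transformer

Since only the first input is transformed, the decorator is best suited to single-input nodes; with several inputs, which one counts as "first" depends on the order in which the engine assembles the kwargs.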
@@ -367,20 +457,19 @@ class Nodes:
                  raise

      @staticmethod
-     def _render_prompt(template: str, prompt_file: Optional[str], context: Dict[str, Any]) -> str:
-         """Render a prompt from either a template string or an external file."""
-         if prompt_file:
-             return Nodes._load_prompt_from_file(prompt_file, context)
+     def _render_template(template: str, template_file: Optional[str], context: Dict[str, Any]) -> str:
+         """Render a Jinja2 template from either a string or an external file."""
+         if template_file:
+             return Nodes._load_prompt_from_file(template_file, context)
          try:
              return Template(template).render(**context)
          except Exception as e:
-             logger.error(f"Error rendering prompt template: {e}")
+             logger.error(f"Error rendering template: {e}")
              raise

      @classmethod
      def llm_node(
          cls,
-         model: str,
          system_prompt: str,
          output: str,
          prompt_template: str = "",
@@ -392,34 +481,52 @@ class Nodes:
          frequency_penalty: float = 0.0,
          **kwargs,
      ):
-         """Decorator for creating LLM nodes with plain text output, supporting external prompt files."""
-
+         """Decorator for creating LLM nodes with plain text output, supporting dynamic parameters via input mappings."""
          def decorator(func: Callable) -> Callable:
-             async def wrapped_func(**kwargs):
-                 prompt = cls._render_prompt(prompt_template, prompt_file, kwargs)
+             async def wrapped_func(model: str, **func_kwargs):
+                 # Extract parameters from func_kwargs if provided, else use defaults
+                 system_prompt_to_use = func_kwargs.pop("system_prompt", system_prompt)
+                 prompt_template_to_use = func_kwargs.pop("prompt_template", prompt_template)
+                 prompt_file_to_use = func_kwargs.pop("prompt_file", prompt_file)
+                 temperature_to_use = func_kwargs.pop("temperature", temperature)
+                 max_tokens_to_use = func_kwargs.pop("max_tokens", max_tokens)
+                 top_p_to_use = func_kwargs.pop("top_p", top_p)
+                 presence_penalty_to_use = func_kwargs.pop("presence_penalty", presence_penalty)
+                 frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", frequency_penalty)
+
+                 # Use only signature parameters for template rendering
+                 sig = inspect.signature(func)
+                 template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
+                 prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
                  messages = [
-                     {"role": "system", "content": system_prompt},
+                     {"role": "system", "content": system_prompt_to_use},
                      {"role": "user", "content": prompt},
                  ]
+
+                 # Log the model and a preview of the prompt
+                 truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
+                 logger.info(f"LLM node {func.__name__} using model: {model}")
+                 logger.debug(f"System prompt: {system_prompt_to_use[:100]}...")
+                 logger.debug(f"User prompt preview: {truncated_prompt}")
+
                  try:
                      response = await acompletion(
                          model=model,
                          messages=messages,
-                         temperature=temperature,
-                         max_tokens=max_tokens,
-                         top_p=top_p,
-                         presence_penalty=presence_penalty,
-                         frequency_penalty=frequency_penalty,
+                         temperature=temperature_to_use,
+                         max_tokens=max_tokens_to_use,
+                         top_p=top_p_to_use,
+                         presence_penalty=presence_penalty_to_use,
+                         frequency_penalty=frequency_penalty_to_use,
                          drop_params=True,
                          **kwargs,
                      )
                      content = response.choices[0].message.content.strip()
-                     # Attach usage metadata to the function
                      wrapped_func.usage = {
                          "prompt_tokens": response.usage.prompt_tokens,
                          "completion_tokens": response.usage.completion_tokens,
                          "total_tokens": response.usage.total_tokens,
-                         "cost": getattr(response, "cost", None),  # Include cost if available
+                         "cost": getattr(response, "cost", None),
                      }
                      logger.debug(f"LLM output from {func.__name__}: {content[:50]}...")
                      return content
@@ -427,17 +534,17 @@ class Nodes:
                      logger.error(f"Error in LLM node {func.__name__}: {e}")
                      raise

-             inputs = list(func.__annotations__.keys())
+             # Get parameter names from function signature and add 'model'
+             sig = inspect.signature(func)
+             inputs = ['model'] + [param.name for param in sig.parameters.values()]
              logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
              cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
              return wrapped_func
-
          return decorator

      @classmethod
      def structured_llm_node(
          cls,
-         model: str,
          system_prompt: str,
          output: str,
          response_model: Type[BaseModel],
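With model moved out of the decorator arguments and every sampling parameter poppable from the call kwargs, one LLM node can be re-tuned per workflow through inputs_mapping. A sketch of the new calling convention (node and context names are illustrative):

    @Nodes.llm_node(
        system_prompt="You summarize text.",
        output="summary",
        prompt_template="Summarize: {{ text }}",
    )
    async def summarize(text: str) -> str:
        pass  # body unused; the decorator performs the LLM call

    wf = Workflow("summarize").node("summarize", inputs_mapping={
        "model": lambda ctx: ctx.get("model_name", "gemini/gemini-2.0-flash"),
        "temperature": 0.2,  # overrides the decorator default for this node only
    })

As with sub-workflow inputs, a bare string under "model" would be treated as a context key, which is why the mapping uses a lambda.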
@@ -450,7 +557,7 @@ class Nodes:
          frequency_penalty: float = 0.0,
          **kwargs,
      ):
-         """Decorator for creating LLM nodes with structured output using instructor, supporting external prompt files."""
+         """Decorator for creating LLM nodes with structured output, supporting dynamic parameters via input mappings."""
          try:
              client = instructor.from_litellm(acompletion)
          except ImportError:
@@ -458,32 +565,51 @@ class Nodes:
              raise ImportError("Instructor is required for structured_llm_node")

          def decorator(func: Callable) -> Callable:
-             async def wrapped_func(**kwargs):
-                 prompt = cls._render_prompt(prompt_template, prompt_file, kwargs)
+             async def wrapped_func(model: str, **func_kwargs):
+                 # Extract parameters from func_kwargs if provided, else use defaults
+                 system_prompt_to_use = func_kwargs.pop("system_prompt", system_prompt)
+                 prompt_template_to_use = func_kwargs.pop("prompt_template", prompt_template)
+                 prompt_file_to_use = func_kwargs.pop("prompt_file", prompt_file)
+                 temperature_to_use = func_kwargs.pop("temperature", temperature)
+                 max_tokens_to_use = func_kwargs.pop("max_tokens", max_tokens)
+                 top_p_to_use = func_kwargs.pop("top_p", top_p)
+                 presence_penalty_to_use = func_kwargs.pop("presence_penalty", presence_penalty)
+                 frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", frequency_penalty)
+
+                 # Use only signature parameters for template rendering
+                 sig = inspect.signature(func)
+                 template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
+                 prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
                  messages = [
-                     {"role": "system", "content": system_prompt},
+                     {"role": "system", "content": system_prompt_to_use},
                      {"role": "user", "content": prompt},
                  ]
+
+                 # Log the model and a preview of the prompt
+                 truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
+                 logger.info(f"Structured LLM node {func.__name__} using model: {model}")
+                 logger.debug(f"System prompt: {system_prompt_to_use[:100]}...")
+                 logger.debug(f"User prompt preview: {truncated_prompt}")
+                 logger.debug(f"Expected response model: {response_model.__name__}")
+
                  try:
-                     # Use instructor with completion to get both structured output and raw response
                      structured_response, raw_response = await client.chat.completions.create_with_completion(
                          model=model,
                          messages=messages,
                          response_model=response_model,
-                         temperature=temperature,
-                         max_tokens=max_tokens,
-                         top_p=top_p,
-                         presence_penalty=presence_penalty,
-                         frequency_penalty=frequency_penalty,
+                         temperature=temperature_to_use,
+                         max_tokens=max_tokens_to_use,
+                         top_p=top_p_to_use,
+                         presence_penalty=presence_penalty_to_use,
+                         frequency_penalty=frequency_penalty_to_use,
                          drop_params=True,
                          **kwargs,
                      )
-                     # Attach usage metadata to the function
                      wrapped_func.usage = {
                          "prompt_tokens": raw_response.usage.prompt_tokens,
                          "completion_tokens": raw_response.usage.completion_tokens,
                          "total_tokens": raw_response.usage.total_tokens,
-                         "cost": getattr(raw_response, "cost", None),  # Include cost if available
+                         "cost": getattr(raw_response, "cost", None),
                      }
                      logger.debug(f"Structured output from {func.__name__}: {structured_response}")
                      return structured_response
@@ -494,15 +620,60 @@ class Nodes:
                      logger.error(f"Error in structured LLM node {func.__name__}: {e}")
                      raise

-             inputs = list(func.__annotations__.keys())
+             # Get parameter names from function signature and add 'model'
+             sig = inspect.signature(func)
+             inputs = ['model'] + [param.name for param in sig.parameters.values()]
              logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
              cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
              return wrapped_func
+         return decorator
+
+     @classmethod
+     def template_node(
+         cls,
+         output: str,
+         template: str = "",
+         template_file: Optional[str] = None,
+     ):
+         """Decorator for creating nodes that apply a Jinja2 template to inputs, supporting dynamic parameters."""
+         def decorator(func: Callable) -> Callable:
+             async def wrapped_func(**func_kwargs):
+                 # Extract template parameters from func_kwargs if provided, else use defaults
+                 template_to_use = func_kwargs.pop("template", template)
+                 template_file_to_use = func_kwargs.pop("template_file", template_file)
+
+                 # Use only signature parameters (excluding rendered_content) for template rendering
+                 sig = inspect.signature(func)
+                 expected_params = [p.name for p in sig.parameters.values() if p.name != 'rendered_content']
+                 template_vars = {k: v for k, v in func_kwargs.items() if k in expected_params}
+                 rendered_content = cls._render_template(template_to_use, template_file_to_use, template_vars)
+
+                 # Filter func_kwargs for the function call
+                 filtered_kwargs = {k: v for k, v in func_kwargs.items() if k in expected_params}
+
+                 try:
+                     if asyncio.iscoroutinefunction(func):
+                         result = await func(rendered_content=rendered_content, **filtered_kwargs)
+                     else:
+                         result = func(rendered_content=rendered_content, **filtered_kwargs)
+                     logger.debug(f"Template node {func.__name__} rendered: {rendered_content[:50]}...")
+                     return result
+                 except Exception as e:
+                     logger.error(f"Error in template node {func.__name__}: {e}")
+                     raise
+
+             # Get parameter names from function signature and add 'rendered_content' if not present
+             sig = inspect.signature(func)
+             inputs = [param.name for param in sig.parameters.values()]
+             if 'rendered_content' not in inputs:
+                 inputs.insert(0, 'rendered_content')
+             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
+             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
+             return wrapped_func
          return decorator


- # Example workflow with observer integration and updated structured node
+ # Example workflow with observer integration, updated nodes, input mappings, and dynamic parameters
  async def example_workflow():
      # Define Pydantic model for structured output
      class OrderDetails(BaseModel):
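template_node separates rendering from post-processing: the signature parameters other than rendered_content feed the Jinja2 template, and the function receives the rendered string as rendered_content. A minimal sketch mirroring the format_order_message node added to the example below:

    @Nodes.template_node(
        output="report_line",
        template="{{ count }} items: {{ items | join(', ') }}",
    )
    async def render_report(rendered_content: str, count: int, items: list) -> str:
        return rendered_content  # or post-process the rendered string here

As with the LLM decorators, template and template_file can themselves be overridden per node via inputs_mapping.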
@@ -534,7 +705,6 @@ async def example_workflow():
              if usage.get("cost") is not None:
                  self.total_cost += usage["cost"]
              self.node_usages[event.node_name] = usage
-             # Print summary at workflow completion
              if event.event_type == WorkflowEventType.WORKFLOW_COMPLETED:
                  print(f"Total prompt tokens: {self.total_prompt_tokens}")
                  print(f"Total completion tokens: {self.total_completion_tokens}")
@@ -548,15 +718,12 @@ async def example_workflow():
          return "Order validated" if order.get("items") else "Invalid order"

      @Nodes.structured_llm_node(
-         model="gemini/gemini-2.0-flash",
-         system_prompt="You are an inventory checker. Respond with a JSON object containing 'order_id', 'items', and 'in_stock' (boolean).",
+         system_prompt="You are an inventory checker. Respond with a JSON object containing 'order_id', 'items_in_stock', and 'items_out_of_stock'.",
          output="inventory_status",
          response_model=OrderDetails,
          prompt_template="Check if the following items are in stock: {{ items }}. Return the result in JSON format with 'order_id' set to '123'.",
      )
      async def check_inventory(items: List[str]) -> OrderDetails:
-         # This is a placeholder function that would normally call an LLM
-         # The actual implementation is handled by the structured_llm_node decorator
          return OrderDetails(order_id="123", items_in_stock=["item1"], items_out_of_stock=[])

      @Nodes.define(output="payment_status")
@@ -579,41 +746,61 @@ async def example_workflow():
      async def notify_customer_out_of_stock(inventory_status: OrderDetails) -> str:
          return "Customer notified of out-of-stock"

+     @Nodes.transform_node(output="transformed_items", transformer=lambda x: [item.upper() for item in x])
+     async def transform_items(items: List[str]) -> List[str]:
+         return items
+
+     @Nodes.template_node(
+         output="formatted_message",
+         template="Order contains: {{ items | join(', ') }}",
+     )
+     async def format_order_message(rendered_content: str, items: List[str]) -> str:
+         return rendered_content
+
      # Sub-workflow for payment and shipping
      payment_shipping_sub_wf = Workflow("process_payment").sequence("process_payment", "arrange_shipping")

      # Instantiate token usage observer
      token_observer = TokenUsageObserver()

-     # Main workflow incorporating the sub-workflow
+     # Main workflow with dynamic parameter overrides
      workflow = (
          Workflow("validate_order")
-         .add_observer(progress_monitor)  # Add progress observer
-         .add_observer(token_observer)  # Add token usage observer
+         .add_observer(progress_monitor)
+         .add_observer(token_observer)
+         .node("validate_order", inputs_mapping={"order": "customer_order"})
+         .node("transform_items")
+         .node("format_order_message", inputs_mapping={
+             "items": "items",
+             "template": "Custom order: {{ items | join(', ') }}"  # Dynamic override
+         })
+         .node("check_inventory", inputs_mapping={
+             "model": lambda ctx: "gemini/gemini-2.0-flash",
+             "items": "transformed_items",
+             "temperature": 0.5,  # Dynamic override
+             "max_tokens": 1000  # Dynamic override
+         })
          .add_sub_workflow(
-             "payment_shipping", payment_shipping_sub_wf, inputs={"order": "order"}, output="shipping_confirmation"
-         )
-         .sequence("validate_order", "check_inventory")
-         .then(
              "payment_shipping",
-             condition=lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) == 0 if ctx.get("inventory_status") else False,
-         )
-         .then(
-             "notify_customer_out_of_stock",
-             condition=lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) > 0 if ctx.get("inventory_status") else True,
+             payment_shipping_sub_wf,
+             inputs={"order": lambda ctx: {"items": ctx["items"]}},
+             output="shipping_confirmation"
          )
-         .parallel("update_order_status", "send_confirmation_email")
-         .node("update_order_status")
-         .node("send_confirmation_email")
-         .node("notify_customer_out_of_stock")
+         .branch([
+             ("payment_shipping", lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) == 0 if ctx.get("inventory_status") else False),
+             ("notify_customer_out_of_stock", lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) > 0 if ctx.get("inventory_status") else True)
+         ])
+         .converge("update_order_status")
+         .sequence("update_order_status", "send_confirmation_email")
      )

      # Execute workflow
-     initial_context = {"order": {"items": ["item1", "item2"]}, "items": ["item1", "item2"]}
+     initial_context = {"customer_order": {"items": ["item1", "item2"]}, "items": ["item1", "item2"]}
      engine = workflow.build()
      result = await engine.run(initial_context)
      logger.info(f"Workflow result: {result}")


  if __name__ == "__main__":
+     logger.info("Initializing Quantalogic Flow Package")
      asyncio.run(example_workflow())