quantalogic 0.53.0__py3-none-any.whl → 0.55.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
quantalogic/__init__.py CHANGED
@@ -1,6 +1,7 @@
 """QuantaLogic package initialization."""
 
 import warnings
+from importlib.metadata import version as get_version
 
 # Suppress specific warnings related to Pydantic's V2 configuration changes
 warnings.filterwarnings(
@@ -10,6 +11,12 @@ warnings.filterwarnings(
     message=".*config keys have changed in V2:.*|.*'fields' config key is removed in V2.*",
 )
 
+try:
+    __version__: str = get_version("quantalogic")
+except Exception as e:
+    __version__ = "unknown"
+    print(f"Unable to retrieve version: {e}")
+
 # Import public API
 from .agent import Agent  # noqa: E402
 from .console_print_events import console_print_events  # noqa: E402
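
The hunk above resolves the installed distribution's version once at import time and exposes it as `quantalogic.__version__`. A minimal sketch of the same pattern, with `my_package` as a hypothetical distribution name; catching the narrower `PackageNotFoundError` (rather than the broad `Exception` used above) is the more conventional form:

    # Read a distribution's version from installed package metadata.
    from importlib.metadata import PackageNotFoundError, version

    try:
        __version__ = version("my_package")  # hypothetical distribution name
    except PackageNotFoundError:
        # Not installed, e.g. running from a source checkout.
        __version__ = "unknown"
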
quantalogic/flow/flow.py CHANGED
@@ -7,11 +7,12 @@
 #     "pydantic>=2.0.0",  # Data validation and settings
 #     "anyio>=4.0.0",  # Async utilities
 #     "jinja2>=3.1.0",  # Templating engine
-#     "instructor[litellm]>=0.5.0"  # Structured LLM output with litellm integration
+#     "instructor"  # Structured LLM output with litellm integration
 # ]
 # ///
 
 import asyncio
+import inspect  # Added for accurate parameter detection
 from dataclasses import dataclass
 from enum import Enum
 from pathlib import Path
@@ -52,17 +53,24 @@ class WorkflowEvent:
 WorkflowObserver = Callable[[WorkflowEvent], None]
 
 
-# Define a class for sub-workflow nodes
+# Define a class for sub-workflow nodes with updated inputs handling
 class SubWorkflowNode:
-    def __init__(self, sub_workflow: "Workflow", inputs: Dict[str, str], output: str):
-        """Initialize a sub-workflow node."""
+    def __init__(self, sub_workflow: "Workflow", inputs: Dict[str, Any], output: str):
+        """Initialize a sub-workflow node with flexible inputs mapping."""
         self.sub_workflow = sub_workflow
-        self.inputs = inputs
+        self.inputs = inputs  # Maps sub_key to main_key, callable, or value
         self.output = output
 
-    async def __call__(self, engine: "WorkflowEngine", **kwargs):
-        """Execute the sub-workflow with the engine's context."""
-        sub_context = {sub_key: kwargs[main_key] for main_key, sub_key in self.inputs.items()}
+    async def __call__(self, engine: "WorkflowEngine"):
+        """Execute the sub-workflow with the engine's context using inputs mapping."""
+        sub_context = {}
+        for sub_key, mapping in self.inputs.items():
+            if callable(mapping):
+                sub_context[sub_key] = mapping(engine.context)
+            elif isinstance(mapping, str):
+                sub_context[sub_key] = engine.context.get(mapping)
+            else:
+                sub_context[sub_key] = mapping  # Direct value
         sub_engine = self.sub_workflow.build(parent_engine=engine)
        result = await sub_engine.run(sub_context)
        return result.get(self.output)
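
With this change, `SubWorkflowNode.__call__` no longer receives keyword inputs; it resolves each entry of its `inputs` dict against the parent engine's context. A short sketch of the three supported mapping forms (the key names here are illustrative, not from the diff):

    inputs = {
        "user_id": "customer_id",                  # str: looked up as engine.context["customer_id"]
        "total": lambda ctx: sum(ctx["amounts"]),  # callable: computed from the parent context
        "retries": 3,                              # anything else: passed through as a literal value
    }
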
@@ -132,7 +140,22 @@ class WorkflowEngine:
                 )
                 break
 
-            inputs = {k: self.context[k] for k in self.workflow.node_inputs[current_node] if k in self.context}
+            # Prepare inputs with mappings
+            input_mappings = self.workflow.node_input_mappings.get(current_node, {})
+            inputs = {}
+            # Add all mapped inputs
+            for key, mapping in input_mappings.items():
+                if callable(mapping):
+                    inputs[key] = mapping(self.context)
+                elif isinstance(mapping, str):
+                    inputs[key] = self.context.get(mapping)
+                else:
+                    inputs[key] = mapping  # Direct value
+            # For parameters in node_inputs that are not mapped, get from context
+            for param in self.workflow.node_inputs[current_node]:
+                if param not in inputs:
+                    inputs[param] = self.context.get(param)
+
             result = None
             exception = None
 
@@ -149,7 +172,7 @@ class WorkflowEngine:
 
             try:
                 if isinstance(node_func, SubWorkflowNode):
-                    result = await node_func(self, **inputs)
+                    result = await node_func(self)  # Sub-workflow handles its own inputs
                     usage = None  # Sub-workflow usage is handled by its own nodes
                 else:
                     result = await node_func(**inputs)
@@ -222,8 +245,9 @@ class Workflow:
         self.node_inputs: Dict[str, List[str]] = {}
         self.node_outputs: Dict[str, Optional[str]] = {}
         self.transitions: Dict[str, List[Tuple[str, Optional[Callable]]]] = {}
+        self.node_input_mappings: Dict[str, Dict[str, Any]] = {}  # Store input mappings for nodes
         self.current_node = None
-        self._observers: List[WorkflowObserver] = []  # Store observers for later propagation
+        self._observers: List[WorkflowObserver] = []
         self._register_node(start_node)  # Register the start node without setting current_node
         self.current_node = start_node  # Set current_node explicitly after registration
 
@@ -236,9 +260,12 @@ class Workflow:
         self.node_inputs[name] = inputs
         self.node_outputs[name] = output
 
-    def node(self, name: str):
-        """Add a node to the workflow chain and set it as the current node."""
+    def node(self, name: str, inputs_mapping: Optional[Dict[str, Any]] = None):
+        """Add a node to the workflow chain with an optional inputs mapping."""
         self._register_node(name)
+        if inputs_mapping:
+            self.node_input_mappings[name] = inputs_mapping
+            logger.debug(f"Added inputs mapping for node {name}: {inputs_mapping}")
         self.current_node = name
         return self
 
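`Workflow.node` now accepts the same mapping vocabulary per node, stored in `node_input_mappings`; parameters left unmapped are still looked up in the context by name at execution time. A hedged usage sketch (the `summarize` node and its parameters are hypothetical):

    workflow = (
        Workflow("summarize")
        .node("summarize", inputs_mapping={
            "text": "raw_text",                             # rename: context["raw_text"] -> text
            "language": lambda ctx: ctx.get("lang", "en"),  # computed from context
            "max_words": 50,                                # literal constant
        })
    )
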
@@ -270,6 +297,30 @@ class Workflow:
         self.current_node = next_node
         return self
 
+    def branch(self, branches: List[Tuple[str, Optional[Callable]]]) -> "Workflow":
+        """Add multiple conditional branches from the current node."""
+        if not self.current_node:
+            logger.warning("No current node set for branching")
+            return self
+        for next_node, condition in branches:
+            if next_node not in self.nodes:
+                self._register_node(next_node)
+            self.transitions.setdefault(self.current_node, []).append((next_node, condition))
+            logger.debug(f"Added branch from {self.current_node} to {next_node} with condition {condition}")
+        return self
+
+    def converge(self, convergence_node: str) -> "Workflow":
+        """Set a convergence point for all previous branches."""
+        if convergence_node not in self.nodes:
+            self._register_node(convergence_node)
+        # Find all leaf nodes (nodes with no outgoing transitions) and point them to convergence_node
+        for node in self.nodes:
+            if (node not in self.transitions or not self.transitions[node]) and node != convergence_node:
+                self.transitions.setdefault(node, []).append((convergence_node, None))
+                logger.debug(f"Added convergence from {node} to {convergence_node}")
+        self.current_node = convergence_node
+        return self
+
     def parallel(self, *nodes: str):
         """Add parallel nodes to execute concurrently."""
         if self.current_node:
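
The new `branch`/`converge` pair gives the fluent API an explicit fan-out/fan-in construct. Note that `converge` rewires every node that currently lacks outgoing transitions, so it should be called before further nodes are chained. A minimal sketch, assuming nodes named `triage`, `fast_path`, `slow_path`, and `report` are registered (all names illustrative):

    wf = (
        Workflow("triage")
        .branch([
            ("fast_path", lambda ctx: ctx["size"] < 100),   # taken when the condition is truthy
            ("slow_path", lambda ctx: ctx["size"] >= 100),
        ])
        .converge("report")  # both leaves now transition into "report"
    )
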
@@ -285,13 +336,14 @@ class Workflow:
             logger.debug(f"Added observer to workflow: {observer}")
         return self  # Support chaining
 
-    def add_sub_workflow(self, name: str, sub_workflow: "Workflow", inputs: Dict[str, str], output: str):
-        """Add a sub-workflow as a node."""
+    def add_sub_workflow(self, name: str, sub_workflow: "Workflow", inputs: Dict[str, Any], output: str):
+        """Add a sub-workflow as a node with flexible inputs mapping."""
         sub_node = SubWorkflowNode(sub_workflow, inputs, output)
         self.nodes[name] = sub_node
-        self.node_inputs[name] = list(inputs.keys())
+        self.node_inputs[name] = []  # Inputs handled internally by SubWorkflowNode
         self.node_outputs[name] = output
         self.current_node = name
+        logger.debug(f"Added sub-workflow {name} with inputs {inputs} and output {output}")
         return self
 
     def build(self, parent_engine: Optional["WorkflowEngine"] = None) -> WorkflowEngine:
@@ -308,32 +360,37 @@ class Nodes:
     @classmethod
     def define(cls, output: Optional[str] = None):
         """Decorator for defining simple workflow nodes."""
-
         def decorator(func: Callable) -> Callable:
             async def wrapped_func(**kwargs):
                 try:
-                    result = await func(**kwargs)
+                    if asyncio.iscoroutinefunction(func):
+                        result = await func(**kwargs)
+                    else:
+                        result = func(**kwargs)
                     logger.debug(f"Node {func.__name__} executed with result: {result}")
                     return result
                 except Exception as e:
                     logger.error(f"Error in node {func.__name__}: {e}")
                     raise
 
-            inputs = list(func.__annotations__.keys())
+            # Get parameter names from function signature
+            sig = inspect.signature(func)
+            inputs = [param.name for param in sig.parameters.values()]
             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
             return wrapped_func
-
         return decorator
 
     @classmethod
     def validate_node(cls, output: str):
         """Decorator for nodes that validate inputs."""
-
         def decorator(func: Callable) -> Callable:
             async def wrapped_func(**kwargs):
                 try:
-                    result = await func(**kwargs)
+                    if asyncio.iscoroutinefunction(func):
+                        result = await func(**kwargs)
+                    else:
+                        result = func(**kwargs)
                     if not isinstance(result, str):
                         raise ValueError(f"Validation node {func.__name__} must return a string")
                     logger.info(f"Validation result from {func.__name__}: {result}")
@@ -342,11 +399,41 @@ class Nodes:
                 except Exception as e:
                     logger.error(f"Validation error in {func.__name__}: {e}")
                     raise
 
-            inputs = list(func.__annotations__.keys())
+            # Get parameter names from function signature
+            sig = inspect.signature(func)
+            inputs = [param.name for param in sig.parameters.values()]
             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
             return wrapped_func
+        return decorator
 
+    @classmethod
+    def transform_node(cls, output: str, transformer: Callable[[Any], Any]):
+        """Decorator for nodes that transform their inputs."""
+        def decorator(func: Callable) -> Callable:
+            async def wrapped_func(**kwargs):
+                try:
+                    # Apply transformer to the first input value
+                    input_key = list(kwargs.keys())[0] if kwargs else None
+                    if input_key:
+                        transformed_input = transformer(kwargs[input_key])
+                        kwargs[input_key] = transformed_input
+                    if asyncio.iscoroutinefunction(func):
+                        result = await func(**kwargs)
+                    else:
+                        result = func(**kwargs)
+                    logger.debug(f"Transformed node {func.__name__} executed with result: {result}")
+                    return result
+                except Exception as e:
+                    logger.error(f"Error in transform node {func.__name__}: {e}")
+                    raise
+
+            # Get parameter names from function signature
+            sig = inspect.signature(func)
+            inputs = [param.name for param in sig.parameters.values()]
+            logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
+            cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
+            return wrapped_func
         return decorator
 
     @staticmethod
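
The new `transform_node` applies `transformer` to the first keyword argument before invoking the wrapped function, so single-input nodes are the predictable use case. A sketch (node name and transformer are illustrative):

    @Nodes.transform_node(output="normalized", transformer=str.lower)
    async def normalize(text: str) -> str:
        # `text` arrives already lower-cased by the transformer
        return text.strip()
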
@@ -367,20 +454,19 @@ class Nodes:
             raise
 
     @staticmethod
-    def _render_prompt(template: str, prompt_file: Optional[str], context: Dict[str, Any]) -> str:
-        """Render a prompt from either a template string or an external file."""
-        if prompt_file:
-            return Nodes._load_prompt_from_file(prompt_file, context)
+    def _render_template(template: str, template_file: Optional[str], context: Dict[str, Any]) -> str:
+        """Render a Jinja2 template from either a string or an external file."""
+        if template_file:
+            return Nodes._load_prompt_from_file(template_file, context)
         try:
             return Template(template).render(**context)
         except Exception as e:
-            logger.error(f"Error rendering prompt template: {e}")
+            logger.error(f"Error rendering template: {e}")
             raise
 
     @classmethod
     def llm_node(
         cls,
-        model: str,
         system_prompt: str,
         output: str,
         prompt_template: str = "",
@@ -392,34 +478,52 @@ class Nodes:
         frequency_penalty: float = 0.0,
         **kwargs,
     ):
-        """Decorator for creating LLM nodes with plain text output, supporting external prompt files."""
-
+        """Decorator for creating LLM nodes with plain text output, supporting dynamic parameters via input mappings."""
        def decorator(func: Callable) -> Callable:
-            async def wrapped_func(**kwargs):
-                prompt = cls._render_prompt(prompt_template, prompt_file, kwargs)
+            async def wrapped_func(model: str, **func_kwargs):
+                # Extract parameters from func_kwargs if provided, else use defaults
+                system_prompt_to_use = func_kwargs.pop("system_prompt", system_prompt)
+                prompt_template_to_use = func_kwargs.pop("prompt_template", prompt_template)
+                prompt_file_to_use = func_kwargs.pop("prompt_file", prompt_file)
+                temperature_to_use = func_kwargs.pop("temperature", temperature)
+                max_tokens_to_use = func_kwargs.pop("max_tokens", max_tokens)
+                top_p_to_use = func_kwargs.pop("top_p", top_p)
+                presence_penalty_to_use = func_kwargs.pop("presence_penalty", presence_penalty)
+                frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", frequency_penalty)
+
+                # Use only signature parameters for template rendering
+                sig = inspect.signature(func)
+                template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
+                prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
                 messages = [
-                    {"role": "system", "content": system_prompt},
+                    {"role": "system", "content": system_prompt_to_use},
                     {"role": "user", "content": prompt},
                 ]
+
+                # Log the model and a preview of the prompt
+                truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
+                logger.info(f"LLM node {func.__name__} using model: {model}")
+                logger.debug(f"System prompt: {system_prompt_to_use[:100]}...")
+                logger.debug(f"User prompt preview: {truncated_prompt}")
+
                 try:
                     response = await acompletion(
                         model=model,
                         messages=messages,
-                        temperature=temperature,
-                        max_tokens=max_tokens,
-                        top_p=top_p,
-                        presence_penalty=presence_penalty,
-                        frequency_penalty=frequency_penalty,
+                        temperature=temperature_to_use,
+                        max_tokens=max_tokens_to_use,
+                        top_p=top_p_to_use,
+                        presence_penalty=presence_penalty_to_use,
+                        frequency_penalty=frequency_penalty_to_use,
                         drop_params=True,
                         **kwargs,
                     )
                     content = response.choices[0].message.content.strip()
-                    # Attach usage metadata to the function
                     wrapped_func.usage = {
                         "prompt_tokens": response.usage.prompt_tokens,
                         "completion_tokens": response.usage.completion_tokens,
                         "total_tokens": response.usage.total_tokens,
-                        "cost": getattr(response, "cost", None),  # Include cost if available
+                        "cost": getattr(response, "cost", None),
                     }
                     logger.debug(f"LLM output from {func.__name__}: {content[:50]}...")
                     return content
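
Because the rewritten `wrapped_func` pops overrides out of `func_kwargs` before rendering, every decorator default (`system_prompt`, `prompt_template`, `temperature`, `max_tokens`, and so on) can now be supplied per run through the node's inputs, typically via `inputs_mapping`; `model` is always required as the first parameter. A hedged sketch against a hypothetical registered node:

    workflow.node("check_inventory", inputs_mapping={
        "model": "llm_model",          # read the model name from context["llm_model"]
        "temperature": 0.2,            # literal override of the decorator default
        "items": "transformed_items",  # ordinary input, renamed from the context
    })
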
@@ -427,17 +531,17 @@ class Nodes:
                     logger.error(f"Error in LLM node {func.__name__}: {e}")
                     raise
 
-            inputs = list(func.__annotations__.keys())
+            # Get parameter names from function signature and add 'model'
+            sig = inspect.signature(func)
+            inputs = ['model'] + [param.name for param in sig.parameters.values()]
             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
             return wrapped_func
-
         return decorator
 
     @classmethod
     def structured_llm_node(
         cls,
-        model: str,
         system_prompt: str,
         output: str,
         response_model: Type[BaseModel],
@@ -450,7 +554,7 @@ class Nodes:
         frequency_penalty: float = 0.0,
         **kwargs,
     ):
-        """Decorator for creating LLM nodes with structured output using instructor, supporting external prompt files."""
+        """Decorator for creating LLM nodes with structured output, supporting dynamic parameters via input mappings."""
         try:
             client = instructor.from_litellm(acompletion)
         except ImportError:
@@ -458,32 +562,51 @@ class Nodes:
             raise ImportError("Instructor is required for structured_llm_node")
 
         def decorator(func: Callable) -> Callable:
-            async def wrapped_func(**kwargs):
-                prompt = cls._render_prompt(prompt_template, prompt_file, kwargs)
+            async def wrapped_func(model: str, **func_kwargs):
+                # Extract parameters from func_kwargs if provided, else use defaults
+                system_prompt_to_use = func_kwargs.pop("system_prompt", system_prompt)
+                prompt_template_to_use = func_kwargs.pop("prompt_template", prompt_template)
+                prompt_file_to_use = func_kwargs.pop("prompt_file", prompt_file)
+                temperature_to_use = func_kwargs.pop("temperature", temperature)
+                max_tokens_to_use = func_kwargs.pop("max_tokens", max_tokens)
+                top_p_to_use = func_kwargs.pop("top_p", top_p)
+                presence_penalty_to_use = func_kwargs.pop("presence_penalty", presence_penalty)
+                frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", frequency_penalty)
+
+                # Use only signature parameters for template rendering
+                sig = inspect.signature(func)
+                template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
+                prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
                 messages = [
-                    {"role": "system", "content": system_prompt},
+                    {"role": "system", "content": system_prompt_to_use},
                     {"role": "user", "content": prompt},
                 ]
+
+                # Log the model and a preview of the prompt
+                truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
+                logger.info(f"Structured LLM node {func.__name__} using model: {model}")
+                logger.debug(f"System prompt: {system_prompt_to_use[:100]}...")
+                logger.debug(f"User prompt preview: {truncated_prompt}")
+                logger.debug(f"Expected response model: {response_model.__name__}")
+
                 try:
-                    # Use instructor with completion to get both structured output and raw response
                     structured_response, raw_response = await client.chat.completions.create_with_completion(
                         model=model,
                         messages=messages,
                         response_model=response_model,
-                        temperature=temperature,
-                        max_tokens=max_tokens,
-                        top_p=top_p,
-                        presence_penalty=presence_penalty,
-                        frequency_penalty=frequency_penalty,
+                        temperature=temperature_to_use,
+                        max_tokens=max_tokens_to_use,
+                        top_p=top_p_to_use,
+                        presence_penalty=presence_penalty_to_use,
+                        frequency_penalty=frequency_penalty_to_use,
                         drop_params=True,
                         **kwargs,
                     )
-                    # Attach usage metadata to the function
                     wrapped_func.usage = {
                         "prompt_tokens": raw_response.usage.prompt_tokens,
                         "completion_tokens": raw_response.usage.completion_tokens,
                         "total_tokens": raw_response.usage.total_tokens,
-                        "cost": getattr(raw_response, "cost", None),  # Include cost if available
+                        "cost": getattr(raw_response, "cost", None),
                     }
                     logger.debug(f"Structured output from {func.__name__}: {structured_response}")
                     return structured_response
@@ -494,15 +617,60 @@ class Nodes:
                 logger.error(f"Error in structured LLM node {func.__name__}: {e}")
                 raise
 
-            inputs = list(func.__annotations__.keys())
+            # Get parameter names from function signature and add 'model'
+            sig = inspect.signature(func)
+            inputs = ['model'] + [param.name for param in sig.parameters.values()]
             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
             return wrapped_func
+        return decorator
+
+    @classmethod
+    def template_node(
+        cls,
+        output: str,
+        template: str = "",
+        template_file: Optional[str] = None,
+    ):
+        """Decorator for creating nodes that apply a Jinja2 template to inputs, supporting dynamic parameters."""
+        def decorator(func: Callable) -> Callable:
+            async def wrapped_func(**func_kwargs):
+                # Extract template parameters from func_kwargs if provided, else use defaults
+                template_to_use = func_kwargs.pop("template", template)
+                template_file_to_use = func_kwargs.pop("template_file", template_file)
+
+                # Use only signature parameters (excluding rendered_content) for template rendering
+                sig = inspect.signature(func)
+                expected_params = [p.name for p in sig.parameters.values() if p.name != 'rendered_content']
+                template_vars = {k: v for k, v in func_kwargs.items() if k in expected_params}
+                rendered_content = cls._render_template(template_to_use, template_file_to_use, template_vars)
+
+                # Filter func_kwargs for the function call
+                filtered_kwargs = {k: v for k, v in func_kwargs.items() if k in expected_params}
+
+                try:
+                    if asyncio.iscoroutinefunction(func):
+                        result = await func(rendered_content=rendered_content, **filtered_kwargs)
+                    else:
+                        result = func(rendered_content=rendered_content, **filtered_kwargs)
+                    logger.debug(f"Template node {func.__name__} rendered: {rendered_content[:50]}...")
+                    return result
+                except Exception as e:
+                    logger.error(f"Error in template node {func.__name__}: {e}")
+                    raise
 
+            # Get parameter names from function signature and add 'rendered_content' if not present
+            sig = inspect.signature(func)
+            inputs = [param.name for param in sig.parameters.values()]
+            if 'rendered_content' not in inputs:
+                inputs.insert(0, 'rendered_content')
+            logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
+            cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
+            return wrapped_func
         return decorator
 
 
-# Example workflow with observer integration and updated structured node
+# Example workflow with observer integration, updated nodes, input mappings, and dynamic parameters
 async def example_workflow():
     # Define Pydantic model for structured output
     class OrderDetails(BaseModel):
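
The new `template_node` renders the Jinja2 template from the node's non-`rendered_content` inputs and injects the result as the `rendered_content` argument. A sketch of the intended shape (names illustrative):

    @Nodes.template_node(
        output="greeting",
        template="Hello {{ name }}, you have {{ count }} new messages.",
    )
    async def render_greeting(rendered_content: str, name: str, count: int) -> str:
        return rendered_content  # the rendered string becomes the node's output
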
@@ -534,7 +702,6 @@ async def example_workflow():
                 if usage.get("cost") is not None:
                     self.total_cost += usage["cost"]
                 self.node_usages[event.node_name] = usage
-            # Print summary at workflow completion
             if event.event_type == WorkflowEventType.WORKFLOW_COMPLETED:
                 print(f"Total prompt tokens: {self.total_prompt_tokens}")
                 print(f"Total completion tokens: {self.total_completion_tokens}")
@@ -548,15 +715,12 @@ async def example_workflow():
         return "Order validated" if order.get("items") else "Invalid order"
 
     @Nodes.structured_llm_node(
-        model="gemini/gemini-2.0-flash",
-        system_prompt="You are an inventory checker. Respond with a JSON object containing 'order_id', 'items', and 'in_stock' (boolean).",
+        system_prompt="You are an inventory checker. Respond with a JSON object containing 'order_id', 'items_in_stock', and 'items_out_of_stock'.",
         output="inventory_status",
         response_model=OrderDetails,
         prompt_template="Check if the following items are in stock: {{ items }}. Return the result in JSON format with 'order_id' set to '123'.",
     )
     async def check_inventory(items: List[str]) -> OrderDetails:
-        # This is a placeholder function that would normally call an LLM
-        # The actual implementation is handled by the structured_llm_node decorator
         return OrderDetails(order_id="123", items_in_stock=["item1"], items_out_of_stock=[])
 
     @Nodes.define(output="payment_status")
@@ -579,41 +743,61 @@ async def example_workflow():
     async def notify_customer_out_of_stock(inventory_status: OrderDetails) -> str:
         return "Customer notified of out-of-stock"
 
+    @Nodes.transform_node(output="transformed_items", transformer=lambda x: [item.upper() for item in x])
+    async def transform_items(items: List[str]) -> List[str]:
+        return items
+
+    @Nodes.template_node(
+        output="formatted_message",
+        template="Order contains: {{ items | join(', ') }}",
+    )
+    async def format_order_message(rendered_content: str, items: List[str]) -> str:
+        return rendered_content
+
     # Sub-workflow for payment and shipping
     payment_shipping_sub_wf = Workflow("process_payment").sequence("process_payment", "arrange_shipping")
 
     # Instantiate token usage observer
     token_observer = TokenUsageObserver()
 
-    # Main workflow incorporating the sub-workflow
+    # Main workflow with dynamic parameter overrides
     workflow = (
         Workflow("validate_order")
-        .add_observer(progress_monitor)  # Add progress observer
-        .add_observer(token_observer)  # Add token usage observer
+        .add_observer(progress_monitor)
+        .add_observer(token_observer)
+        .node("validate_order", inputs_mapping={"order": "customer_order"})
+        .node("transform_items")
+        .node("format_order_message", inputs_mapping={
+            "items": "items",
+            "template": "Custom order: {{ items | join(', ') }}"  # Dynamic override
+        })
+        .node("check_inventory", inputs_mapping={
+            "model": lambda ctx: "gemini/gemini-2.0-flash",
+            "items": "transformed_items",
+            "temperature": 0.5,  # Dynamic override
+            "max_tokens": 1000  # Dynamic override
+        })
         .add_sub_workflow(
-            "payment_shipping", payment_shipping_sub_wf, inputs={"order": "order"}, output="shipping_confirmation"
-        )
-        .sequence("validate_order", "check_inventory")
-        .then(
             "payment_shipping",
-            condition=lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) == 0 if ctx.get("inventory_status") else False,
-        )
-        .then(
-            "notify_customer_out_of_stock",
-            condition=lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) > 0 if ctx.get("inventory_status") else True,
+            payment_shipping_sub_wf,
+            inputs={"order": lambda ctx: {"items": ctx["items"]}},
+            output="shipping_confirmation"
         )
-        .parallel("update_order_status", "send_confirmation_email")
-        .node("update_order_status")
-        .node("send_confirmation_email")
-        .node("notify_customer_out_of_stock")
+        .branch([
+            ("payment_shipping", lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) == 0 if ctx.get("inventory_status") else False),
+            ("notify_customer_out_of_stock", lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) > 0 if ctx.get("inventory_status") else True)
+        ])
+        .converge("update_order_status")
+        .sequence("update_order_status", "send_confirmation_email")
     )
 
     # Execute workflow
-    initial_context = {"order": {"items": ["item1", "item2"]}, "items": ["item1", "item2"]}
+    initial_context = {"customer_order": {"items": ["item1", "item2"]}, "items": ["item1", "item2"]}
     engine = workflow.build()
     result = await engine.run(initial_context)
     logger.info(f"Workflow result: {result}")
 
 
 if __name__ == "__main__":
+    logger.info("Initializing Quantalogic Flow Package")
     asyncio.run(example_workflow())