quantalogic 0.80-py3-none-any.whl → 0.93-py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (55)
  1. quantalogic/flow/__init__.py +16 -34
  2. quantalogic/main.py +11 -6
  3. quantalogic/tools/tool.py +8 -922
  4. quantalogic-0.93.dist-info/METADATA +475 -0
  5. {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/RECORD +8 -54
  6. quantalogic/codeact/TODO.md +0 -14
  7. quantalogic/codeact/__init__.py +0 -0
  8. quantalogic/codeact/agent.py +0 -478
  9. quantalogic/codeact/cli.py +0 -50
  10. quantalogic/codeact/cli_commands/__init__.py +0 -0
  11. quantalogic/codeact/cli_commands/create_toolbox.py +0 -45
  12. quantalogic/codeact/cli_commands/install_toolbox.py +0 -20
  13. quantalogic/codeact/cli_commands/list_executor.py +0 -15
  14. quantalogic/codeact/cli_commands/list_reasoners.py +0 -15
  15. quantalogic/codeact/cli_commands/list_toolboxes.py +0 -47
  16. quantalogic/codeact/cli_commands/task.py +0 -215
  17. quantalogic/codeact/cli_commands/tool_info.py +0 -24
  18. quantalogic/codeact/cli_commands/uninstall_toolbox.py +0 -43
  19. quantalogic/codeact/config.yaml +0 -21
  20. quantalogic/codeact/constants.py +0 -9
  21. quantalogic/codeact/events.py +0 -85
  22. quantalogic/codeact/examples/README.md +0 -342
  23. quantalogic/codeact/examples/agent_sample.yaml +0 -29
  24. quantalogic/codeact/executor.py +0 -186
  25. quantalogic/codeact/history_manager.py +0 -94
  26. quantalogic/codeact/llm_util.py +0 -57
  27. quantalogic/codeact/plugin_manager.py +0 -92
  28. quantalogic/codeact/prompts/error_format.j2 +0 -11
  29. quantalogic/codeact/prompts/generate_action.j2 +0 -77
  30. quantalogic/codeact/prompts/generate_program.j2 +0 -52
  31. quantalogic/codeact/prompts/response_format.j2 +0 -11
  32. quantalogic/codeact/react_agent.py +0 -318
  33. quantalogic/codeact/reasoner.py +0 -185
  34. quantalogic/codeact/templates/toolbox/README.md.j2 +0 -10
  35. quantalogic/codeact/templates/toolbox/pyproject.toml.j2 +0 -16
  36. quantalogic/codeact/templates/toolbox/tools.py.j2 +0 -6
  37. quantalogic/codeact/templates.py +0 -7
  38. quantalogic/codeact/tools_manager.py +0 -258
  39. quantalogic/codeact/utils.py +0 -62
  40. quantalogic/codeact/xml_utils.py +0 -126
  41. quantalogic/flow/flow.py +0 -1070
  42. quantalogic/flow/flow_extractor.py +0 -783
  43. quantalogic/flow/flow_generator.py +0 -322
  44. quantalogic/flow/flow_manager.py +0 -676
  45. quantalogic/flow/flow_manager_schema.py +0 -287
  46. quantalogic/flow/flow_mermaid.py +0 -365
  47. quantalogic/flow/flow_validator.py +0 -479
  48. quantalogic/flow/flow_yaml.linkedin.md +0 -31
  49. quantalogic/flow/flow_yaml.md +0 -767
  50. quantalogic/flow/templates/prompt_check_inventory.j2 +0 -1
  51. quantalogic/flow/templates/system_check_inventory.j2 +0 -1
  52. quantalogic-0.80.dist-info/METADATA +0 -900
  53. {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/LICENSE +0 -0
  54. {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/WHEEL +0 -0
  55. {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/entry_points.txt +0 -0
quantalogic/flow/flow.py DELETED
@@ -1,1070 +0,0 @@
- #!/usr/bin/env -S uv run
- # /// script
- # requires-python = ">=3.12"
- # dependencies = [
- # "loguru>=0.7.2", # Logging utility
- # "litellm>=1.0.0", # LLM integration
- # "pydantic>=2.0.0", # Data validation and settings
- # "anyio>=4.0.0", # Async utilities
- # "jinja2>=3.1.0", # Templating engine
- # "instructor" # Structured LLM output with litellm integration
- # ]
- # ///
-
- import asyncio
- import inspect
- import os
- from dataclasses import dataclass
- from enum import Enum
- from pathlib import Path
- from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
-
- import instructor
- from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound
- from litellm import acompletion
- from loguru import logger
- from pydantic import BaseModel, ValidationError
-
-
- # Define event types and structure for observer system
- class WorkflowEventType(Enum):
-     NODE_STARTED = "node_started"
-     NODE_COMPLETED = "node_completed"
-     NODE_FAILED = "node_failed"
-     TRANSITION_EVALUATED = "transition_evaluated"
-     WORKFLOW_STARTED = "workflow_started"
-     WORKFLOW_COMPLETED = "workflow_completed"
-     SUB_WORKFLOW_ENTERED = "sub_workflow_entered"
-     SUB_WORKFLOW_EXITED = "sub_workflow_exited"
-
-
- @dataclass
- class WorkflowEvent:
-     event_type: WorkflowEventType
-     node_name: Optional[str]
-     context: Dict[str, Any]
-     result: Optional[Any] = None
-     exception: Optional[Exception] = None
-     transition_from: Optional[str] = None
-     transition_to: Optional[str] = None
-     sub_workflow_name: Optional[str] = None
-     usage: Optional[Dict[str, Any]] = None
-
-
- WorkflowObserver = Callable[[WorkflowEvent], None]
-
-
- class SubWorkflowNode:
-     def __init__(self, sub_workflow: "Workflow", inputs: Dict[str, Any], output: str):
-         """Initialize a sub-workflow node with flexible inputs mapping."""
-         self.sub_workflow = sub_workflow
-         self.inputs = inputs
-         self.output = output
-
-     async def __call__(self, engine: "WorkflowEngine"):
-         """Execute the sub-workflow with the engine's context using inputs mapping."""
-         sub_context = {}
-         for sub_key, mapping in self.inputs.items():
-             if callable(mapping):
-                 sub_context[sub_key] = mapping(engine.context)
-             elif isinstance(mapping, str):
-                 sub_context[sub_key] = engine.context.get(mapping)
-             else:
-                 sub_context[sub_key] = mapping
-         sub_engine = self.sub_workflow.build(parent_engine=engine)
-         result = await sub_engine.run(sub_context)
-         return result.get(self.output)
-
-
- class WorkflowEngine:
-     def __init__(self, workflow, parent_engine: Optional["WorkflowEngine"] = None):
-         """Initialize the WorkflowEngine with a workflow and optional parent for sub-workflows."""
-         self.workflow = workflow
-         self.context: Dict[str, Any] = {}
-         self.observers: List[WorkflowObserver] = []
-         self.parent_engine = parent_engine
-
-     def add_observer(self, observer: WorkflowObserver) -> None:
-         """Register an event observer callback."""
-         if observer not in self.observers:
-             self.observers.append(observer)
-             logger.debug(f"Added observer: {observer}")
-         if self.parent_engine:
-             self.parent_engine.add_observer(observer)
-
-     def remove_observer(self, observer: WorkflowObserver) -> None:
-         """Remove an event observer callback."""
-         if observer in self.observers:
-             self.observers.remove(observer)
-             logger.debug(f"Removed observer: {observer}")
-
-     async def _notify_observers(self, event: WorkflowEvent) -> None:
-         """Asynchronously notify all observers of an event."""
-         tasks = []
-         for observer in self.observers:
-             try:
-                 if asyncio.iscoroutinefunction(observer):
-                     tasks.append(observer(event))
-                 else:
-                     observer(event)
-             except Exception as e:
-                 logger.error(f"Observer {observer} failed for {event.event_type.value}: {e}")
-         if tasks:
-             await asyncio.gather(*tasks)
-
-     async def run(self, initial_context: Dict[str, Any]) -> Dict[str, Any]:
-         """Execute the workflow starting from the entry node with event notifications."""
-         self.context = initial_context.copy()
-         await self._notify_observers(
-             WorkflowEvent(event_type=WorkflowEventType.WORKFLOW_STARTED, node_name=None, context=self.context)
-         )
-
-         current_node = self.workflow.start_node
-         while current_node:
-             logger.info(f"Executing node: {current_node}")
-             await self._notify_observers(
-                 WorkflowEvent(event_type=WorkflowEventType.NODE_STARTED, node_name=current_node, context=self.context)
-             )
-
-             node_func = self.workflow.nodes.get(current_node)
-             if not node_func:
-                 logger.error(f"Node {current_node} not found")
-                 exc = ValueError(f"Node {current_node} not found")
-                 await self._notify_observers(
-                     WorkflowEvent(
-                         event_type=WorkflowEventType.NODE_FAILED,
-                         node_name=current_node,
-                         context=self.context,
-                         exception=exc,
-                     )
-                 )
-                 break
-
-             input_mappings = self.workflow.node_input_mappings.get(current_node, {})
-             inputs = {}
-             for key, mapping in input_mappings.items():
-                 if callable(mapping):
-                     inputs[key] = mapping(self.context)
-                 elif isinstance(mapping, str):
-                     inputs[key] = self.context.get(mapping)
-                 else:
-                     inputs[key] = mapping
-             for param in self.workflow.node_inputs[current_node]:
-                 if param not in inputs:
-                     inputs[param] = self.context.get(param)
-
-             result = None
-             exception = None
-
-             if isinstance(node_func, SubWorkflowNode):
-                 await self._notify_observers(
-                     WorkflowEvent(
-                         event_type=WorkflowEventType.SUB_WORKFLOW_ENTERED,
-                         node_name=current_node,
-                         context=self.context,
-                         sub_workflow_name=current_node,
-                     )
-                 )
-
-             try:
-                 if isinstance(node_func, SubWorkflowNode):
-                     result = await node_func(self)
-                     usage = None
-                 else:
-                     result = await node_func(**inputs)
-                     usage = getattr(node_func, "usage", None)
-                 output_key = self.workflow.node_outputs[current_node]
-                 if output_key:
-                     self.context[output_key] = result
-                 elif isinstance(result, dict):
-                     self.context.update(result)
-                     logger.debug(f"Updated context with {result} from node {current_node}")
-                 await self._notify_observers(
-                     WorkflowEvent(
-                         event_type=WorkflowEventType.NODE_COMPLETED,
-                         node_name=current_node,
-                         context=self.context,
-                         result=result,
-                         usage=usage,
-                     )
-                 )
-             except Exception as e:
-                 logger.error(f"Error executing node {current_node}: {e}")
-                 exception = e
-                 await self._notify_observers(
-                     WorkflowEvent(
-                         event_type=WorkflowEventType.NODE_FAILED,
-                         node_name=current_node,
-                         context=self.context,
-                         exception=e,
-                     )
-                 )
-                 raise
-             finally:
-                 if isinstance(node_func, SubWorkflowNode):
-                     await self._notify_observers(
-                         WorkflowEvent(
-                             event_type=WorkflowEventType.SUB_WORKFLOW_EXITED,
-                             node_name=current_node,
-                             context=self.context,
-                             sub_workflow_name=current_node,
-                             result=result,
-                             exception=exception,
-                         )
-                     )
-
-             next_nodes = self.workflow.transitions.get(current_node, [])
-             current_node = None
-             for next_node, condition in next_nodes:
-                 await self._notify_observers(
-                     WorkflowEvent(
-                         event_type=WorkflowEventType.TRANSITION_EVALUATED,
-                         node_name=None,
-                         context=self.context,
-                         transition_from=current_node,
-                         transition_to=next_node,
-                     )
-                 )
-                 if condition is None or condition(self.context):
-                     current_node = next_node
-                     break
-
-         logger.info("Workflow execution completed")
-         await self._notify_observers(
-             WorkflowEvent(event_type=WorkflowEventType.WORKFLOW_COMPLETED, node_name=None, context=self.context)
-         )
-         return self.context
-
-
- class Workflow:
-     def __init__(self, start_node: str):
-         """Initialize a workflow with a starting node.
-
-         Args:
-             start_node: The name of the initial node in the workflow.
-         """
-         self.start_node = start_node
-         self.nodes: Dict[str, Callable] = {}
-         self.node_inputs: Dict[str, List[str]] = {}
-         self.node_outputs: Dict[str, Optional[str]] = {}
-         self.transitions: Dict[str, List[Tuple[str, Optional[Callable]]]] = {}
-         self.node_input_mappings: Dict[str, Dict[str, Any]] = {}
-         self.current_node = None
-         self._observers: List[WorkflowObserver] = []
-         self._register_node(start_node)
-         self.current_node = start_node
-         # Loop-specific attributes
-         self.in_loop = False
-         self.loop_nodes = []
-         self.loop_entry_node = None
-
-     def _register_node(self, name: str):
-         """Register a node without modifying the current node."""
-         if name not in Nodes.NODE_REGISTRY:
-             raise ValueError(f"Node {name} not registered")
-         func, inputs, output = Nodes.NODE_REGISTRY[name]
-         self.nodes[name] = func
-         self.node_inputs[name] = inputs
-         self.node_outputs[name] = output
-
-     def node(self, name: str, inputs_mapping: Optional[Dict[str, Any]] = None):
-         """Add a node to the workflow chain with an optional inputs mapping.
-
-         Args:
-             name: The name of the node to add.
-             inputs_mapping: Optional dictionary mapping node inputs to context keys or callables.
-
-         Returns:
-             Self for method chaining.
-         """
-         self._register_node(name)
-         if self.in_loop:
-             self.loop_nodes.append(name)
-         if inputs_mapping:
-             self.node_input_mappings[name] = inputs_mapping
-             logger.debug(f"Added inputs mapping for node {name}: {inputs_mapping}")
-         self.current_node = name
-         return self
-
-     def sequence(self, *nodes: str):
-         """Add a sequence of nodes to execute in order.
-
-         Args:
-             *nodes: Variable number of node names to execute sequentially.
-
-         Returns:
-             Self for method chaining.
-         """
-         if not nodes:
-             return self
-         for node in nodes:
-             if node not in Nodes.NODE_REGISTRY:
-                 raise ValueError(f"Node {node} not registered")
-             func, inputs, output = Nodes.NODE_REGISTRY[node]
-             self.nodes[node] = func
-             self.node_inputs[node] = inputs
-             self.node_outputs[node] = output
-         for i in range(len(nodes) - 1):
-             self.transitions.setdefault(nodes[i], []).append((nodes[i + 1], None))
-         self.current_node = nodes[-1]
-         return self
-
-     def then(self, next_node: str, condition: Optional[Callable] = None):
-         """Add a transition to the next node with an optional condition.
-
-         Args:
-             next_node: Name of the node to transition to.
-             condition: Optional callable taking context and returning a boolean.
-
-         Returns:
-             Self for method chaining.
-         """
-         if next_node not in self.nodes:
-             self._register_node(next_node)
-         if self.current_node:
-             self.transitions.setdefault(self.current_node, []).append((next_node, condition))
-             logger.debug(f"Added transition from {self.current_node} to {next_node} with condition {condition}")
-         else:
-             logger.warning("No current node set for transition")
-         self.current_node = next_node
-         return self
-
-     def branch(
-         self,
-         branches: List[Tuple[str, Optional[Callable]]],
-         default: Optional[str] = None,
-         next_node: Optional[str] = None,
-     ) -> "Workflow":
-         """Add multiple conditional branches from the current node with an optional default and next node.
-
-         Args:
-             branches: List of tuples (next_node, condition), where condition takes context and returns a boolean.
-             default: Optional node to transition to if no branch conditions are met.
-             next_node: Optional node to set as current_node after branching (e.g., for convergence).
-
-         Returns:
-             Self for method chaining.
-         """
-         if not self.current_node:
-             logger.warning("No current node set for branching")
-             return self
-         for next_node_name, condition in branches:
-             if next_node_name not in self.nodes:
-                 self._register_node(next_node_name)
-             self.transitions.setdefault(self.current_node, []).append((next_node_name, condition))
-             logger.debug(f"Added branch from {self.current_node} to {next_node_name} with condition {condition}")
-         if default:
-             if default not in self.nodes:
-                 self._register_node(default)
-             self.transitions.setdefault(self.current_node, []).append((default, None))
-             logger.debug(f"Added default transition from {self.current_node} to {default}")
-         self.current_node = next_node  # Explicitly set next_node if provided
-         return self
-
-     def converge(self, convergence_node: str) -> "Workflow":
-         """Set a convergence point for all previous branches.
-
-         Args:
-             convergence_node: Name of the node where branches converge.
-
-         Returns:
-             Self for method chaining.
-         """
-         if convergence_node not in self.nodes:
-             self._register_node(convergence_node)
-         for node in self.nodes:
-             if (node not in self.transitions or not self.transitions[node]) and node != convergence_node:
-                 self.transitions.setdefault(node, []).append((convergence_node, None))
-                 logger.debug(f"Added convergence from {node} to {convergence_node}")
-         self.current_node = convergence_node
-         return self
-
-     def parallel(self, *nodes: str):
-         """Add parallel nodes to execute concurrently.
-
-         Args:
-             *nodes: Variable number of node names to execute in parallel.
-
-         Returns:
-             Self for method chaining.
-         """
-         if self.current_node:
-             for node in nodes:
-                 self.transitions.setdefault(self.current_node, []).append((node, None))
-         self.current_node = None
-         return self
-
-     def add_observer(self, observer: WorkflowObserver) -> "Workflow":
-         """Add an event observer callback to the workflow.
-
-         Args:
-             observer: Callable to handle workflow events.
-
-         Returns:
-             Self for method chaining.
-         """
-         if observer not in self._observers:
-             self._observers.append(observer)
-             logger.debug(f"Added observer to workflow: {observer}")
-         return self
-
-     def add_sub_workflow(self, name: str, sub_workflow: "Workflow", inputs: Dict[str, Any], output: str):
-         """Add a sub-workflow as a node with flexible inputs mapping.
-
-         Args:
-             name: Name of the sub-workflow node.
-             sub_workflow: The Workflow instance to embed.
-             inputs: Dictionary mapping sub-workflow inputs to context keys or callables.
-             output: Context key for the sub-workflow's result.
-
-         Returns:
-             Self for method chaining.
-         """
-         sub_node = SubWorkflowNode(sub_workflow, inputs, output)
-         self.nodes[name] = sub_node
-         self.node_inputs[name] = []
-         self.node_outputs[name] = output
-         self.current_node = name
-         logger.debug(f"Added sub-workflow {name} with inputs {inputs} and output {output}")
-         return self
-
-     def start_loop(self):
-         """Begin defining a loop in the workflow.
-
-         Raises:
-             ValueError: If called without a current node.
-
-         Returns:
-             Self for method chaining.
-         """
-         if self.current_node is None:
-             raise ValueError("Cannot start loop without a current node")
-         self.loop_entry_node = self.current_node
-         self.in_loop = True
-         self.loop_nodes = []
-         return self
-
-     def end_loop(self, condition: Callable[[Dict[str, Any]], bool], next_node: str):
-         """End the loop, setting up transitions based on the condition.
-
-         Args:
-             condition: Callable taking context and returning True when the loop should exit.
-             next_node: Name of the node to transition to after the loop exits.
-
-         Raises:
-             ValueError: If no loop nodes are defined.
-
-         Returns:
-             Self for method chaining.
-         """
-         if not self.in_loop or not self.loop_nodes:
-             raise ValueError("No loop nodes defined")
-
-         first_node = self.loop_nodes[0]
-         last_node = self.loop_nodes[-1]
-
-         # Transition from the node before the loop to the first loop node
-         self.transitions.setdefault(self.loop_entry_node, []).append((first_node, None))
-
-         # Transitions within the loop
-         for i in range(len(self.loop_nodes) - 1):
-             self.transitions.setdefault(self.loop_nodes[i], []).append((self.loop_nodes[i + 1], None))
-
-         # Conditional transitions from the last loop node
-         # If condition is False, loop back to the first node
-         self.transitions.setdefault(last_node, []).append((first_node, lambda ctx: not condition(ctx)))
-         # If condition is True, exit to the next node
-         self.transitions.setdefault(last_node, []).append((next_node, condition))
-
-         # Register the next_node if not already present
-         if next_node not in self.nodes:
-             self._register_node(next_node)
-
-         # Update state
-         self.current_node = next_node
-         self.in_loop = False
-         self.loop_nodes = []
-         self.loop_entry_node = None
-
-         return self
-
-     def build(self, parent_engine: Optional["WorkflowEngine"] = None) -> WorkflowEngine:
-         """Build and return a WorkflowEngine instance with registered observers.
-
-         Args:
-             parent_engine: Optional parent WorkflowEngine for sub-workflows.
-
-         Returns:
-             Configured WorkflowEngine instance.
-         """
-         engine = WorkflowEngine(self, parent_engine=parent_engine)
-         for observer in self._observers:
-             engine.add_observer(observer)
-         return engine
-
-
- class Nodes:
-     NODE_REGISTRY: Dict[str, Tuple[Callable, List[str], Optional[str]]] = {}
-
-     @classmethod
-     def define(cls, output: Optional[str] = None):
-         """Decorator for defining simple workflow nodes.
-
-         Args:
-             output: Optional context key for the node's result.
-
-         Returns:
-             Decorator function wrapping the node logic.
-         """
-         def decorator(func: Callable) -> Callable:
-             async def wrapped_func(**kwargs):
-                 try:
-                     if asyncio.iscoroutinefunction(func):
-                         result = await func(**kwargs)
-                     else:
-                         result = func(**kwargs)
-                     logger.debug(f"Node {func.__name__} executed with result: {result}")
-                     return result
-                 except Exception as e:
-                     logger.error(f"Error in node {func.__name__}: {e}")
-                     raise
-             sig = inspect.signature(func)
-             inputs = [param.name for param in sig.parameters.values()]
-             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
-             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
-             return wrapped_func
-         return decorator
-
-     @classmethod
-     def validate_node(cls, output: str):
-         """Decorator for nodes that validate inputs and return a string.
-
-         Args:
-             output: Context key for the validation result.
-
-         Returns:
-             Decorator function wrapping the validation logic.
-         """
-         def decorator(func: Callable) -> Callable:
-             async def wrapped_func(**kwargs):
-                 try:
-                     if asyncio.iscoroutinefunction(func):
-                         result = await func(**kwargs)
-                     else:
-                         result = func(**kwargs)
-                     if not isinstance(result, str):
-                         raise ValueError(f"Validation node {func.__name__} must return a string")
-                     logger.info(f"Validation result from {func.__name__}: {result}")
-                     return result
-                 except Exception as e:
-                     logger.error(f"Validation error in {func.__name__}: {e}")
-                     raise
-             sig = inspect.signature(func)
-             inputs = [param.name for param in sig.parameters.values()]
-             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
-             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
-             return wrapped_func
-         return decorator
-
-     @classmethod
-     def transform_node(cls, output: str, transformer: Callable[[Any], Any]):
-         """Decorator for nodes that transform their inputs.
-
-         Args:
-             output: Context key for the transformed result.
-             transformer: Callable to transform the input.
-
-         Returns:
-             Decorator function wrapping the transformation logic.
-         """
-         def decorator(func: Callable) -> Callable:
-             async def wrapped_func(**kwargs):
-                 try:
-                     input_key = list(kwargs.keys())[0] if kwargs else None
-                     if input_key:
-                         transformed_input = transformer(kwargs[input_key])
-                         kwargs[input_key] = transformed_input
-                     if asyncio.iscoroutinefunction(func):
-                         result = await func(**kwargs)
-                     else:
-                         result = func(**kwargs)
-                     logger.debug(f"Transformed node {func.__name__} executed with result: {result}")
-                     return result
-                 except Exception as e:
-                     logger.error(f"Error in transform node {func.__name__}: {e}")
-                     raise
-             sig = inspect.signature(func)
-             inputs = [param.name for param in sig.parameters.values()]
-             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
-             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
-             return wrapped_func
-         return decorator
-
-     @staticmethod
-     def _load_prompt_from_file(prompt_file: str, context: Dict[str, Any]) -> str:
-         """Load and render a Jinja2 template from an external file."""
-         try:
-             file_path = Path(prompt_file).resolve()
-             directory = file_path.parent
-             filename = file_path.name
-             env = Environment(loader=FileSystemLoader(directory))
-             template = env.get_template(filename)
-             return template.render(**context)
-         except TemplateNotFound as e:
-             logger.error(f"Jinja2 template file '{prompt_file}' not found: {e}")
-             raise ValueError(f"Prompt file '{prompt_file}' not found")
-         except Exception as e:
-             logger.error(f"Error loading or rendering prompt file '{prompt_file}': {e}")
-             raise
-
-     @staticmethod
-     def _render_template(template: str, template_file: Optional[str], context: Dict[str, Any]) -> str:
-         """Render a Jinja2 template from either a string or an external file."""
-         if template_file:
-             return Nodes._load_prompt_from_file(template_file, context)
-         try:
-             return Template(template).render(**context)
-         except Exception as e:
-             logger.error(f"Error rendering template: {e}")
-             raise
-
-     @classmethod
-     def llm_node(
-         cls,
-         system_prompt: str = "",
-         system_prompt_file: Optional[str] = None,
-         output: str = "",
-         prompt_template: str = "",
-         prompt_file: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: int = 2000,
-         top_p: float = 1.0,
-         presence_penalty: float = 0.0,
-         frequency_penalty: float = 0.0,
-         model: Union[Callable[[Dict[str, Any]], str], str] = lambda ctx: "gpt-3.5-turbo",
-         **kwargs,
-     ):
-         """Decorator for creating LLM nodes with plain text output, supporting dynamic parameters.
-
-         Args:
-             system_prompt: Inline system prompt defining LLM behavior.
-             system_prompt_file: Path to a system prompt template file (overrides system_prompt).
-             output: Context key for the LLM's result.
-             prompt_template: Inline Jinja2 template for the user prompt.
-             prompt_file: Path to a user prompt template file (overrides prompt_template).
-             temperature: Randomness control (0.0 to 1.0).
-             max_tokens: Maximum response length.
-             top_p: Nucleus sampling parameter (0.0 to 1.0).
-             presence_penalty: Penalty for repetition (-2.0 to 2.0).
-             frequency_penalty: Penalty for frequent words (-2.0 to 2.0).
-             model: Callable or string to determine the LLM model dynamically from context.
-             **kwargs: Additional parameters for the LLM call.
-
-         Returns:
-             Decorator function wrapping the LLM logic.
-         """
-         def decorator(func: Callable) -> Callable:
-             # Store all decorator parameters in a config dictionary
-             config = {
-                 "system_prompt": system_prompt,
-                 "system_prompt_file": system_prompt_file,
-                 "prompt_template": prompt_template,
-                 "prompt_file": prompt_file,
-                 "temperature": temperature,
-                 "max_tokens": max_tokens,
-                 "top_p": top_p,
-                 "presence_penalty": presence_penalty,
-                 "frequency_penalty": frequency_penalty,
-                 "model": model,
-                 **kwargs,
-             }
-
-             async def wrapped_func(**func_kwargs):
-                 # Use func_kwargs to override config values if provided, otherwise use config defaults
-                 system_prompt_to_use = func_kwargs.pop("system_prompt", config["system_prompt"])
-                 system_prompt_file_to_use = func_kwargs.pop("system_prompt_file", config["system_prompt_file"])
-                 prompt_template_to_use = func_kwargs.pop("prompt_template", config["prompt_template"])
-                 prompt_file_to_use = func_kwargs.pop("prompt_file", config["prompt_file"])
-                 temperature_to_use = func_kwargs.pop("temperature", config["temperature"])
-                 max_tokens_to_use = func_kwargs.pop("max_tokens", config["max_tokens"])
-                 top_p_to_use = func_kwargs.pop("top_p", config["top_p"])
-                 presence_penalty_to_use = func_kwargs.pop("presence_penalty", config["presence_penalty"])
-                 frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", config["frequency_penalty"])
-                 model_to_use = func_kwargs.pop("model", config["model"])
-
-                 # Handle callable model parameter
-                 if callable(model_to_use):
-                     model_to_use = model_to_use(func_kwargs)
-
-                 # Load system prompt from file if specified
-                 if system_prompt_file_to_use:
-                     system_content = cls._load_prompt_from_file(system_prompt_file_to_use, func_kwargs)
-                 else:
-                     system_content = system_prompt_to_use
-
-                 # Prepare template variables and render prompt
-                 sig = inspect.signature(func)
-                 template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
-                 prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
-                 messages = [
-                     {"role": "system", "content": system_content},
-                     {"role": "user", "content": prompt},
-                 ]
-
-                 # Logging for debugging
-                 truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
-                 logger.info(f"LLM node {func.__name__} using model: {model_to_use}")
-                 logger.debug(f"System prompt: {system_content[:100]}...")
-                 logger.debug(f"User prompt preview: {truncated_prompt}")
-
-                 # Call the acompletion function with the resolved model
-                 try:
-                     response = await acompletion(
-                         model=model_to_use,
-                         messages=messages,
-                         temperature=temperature_to_use,
-                         max_tokens=max_tokens_to_use,
-                         top_p=top_p_to_use,
-                         presence_penalty=presence_penalty_to_use,
-                         frequency_penalty=frequency_penalty_to_use,
-                         drop_params=True,
-                         **kwargs,
-                     )
-                     content = response.choices[0].message.content.strip()
-                     wrapped_func.usage = {
-                         "prompt_tokens": response.usage.prompt_tokens,
-                         "completion_tokens": response.usage.completion_tokens,
-                         "total_tokens": response.usage.total_tokens,
-                         "cost": getattr(response, "cost", None),
-                     }
-                     logger.debug(f"LLM output from {func.__name__}: {content[:50]}...")
-                     return content
-                 except Exception as e:
-                     logger.error(f"Error in LLM node {func.__name__}: {e}")
-                     raise
-
-             # Register the node with its inputs and output
-             sig = inspect.signature(func)
-             inputs = [param.name for param in sig.parameters.values()]
-             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
-             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
-             return wrapped_func
-         return decorator
-
-     @classmethod
-     def structured_llm_node(
-         cls,
-         system_prompt: str = "",
-         system_prompt_file: Optional[str] = None,
-         output: str = "",
-         response_model: Type[BaseModel] = None,
-         prompt_template: str = "",
-         prompt_file: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: int = 2000,
-         top_p: float = 1.0,
-         presence_penalty: float = 0.0,
-         frequency_penalty: float = 0.0,
-         model: Union[Callable[[Dict[str, Any]], str], str] = lambda ctx: "gpt-3.5-turbo",
-         **kwargs,
-     ):
-         """Decorator for creating LLM nodes with structured output, supporting dynamic parameters.
-
-         Args:
-             system_prompt: Inline system prompt defining LLM behavior.
-             system_prompt_file: Path to a system prompt template file (overrides system_prompt).
-             output: Context key for the LLM's structured result.
-             response_model: Pydantic model class for structured output.
-             prompt_template: Inline Jinja2 template for the user prompt.
-             prompt_file: Path to a user prompt template file (overrides prompt_template).
-             temperature: Randomness control (0.0 to 1.0).
-             max_tokens: Maximum response length.
-             top_p: Nucleus sampling parameter (0.0 to 1.0).
-             presence_penalty: Penalty for repetition (-2.0 to 2.0).
-             frequency_penalty: Penalty for frequent words (-2.0 to 2.0).
-             model: Callable or string to determine the LLM model dynamically from context.
-             **kwargs: Additional parameters for the LLM call.
-
-         Returns:
-             Decorator function wrapping the structured LLM logic.
-         """
-         try:
-             client = instructor.from_litellm(acompletion)
-         except ImportError:
-             logger.error("Instructor not installed. Install with 'pip install instructor[litellm]'")
-             raise ImportError("Instructor is required for structured_llm_node")
-
-         def decorator(func: Callable) -> Callable:
-             # Store all decorator parameters in a config dictionary
-             config = {
-                 "system_prompt": system_prompt,
-                 "system_prompt_file": system_prompt_file,
-                 "prompt_template": prompt_template,
-                 "prompt_file": prompt_file,
-                 "temperature": temperature,
-                 "max_tokens": max_tokens,
-                 "top_p": top_p,
-                 "presence_penalty": presence_penalty,
-                 "frequency_penalty": frequency_penalty,
-                 "model": model,
-                 **kwargs,
-             }
-
-             async def wrapped_func(**func_kwargs):
-                 # Resolve parameters, prioritizing func_kwargs over config defaults
-                 system_prompt_to_use = func_kwargs.pop("system_prompt", config["system_prompt"])
-                 system_prompt_file_to_use = func_kwargs.pop("system_prompt_file", config["system_prompt_file"])
-                 prompt_template_to_use = func_kwargs.pop("prompt_template", config["prompt_template"])
-                 prompt_file_to_use = func_kwargs.pop("prompt_file", config["prompt_file"])
-                 temperature_to_use = func_kwargs.pop("temperature", config["temperature"])
-                 max_tokens_to_use = func_kwargs.pop("max_tokens", config["max_tokens"])
-                 top_p_to_use = func_kwargs.pop("top_p", config["top_p"])
-                 presence_penalty_to_use = func_kwargs.pop("presence_penalty", config["presence_penalty"])
-                 frequency_penalty_to_use = func_kwargs.pop("frequency_penalty", config["frequency_penalty"])
-                 model_to_use = func_kwargs.pop("model", config["model"])
-
-                 # Handle callable model parameter
-                 if callable(model_to_use):
-                     model_to_use = model_to_use(func_kwargs)
-
-                 # Load system prompt from file if specified
-                 if system_prompt_file_to_use:
-                     system_content = cls._load_prompt_from_file(system_prompt_file_to_use, func_kwargs)
-                 else:
-                     system_content = system_prompt_to_use
-
-                 # Render prompt using template variables
-                 sig = inspect.signature(func)
-                 template_vars = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
-                 prompt = cls._render_template(prompt_template_to_use, prompt_file_to_use, template_vars)
-                 messages = [
-                     {"role": "system", "content": system_content},
-                     {"role": "user", "content": prompt},
-                 ]
-
-                 # Logging for debugging
-                 truncated_prompt = prompt[:200] + "..." if len(prompt) > 200 else prompt
-                 logger.info(f"Structured LLM node {func.__name__} using model: {model_to_use}")
-                 logger.debug(f"System prompt: {system_content[:100]}...")
-                 logger.debug(f"User prompt preview: {truncated_prompt}")
-                 logger.debug(f"Expected response model: {response_model.__name__}")
-
-                 # Generate structured response
-                 try:
-                     structured_response, raw_response = await client.chat.completions.create_with_completion(
-                         model=model_to_use,
-                         messages=messages,
-                         response_model=response_model,
-                         temperature=temperature_to_use,
-                         max_tokens=max_tokens_to_use,
-                         top_p=top_p_to_use,
-                         presence_penalty=presence_penalty_to_use,
-                         frequency_penalty=frequency_penalty_to_use,
-                         drop_params=True,
-                         **kwargs,
-                     )
-                     wrapped_func.usage = {
-                         "prompt_tokens": raw_response.usage.prompt_tokens,
-                         "completion_tokens": raw_response.usage.completion_tokens,
-                         "total_tokens": raw_response.usage.total_tokens,
-                         "cost": getattr(raw_response, "cost", None),
-                     }
-                     logger.debug(f"Structured output from {func.__name__}: {structured_response}")
-                     return structured_response
-                 except ValidationError as e:
-                     logger.error(f"Validation error in {func.__name__}: {e}")
-                     raise
-                 except Exception as e:
-                     logger.error(f"Error in structured LLM node {func.__name__}: {e}")
-                     raise
-
-             # Register the node
-             sig = inspect.signature(func)
-             inputs = [param.name for param in sig.parameters.values()]
-             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
-             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
-             return wrapped_func
-         return decorator
-
-     @classmethod
-     def template_node(
-         cls,
-         output: str,
-         template: str = "",
-         template_file: Optional[str] = None,
-     ):
-         """Decorator for creating nodes that apply a Jinja2 template to inputs.
-
-         Args:
-             output: Context key for the rendered result.
-             template: Inline Jinja2 template string.
-             template_file: Path to a template file (overrides template).
-
-         Returns:
-             Decorator function wrapping the template logic.
-         """
-         def decorator(func: Callable) -> Callable:
-             async def wrapped_func(**func_kwargs):
-                 template_to_use = func_kwargs.pop("template", template)
-                 template_file_to_use = func_kwargs.pop("template_file", template_file)
-
-                 sig = inspect.signature(func)
-                 expected_params = [p.name for p in sig.parameters.values() if p.name != 'rendered_content']
-                 template_vars = {k: v for k, v in func_kwargs.items() if k in expected_params}
-                 rendered_content = cls._render_template(template_to_use, template_file_to_use, template_vars)
-
-                 filtered_kwargs = {k: v for k, v in func_kwargs.items() if k in expected_params}
-
-                 try:
-                     if asyncio.iscoroutinefunction(func):
-                         result = await func(rendered_content=rendered_content, **filtered_kwargs)
-                     else:
-                         result = func(rendered_content=rendered_content, **filtered_kwargs)
-                     logger.debug(f"Template node {func.__name__} rendered: {rendered_content[:50]}...")
-                     return result
-                 except Exception as e:
-                     logger.error(f"Error in template node {func.__name__}: {e}")
-                     raise
-             sig = inspect.signature(func)
-             inputs = [param.name for param in sig.parameters.values()]
-             if 'rendered_content' not in inputs:
-                 inputs.insert(0, 'rendered_content')
-             logger.debug(f"Registering node {func.__name__} with inputs {inputs} and output {output}")
-             cls.NODE_REGISTRY[func.__name__] = (wrapped_func, inputs, output)
-             return wrapped_func
-         return decorator
-
-
- # Add a templates directory path at the module level
- TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
-
- # Helper function to get template paths
- def get_template_path(template_name):
-     return os.path.join(TEMPLATES_DIR, template_name)
-
-
- async def example_workflow():
-     class OrderDetails(BaseModel):
-         order_id: str
-         items_in_stock: List[str]
-         items_out_of_stock: List[str]
-
-     async def progress_monitor(event: WorkflowEvent):
-         print(f"[{event.event_type.value}] {event.node_name or 'Workflow'}")
-         if event.result is not None:
-             print(f"Result: {event.result}")
-         if event.exception is not None:
-             print(f"Exception: {event.exception}")
-
-     class TokenUsageObserver:
-         def __init__(self):
-             self.total_prompt_tokens = 0
-             self.total_completion_tokens = 0
-             self.total_cost = 0.0
-             self.node_usages = {}
-
-         def __call__(self, event: WorkflowEvent):
-             if event.event_type == WorkflowEventType.NODE_COMPLETED and event.usage:
-                 usage = event.usage
-                 self.total_prompt_tokens += usage.get("prompt_tokens", 0)
-                 self.total_completion_tokens += usage.get("completion_tokens", 0)
-                 if usage.get("cost") is not None:
-                     self.total_cost += usage["cost"]
-                 self.node_usages[event.node_name] = usage
-             if event.event_type == WorkflowEventType.WORKFLOW_COMPLETED:
-                 print(f"Total prompt tokens: {self.total_prompt_tokens}")
-                 print(f"Total completion tokens: {self.total_completion_tokens}")
-                 print(f"Total cost: {self.total_cost}")
-                 for node, usage in self.node_usages.items():
-                     print(f"Node {node}: {usage}")
-
-     @Nodes.validate_node(output="validation_result")
-     async def validate_order(order: Dict[str, Any]) -> str:
-         return "Order validated" if order.get("items") else "Invalid order"
-
-     @Nodes.structured_llm_node(
-         system_prompt_file=get_template_path("system_check_inventory.j2"),
-         output="inventory_status",
-         response_model=OrderDetails,
-         prompt_file=get_template_path("prompt_check_inventory.j2"),
-     )
-     async def check_inventory(items: List[str]) -> OrderDetails:
-         return OrderDetails(order_id="123", items_in_stock=["item1"], items_out_of_stock=[])
-
-     @Nodes.define(output="payment_status")
-     async def process_payment(order: Dict[str, Any]) -> str:
-         return "Payment processed"
-
-     @Nodes.define(output="shipping_confirmation")
-     async def arrange_shipping(order: Dict[str, Any]) -> str:
-         return "Shipping arranged"
-
-     @Nodes.define(output="order_status")
-     async def update_order_status(shipping_confirmation: str) -> str:
-         return "Order updated"
-
-     @Nodes.define(output="email_status")
-     async def send_confirmation_email(shipping_confirmation: str) -> str:
-         return "Email sent"
-
-     @Nodes.define(output="notification_status")
-     async def notify_customer_out_of_stock(inventory_status: OrderDetails) -> str:
-         return "Customer notified of out-of-stock"
-
-     @Nodes.transform_node(output="transformed_items", transformer=lambda x: [item.upper() for item in x])
-     async def transform_items(items: List[str]) -> List[str]:
-         return items
-
-     @Nodes.template_node(
-         output="formatted_message",
-         template="Order contains: {{ items | join(', ') }}",
-     )
-     async def format_order_message(rendered_content: str, items: List[str]) -> str:
-         return rendered_content
-
-     payment_shipping_sub_wf = Workflow("process_payment").sequence("process_payment", "arrange_shipping")
-
-     token_observer = TokenUsageObserver()
-
-     workflow = (
-         Workflow("validate_order")
-         .add_observer(progress_monitor)
-         .add_observer(token_observer)
-         .node("validate_order", inputs_mapping={"order": "customer_order"})
-         .node("transform_items")
-         .node("format_order_message", inputs_mapping={
-             "items": "items",
-             "template": "Custom order: {{ items | join(', ') }}"
-         })
-         .node("check_inventory", inputs_mapping={
-             "model": lambda ctx: "gemini/gemini-2.0-flash",
-             "items": "transformed_items",
-             "temperature": 0.5,
-             "max_tokens": 1000
-         })
-         .add_sub_workflow(
-             "payment_shipping",
-             payment_shipping_sub_wf,
-             inputs={"order": lambda ctx: {"items": ctx["items"]}},
-             output="shipping_confirmation"
-         )
-         .branch(
-             [
-                 ("payment_shipping", lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) == 0 if ctx.get("inventory_status") else False),
-                 ("notify_customer_out_of_stock", lambda ctx: len(ctx.get("inventory_status").items_out_of_stock) > 0 if ctx.get("inventory_status") else True)
-             ],
-             next_node="update_order_status"
-         )
-         .converge("update_order_status")
-         .sequence("update_order_status", "send_confirmation_email")
-     )
-
-     initial_context = {"customer_order": {"items": ["item1", "item2"]}, "items": ["item1", "item2"]}
-     engine = workflow.build()
-     result = await engine.run(initial_context)
-     logger.info(f"Workflow result: {result}")
-
-
- if __name__ == "__main__":
-     logger.info("Initializing Quantalogic Flow Package")
-     asyncio.run(example_workflow())
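
For reference, a minimal usage sketch of the Workflow/Nodes API removed above, reconstructed from the deleted flow.py. It assumes the pre-0.93 import path quantalogic.flow.flow still exposes Workflow and Nodes as shown in the diff; the node names, context keys, and values are illustrative, not taken from the package.

import asyncio

from quantalogic.flow.flow import Nodes, Workflow  # pre-0.93 module path per the diff above

# Defining nodes registers them in Nodes.NODE_REGISTRY under the function name
# (hypothetical example nodes; the decorator's output= names the context key for the result).
@Nodes.define(output="greeting")
async def greet(name: str) -> str:
    return f"Hello, {name}!"

@Nodes.define(output="shout")
async def shout(greeting: str) -> str:
    return greeting.upper()

async def main():
    # Chain greet -> shout, build an engine, and run it with an initial context.
    workflow = Workflow("greet").then("shout")
    engine = workflow.build()
    result = await engine.run({"name": "Ada"})  # "Ada" is a placeholder value
    print(result["shout"])  # HELLO, ADA!

asyncio.run(main())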