kailash 0.4.2__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kailash/nodes/base.py CHANGED
@@ -21,8 +21,12 @@ Key Components:
  import inspect
  import json
  import logging
+ import os
+ import threading
  from abc import ABC, abstractmethod
+ from collections import OrderedDict
  from datetime import UTC, datetime
+ from functools import lru_cache
  from typing import Any

  from pydantic import BaseModel, Field, ValidationError
@@ -158,6 +162,10 @@ class Node(ABC):
      - WorkflowExporter: Serializes nodes for export
      """

+     # Class-level configuration
+     _DEFAULT_CACHE_SIZE = 128
+     _SPECIAL_PARAMS = {"context"}  # Parameters excluded from cache key
+
      def __init__(self, **kwargs):
          """Initialize the node with configuration parameters.

@@ -219,6 +227,25 @@ class Node(ABC):
              }
              self.config = {k: v for k, v in kwargs.items() if k not in internal_fields}

+             # Parameter resolution cache - initialize before validation
+             cache_size = int(
+                 os.environ.get("KAILASH_PARAM_CACHE_SIZE", self._DEFAULT_CACHE_SIZE)
+             )
+             self._cache_enabled = (
+                 os.environ.get("KAILASH_DISABLE_PARAM_CACHE", "").lower() != "true"
+             )
+
+             # Use OrderedDict for LRU implementation
+             self._param_cache = OrderedDict()
+             self._param_cache_lock = threading.Lock()
+             self._cache_max_size = cache_size
+             self._cached_params = None
+
+             # Cache statistics
+             self._cache_hits = 0
+             self._cache_misses = 0
+             self._cache_evictions = 0
+
              self._validate_config()
          except ValidationError as e:
              raise NodeConfigurationError(f"Invalid node metadata: {e}") from e
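
The cache is sized and toggled purely through environment variables read at construction time. A minimal sketch of exercising these knobs, assuming a hypothetical Node subclass (EchoNode is not part of the package, and returning an empty parameter dict is an assumption about what the base class accepts):

    import os

    from kailash.nodes.base import Node

    # Tune or disable the parameter-resolution cache before nodes are created
    os.environ["KAILASH_PARAM_CACHE_SIZE"] = "256"        # default is 128
    # os.environ["KAILASH_DISABLE_PARAM_CACHE"] = "true"  # opts out entirely

    class EchoNode(Node):  # hypothetical example node
        def get_parameters(self):
            return {}  # assumes no declared parameters is acceptable

        def run(self, **kwargs):
            return dict(kwargs)

    node = EchoNode(name="example")
    node.validate_inputs(data=[1, 2, 3])  # first call with this key set: miss
    node.validate_inputs(data=[4, 5, 6])  # same key pattern: hit
    print(node.get_cache_stats())         # reports hits, misses, hit_rate, ...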
@@ -379,63 +406,10 @@ class Node(ABC):
          - LocalRuntime: During workflow execution
          - TestRunner: During unit testing
          """
-         # Check if this node has implemented async_run
-         import inspect
-
-         # Get the actual async_run method from the instance's class
-         async_run_method = getattr(self.__class__, "async_run", None)
-         base_async_run = getattr(Node, "async_run", None)
-
-         # Check if async_run has been overridden
-         if async_run_method and async_run_method != base_async_run:
-             # This node has a custom async_run implementation
-             # Run it synchronously
-             import asyncio
-
-             try:
-                 # Check if we're already in an event loop
-                 loop = asyncio.get_running_loop()
-
-                 # We're in an event loop - use nest_asyncio
-                 import nest_asyncio
-
-                 nest_asyncio.apply()
-                 return asyncio.run(self.async_run(**kwargs))
-
-             except RuntimeError:
-                 # No event loop running, we can use asyncio.run() directly
-                 return asyncio.run(self.async_run(**kwargs))
-         else:
-             # This is a regular synchronous node - subclass should override this method
-             raise NotImplementedError(
-                 f"Node '{self.__class__.__name__}' must implement either run() or async_run() method"
-             )
-
-     async def async_run(self, **kwargs) -> dict[str, Any]:
-         """Asynchronous execution method for the node.
-
-         This method provides async execution support. By default, it calls
-         the synchronous run() method. Nodes can override this for true
-         async behavior.
-
-         Design Philosophy:
-         - Maintain backward compatibility with synchronous nodes
-         - Support both sync and async execution methods
-         - Provide clear error handling for async operations
-         - Enable efficient parallel execution in workflows
-
-         Args:
-             **kwargs: Validated input parameters matching get_parameters()
-
-         Returns:
-             Dictionary of outputs that will be validated and passed
-             to downstream nodes
-
-         Raises:
-             NodeExecutionError: If execution fails
-         """
-         # Default implementation calls the synchronous run() method
-         return self.run(**kwargs)
+         # This is a synchronous node - subclass must override this method
+         raise NotImplementedError(
+             f"Node '{self.__class__.__name__}' must implement run() method"
+         )

      def _validate_config(self):
          """Validate node configuration against defined parameters.
@@ -474,7 +448,7 @@ class Node(ABC):
          - get_parameters() implementation errors
          """
          try:
-             params = self.get_parameters()
+             params = self._get_cached_parameters()
          except Exception as e:
              raise NodeConfigurationError(f"Failed to get node parameters: {e}") from e

@@ -502,6 +476,21 @@ class Node(ABC):
                      f"Conversion failed: {e}"
                  ) from e

+     def _get_cached_parameters(self) -> dict[str, NodeParameter]:
+         """Get cached parameter definitions.
+
+         Returns:
+             Dictionary of parameter definitions, cached for performance
+         """
+         if self._cached_params is None:
+             try:
+                 self._cached_params = self.get_parameters()
+             except Exception as e:
+                 raise NodeValidationError(
+                     f"Failed to get node parameters for validation: {e}"
+                 ) from e
+         return self._cached_params
+
      def validate_inputs(self, **kwargs) -> dict[str, Any]:
          r"""Validate runtime inputs against node requirements.

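The effect is straightforward memoization: get_parameters() runs once per instance and every later validation reuses the stored dict. A standalone sketch of the same pattern, independent of the package:

    class ParamCacheDemo:
        """Illustrates the memoization used by _get_cached_parameters()."""

        def __init__(self):
            self._cached_params = None
            self.calls = 0

        def get_parameters(self):
            self.calls += 1  # expensive in real nodes; just counted here
            return {"text": "str", "count": "int"}

        def _get_cached_parameters(self):
            if self._cached_params is None:
                self._cached_params = self.get_parameters()
            return self._cached_params

    demo = ParamCacheDemo()
    demo._get_cached_parameters()
    demo._get_cached_parameters()
    assert demo.calls == 1  # computed once, reused afterwards
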
@@ -555,28 +544,178 @@ class Node(ABC):
          - execute(): Before passing inputs to run()
          - Workflow validation: During connection checks
          """
-         # Enhanced parameter resolution with auto-mapping
-         try:
-             params = self.get_parameters()
-         except Exception as e:
-             raise NodeValidationError(
-                 f"Failed to get node parameters for validation: {e}"
-             ) from e
+         # Use cached parameters for better performance
+         params = self._get_cached_parameters()
+
+         # Check if caching is enabled
+         if not self._cache_enabled:
+             resolved = self._resolve_parameters(kwargs, params)
+         else:
+             # Check if we have a cached resolution for this input pattern
+             cache_key = self._get_cache_key(kwargs)
+
+             with self._param_cache_lock:
+                 if cache_key in self._param_cache:
+                     # Move to end for LRU
+                     self._param_cache.move_to_end(cache_key)
+                     self._cache_hits += 1
+
+                     # Use cached resolution and apply values
+                     cached_mapping = self._param_cache[cache_key]
+                     resolved = self._apply_cached_mapping(kwargs, cached_mapping)
+                 else:
+                     self._cache_misses += 1

-         # Phase 1: Resolve parameters using enhanced mapping
-         resolved = self._resolve_parameters(kwargs, params)
+                     # Phase 1: Resolve parameters using enhanced mapping
+                     resolved = self._resolve_parameters(kwargs, params)
+
+                     # Cache the mapping pattern for future use
+                     mapping = self._extract_mapping_pattern(kwargs, resolved)
+                     self._param_cache[cache_key] = mapping
+
+                     # Evict oldest if cache is full (LRU)
+                     if len(self._param_cache) > self._cache_max_size:
+                         self._param_cache.popitem(last=False)  # Remove oldest
+                         self._cache_evictions += 1

          # Phase 2: Validate resolved parameters
          validated = self._validate_resolved_parameters(resolved, params)

          # Preserve special runtime parameters that are not in schema
-         special_params = ["context"]
-         for special_param in special_params:
+         for special_param in self._SPECIAL_PARAMS:
              if special_param in kwargs:
                  validated[special_param] = kwargs[special_param]

          return validated

+     def _get_cached_parameters(self) -> dict[str, NodeParameter]:
+         """Get node parameters with caching for performance.
+
+         Returns:
+             Cached parameter definitions
+         """
+         if self._cached_params is None:
+             self._cached_params = self.get_parameters()
+         return self._cached_params
+
+     def _get_cache_key(self, inputs: dict) -> str:
+         """Generate a cache key based on input parameter names.
+
+         Args:
+             inputs: Runtime inputs dictionary
+
+         Returns:
+             Cache key string based on sorted parameter names
+         """
+         # Exclude special parameters from cache key
+         cache_params = [k for k in inputs.keys() if k not in self._SPECIAL_PARAMS]
+         return "|".join(sorted(cache_params))
+
+     def _apply_cached_mapping(self, inputs: dict, mapping: dict) -> dict:
+         """Apply cached mapping pattern to current inputs.
+
+         Args:
+             inputs: Current runtime inputs
+             mapping: Cached mapping pattern
+
+         Returns:
+             Resolved parameters dictionary
+         """
+         resolved = {}
+         for param_name, source_key in mapping.items():
+             if source_key in inputs:
+                 resolved[param_name] = inputs[source_key]
+         return resolved
+
+     def _extract_mapping_pattern(self, inputs: dict, resolved: dict) -> dict:
+         """Extract the mapping pattern for caching.
+
+         The cache stores which input keys map to which parameter names,
+         allowing fast resolution for repeated input patterns.
+
+         Args:
+             inputs: Original runtime inputs
+             resolved: Resolved parameters
+
+         Returns:
+             Mapping pattern dictionary {param_name: input_key}
+         """
+         mapping = {}
+
+         # Build reverse mapping from resolved params to input keys
+         # This tracks the resolution decisions made by _resolve_parameters
+         for param_name in resolved:
+             # Direct match - parameter name exists in inputs
+             if param_name in inputs and self._safe_compare(
+                 inputs[param_name], resolved[param_name]
+             ):
+                 mapping[param_name] = param_name
+             else:
+                 # Search for which input key provided this parameter value
+                 # Must match exact resolution logic from _resolve_parameters
+                 params = self._get_cached_parameters()
+                 param_def = params.get(param_name)
+
+                 if param_def:
+                     # Check workflow alias
+                     if param_def.workflow_alias and param_def.workflow_alias in inputs:
+                         if self._safe_compare(
+                             inputs[param_def.workflow_alias], resolved[param_name]
+                         ):
+                             mapping[param_name] = param_def.workflow_alias
+                             continue
+
+                     # Check auto_map_from alternatives
+                     if param_def.auto_map_from:
+                         for alt_name in param_def.auto_map_from:
+                             if alt_name in inputs and self._safe_compare(
+                                 inputs[alt_name], resolved[param_name]
+                             ):
+                                 mapping[param_name] = alt_name
+                                 break
+
+         return mapping
+
+     def _safe_compare(self, value1: Any, value2: Any) -> bool:
+         """Safely compare two values, handling special cases like DataFrames.
+
+         Args:
+             value1: First value to compare
+             value2: Second value to compare
+
+         Returns:
+             True if values are equal, False otherwise
+         """
+         # Handle pandas DataFrame and Series
+         try:
+             import pandas as pd
+
+             if isinstance(value1, (pd.DataFrame, pd.Series)) or isinstance(
+                 value2, (pd.DataFrame, pd.Series)
+             ):
+                 # For DataFrames/Series, use identity comparison
+                 # This is safe for caching since we're tracking object references
+                 return value1 is value2
+         except ImportError:
+             pass
+
+         # Handle numpy arrays
+         try:
+             import numpy as np
+
+             if isinstance(value1, np.ndarray) or isinstance(value2, np.ndarray):
+                 # For numpy arrays, use identity comparison
+                 return value1 is value2
+         except ImportError:
+             pass
+
+         # For all other types, use standard equality
+         try:
+             return value1 == value2
+         except (ValueError, TypeError):
+             # If comparison fails, they're not equal
+             return False
+
      def _resolve_parameters(self, runtime_inputs: dict, params: dict) -> dict:
          """Enhanced parameter resolution with auto-mapping.

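The cache is a plain OrderedDict driven as an LRU: a hit is moved to the end, and once the size cap is exceeded the oldest entry is popped from the front. A standalone sketch of that eviction behavior:

    from collections import OrderedDict

    cache: OrderedDict[str, dict] = OrderedDict()
    MAX_SIZE = 2  # the node default is 128

    def put(key: str, mapping: dict) -> None:
        cache[key] = mapping
        if len(cache) > MAX_SIZE:
            cache.popitem(last=False)  # evict the least-recently-used entry

    put("a|b", {"data": "a"})
    put("c|d", {"data": "c"})
    cache.move_to_end("a|b")    # a cache hit refreshes the entry
    put("e|f", {"data": "e"})   # evicts "c|d", now the oldest
    assert list(cache) == ["a|b", "e|f"]

Note that the key is only the sorted, "|"-joined set of input names (see _get_cache_key), so two calls with the same argument names but different values share one cached mapping.
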
@@ -598,67 +737,54 @@ class Node(ABC):
          resolved = {}
          used_inputs = set()

-         # Phase 1: Direct parameter matches (preserves existing behavior)
+         # Optimized single-pass resolution combining all phases
          for param_name, param_def in params.items():
+             # Skip if already resolved
+             if param_name in resolved:
+                 continue
+
+             # Phase 1: Direct match (highest priority)
              if param_name in runtime_inputs:
                  resolved[param_name] = runtime_inputs[param_name]
                  used_inputs.add(param_name)
-                 if self.logger:
-                     self.logger.debug(f"Direct match: {param_name}")
-
-         # Phase 2: Workflow alias resolution
-         for param_name, param_def in params.items():
-             if param_name in resolved:
                  continue

+             # Phase 2: Workflow alias
              if param_def.workflow_alias and param_def.workflow_alias in runtime_inputs:
                  resolved[param_name] = runtime_inputs[param_def.workflow_alias]
                  used_inputs.add(param_def.workflow_alias)
-                 if self.logger:
-                     self.logger.debug(
-                         f"Workflow alias match: {param_name} <- {param_def.workflow_alias}"
-                     )
-                 continue
-
-         # Phase 3: Auto-mapping from alternative names
-         for param_name, param_def in params.items():
-             if param_name in resolved:
                  continue

+             # Phase 3: Auto-mapping alternatives
              if param_def.auto_map_from:
                  for alt_name in param_def.auto_map_from:
                      if alt_name in runtime_inputs and alt_name not in used_inputs:
                          resolved[param_name] = runtime_inputs[alt_name]
                          used_inputs.add(alt_name)
-                         if self.logger:
-                             self.logger.debug(
-                                 f"Auto-map match: {param_name} <- {alt_name}"
-                             )
                          break

-         # Phase 4: Primary input auto-mapping (for nodes like SwitchNode)
-         primary_params = [p for p in params.values() if p.auto_map_primary]
-         if primary_params and len(primary_params) == 1:
-             primary_param = primary_params[0]
-             if primary_param.name not in resolved:
-                 # Find the main data input (usually the largest unused input)
-                 remaining_inputs = {
-                     k: v
-                     for k, v in runtime_inputs.items()
-                     if k not in used_inputs and not k.startswith("_")
-                 }
-                 if remaining_inputs:
-                     # Use the input with the most substantial data as primary
-                     main_input = max(
-                         remaining_inputs.items(),
-                         key=lambda x: len(str(x[1])) if x[1] is not None else 0,
-                     )
-                     resolved[primary_param.name] = main_input[1]
-                     used_inputs.add(main_input[0])
-                     if self.logger:
-                         self.logger.debug(
-                             f"Primary auto-map: {primary_param.name} <- {main_input[0]}"
-                         )
+         # Phase 4: Primary input auto-mapping (handled separately for efficiency)
+         primary_params = []
+         for param_name, param_def in params.items():
+             if param_def.auto_map_primary and param_name not in resolved:
+                 primary_params.append((param_name, param_def))
+
+         if len(primary_params) == 1:
+             param_name, param_def = primary_params[0]
+             # Find the main data input (usually the largest unused input)
+             remaining_inputs = {
+                 k: v
+                 for k, v in runtime_inputs.items()
+                 if k not in used_inputs and not k.startswith("_")
+             }
+             if remaining_inputs:
+                 # Use the input with the most substantial data as primary
+                 main_input = max(
+                     remaining_inputs.items(),
+                     key=lambda x: len(str(x[1])) if x[1] is not None else 0,
+                 )
+                 resolved[param_name] = main_input[1]
+                 used_inputs.add(main_input[0])

          return resolved

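The priority order is: direct name match, then workflow_alias, then the first unused auto_map_from alternative. A standalone sketch of that ordering (FakeParam mirrors only the fields the resolver reads, not the real NodeParameter):

    from dataclasses import dataclass, field

    @dataclass
    class FakeParam:
        name: str
        workflow_alias: str | None = None
        auto_map_from: list[str] = field(default_factory=list)

    def resolve(inputs: dict, params: dict) -> dict:
        # Same priority order as the single-pass loop above
        resolved, used = {}, set()
        for name, p in params.items():
            if name in inputs:                       # 1. direct match
                resolved[name] = inputs[name]
                used.add(name)
            elif p.workflow_alias in inputs:         # 2. workflow alias
                resolved[name] = inputs[p.workflow_alias]
                used.add(p.workflow_alias)
            else:                                    # 3. auto_map_from alternatives
                for alt in p.auto_map_from:
                    if alt in inputs and alt not in used:
                        resolved[name] = inputs[alt]
                        used.add(alt)
                        break
        return resolved

    params = {"data": FakeParam("data", workflow_alias="input_data", auto_map_from=["records"])}
    assert resolve({"data": 1, "input_data": 2}, params) == {"data": 1}
    assert resolve({"input_data": 2, "records": 3}, params) == {"data": 2}
    assert resolve({"records": 3}, params) == {"data": 3}
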
@@ -973,66 +1099,57 @@ class Node(ABC):
                  f"Node '{self.id}' execution failed: {type(e).__name__}: {e}"
              ) from e

-     async def execute_async(self, **runtime_inputs) -> dict[str, Any]:
-         """Execute the node asynchronously with validation and error handling.
-
-         This is the async version of execute() that provides the same
-         validation and error handling but allows for async execution.
-
-         Execution flow:
-         1. Logs execution start
-         2. Validates inputs against parameter schema
-         3. Calls async_run() with validated inputs
-         4. Validates outputs are JSON-serializable
-         5. Logs execution time
-         6. Returns validated outputs
+     def get_cache_stats(self) -> dict[str, Any]:
+         """Get parameter cache statistics.

          Returns:
-             Dictionary of validated outputs from async_run()
-
-         Raises:
-             NodeExecutionError: If execution fails
-             NodeValidationError: If input/output validation fails
+             Dictionary containing cache statistics:
+             - enabled: Whether caching is enabled
+             - size: Current cache size
+             - max_size: Maximum cache size
+             - hits: Number of cache hits
+             - misses: Number of cache misses
+             - evictions: Number of cache evictions
+             - hit_rate: Cache hit rate (0-1)
          """
-         from datetime import UTC, datetime
+         with self._param_cache_lock:
+             total_requests = self._cache_hits + self._cache_misses
+             hit_rate = self._cache_hits / total_requests if total_requests > 0 else 0

-         start_time = datetime.now(UTC)
-         self.logger.info(f"Starting async execution of node {self.id}")
-
-         try:
-             # Merge config and runtime inputs
-             merged_inputs = {**self.config, **runtime_inputs}
-
-             # Validate inputs
-             validated_inputs = self.validate_inputs(**merged_inputs)
-             self.logger.debug(f"Validated inputs for {self.id}: {validated_inputs}")
+             return {
+                 "enabled": self._cache_enabled,
+                 "size": len(self._param_cache),
+                 "max_size": self._cache_max_size,
+                 "hits": self._cache_hits,
+                 "misses": self._cache_misses,
+                 "evictions": self._cache_evictions,
+                 "hit_rate": hit_rate,
+             }

-             # Execute node logic asynchronously
-             outputs = await self.async_run(**validated_inputs)
+     def clear_cache(self) -> None:
+         """Clear the parameter resolution cache and reset statistics."""
+         with self._param_cache_lock:
+             self._param_cache.clear()
+             self._cache_hits = 0
+             self._cache_misses = 0
+             self._cache_evictions = 0

-             # Validate outputs
-             validated_outputs = self.validate_outputs(outputs)
+     def warm_cache(self, patterns: list[dict[str, Any]]) -> None:
+         """Warm the cache with known parameter patterns.

-             execution_time = (datetime.now(UTC) - start_time).total_seconds()
-             self.logger.info(
-                 f"Node {self.id} executed successfully (async) in {execution_time:.3f}s"
-             )
-             return validated_outputs
+         Args:
+             patterns: List of parameter dictionaries to pre-cache
+         """
+         if not self._cache_enabled:
+             return

-         except NodeValidationError:
-             # Re-raise validation errors as-is
-             raise
-         except NodeExecutionError:
-             # Re-raise execution errors as-is
-             raise
-         except Exception as e:
-             # Wrap unexpected errors
-             self.logger.error(
-                 f"Node {self.id} async execution failed: {e}", exc_info=True
-             )
-             raise NodeExecutionError(
-                 f"Node '{self.id}' async execution failed: {type(e).__name__}: {e}"
-             ) from e
+         for pattern in patterns:
+             # Simulate parameter resolution to populate cache
+             try:
+                 self.validate_inputs(**pattern)
+             except Exception:
+                 # Ignore validation errors during warmup
+                 pass

      def to_dict(self) -> dict[str, Any]:
          """Convert node to dictionary representation.
@@ -48,8 +48,8 @@ class AsyncNode(Node):
      def execute(self, **runtime_inputs) -> dict[str, Any]:
          """Execute the node synchronously by running async code in a new event loop.

-         This override allows AsyncNode to work with synchronous runtimes like LocalRuntime.
-         It creates a new event loop to run the async code if needed.
+         This override allows AsyncNode to work with synchronous runtimes like LocalRuntime
+         by wrapping the async execution in a synchronous interface.

          Args:
              **runtime_inputs: Runtime inputs for node execution
@@ -64,37 +64,35 @@ class AsyncNode(Node):
          import asyncio
          import sys

-         # Check if we're already in an event loop
-         try:
-             asyncio.get_running_loop()
-             # We're in an event loop - this is problematic for sync execution
-             # Try to use nest_asyncio if available
-             try:
-                 import nest_asyncio
-
-                 nest_asyncio.apply()
-                 return asyncio.run(self.execute_async(**runtime_inputs))
-             except ImportError:
-                 # Fall back to running in a thread pool
-                 import concurrent.futures
-
-                 with concurrent.futures.ThreadPoolExecutor() as executor:
-                     future = executor.submit(
-                         asyncio.run, self.execute_async(**runtime_inputs)
-                     )
-                     return future.result()
-         except RuntimeError:
-             # No event loop running, we can create one
-             if sys.platform == "win32":
-                 # Windows requires special handling
-                 asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
-             return asyncio.run(self.execute_async(**runtime_inputs))
+         # For sync execution, we always create a new event loop
+         # This avoids complexity with nested loops and ensures clean execution
+         if sys.platform == "win32":
+             # Windows requires special handling
+             asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
+
+         # Run the async method in a new event loop
+         return asyncio.run(self.execute_async(**runtime_inputs))
+
+     def run(self, **kwargs) -> dict[str, Any]:
+         """Synchronous run is not supported for AsyncNode.
+
+         AsyncNode subclasses should implement async_run() instead of run().
+         This method exists to provide a clear error message if someone
+         accidentally tries to implement run() on an async node.
+
+         Raises:
+             NotImplementedError: Always, as async nodes must use async_run()
+         """
+         raise NotImplementedError(
+             f"AsyncNode '{self.__class__.__name__}' should implement async_run() method, not run()"
+         )

      async def async_run(self, **kwargs) -> dict[str, Any]:
          """Asynchronous execution method for the node.

-         This method should be overridden by subclasses that require asynchronous
-         execution. The default implementation calls the synchronous run() method.
+         This method should be overridden by subclasses to implement asynchronous
+         execution logic. The default implementation raises NotImplementedError
+         to ensure async nodes properly implement their async behavior.

          Args:
              **kwargs: Input parameters for node execution
@@ -105,8 +103,9 @@ class AsyncNode(Node):
          Raises:
              NodeExecutionError: If execution fails
          """
-         # Default implementation calls the synchronous run() method
-         return self.run(**kwargs)
+         raise NotImplementedError(
+             f"AsyncNode '{self.__class__.__name__}' must implement async_run() method"
+         )

      async def execute_async(self, **runtime_inputs) -> dict[str, Any]:
          """Execute the node asynchronously with validation and error handling.
@@ -34,7 +34,8 @@ from decimal import Decimal
  from enum import Enum
  from typing import Any, AsyncIterator, Optional, Union

- from kailash.nodes.base import Node, NodeParameter, register_node
+ from kailash.nodes.base import NodeParameter, register_node
+ from kailash.nodes.base_async import AsyncNode
  from kailash.sdk_exceptions import NodeExecutionError, NodeValidationError


@@ -425,7 +426,7 @@ class SQLiteAdapter(DatabaseAdapter):


  @register_node()
- class AsyncSQLDatabaseNode(Node):
+ class AsyncSQLDatabaseNode(AsyncNode):
      """Asynchronous SQL database node for high-concurrency database operations.

      This node provides non-blocking database operations with connection pooling,
@@ -713,26 +714,6 @@ class AsyncSQLDatabaseNode(Node):
          except Exception as e:
              raise NodeExecutionError(f"Database query failed: {str(e)}")

-     def run(self, **inputs) -> dict[str, Any]:
-         """Synchronous run method - delegates to async_run."""
-         import asyncio
-
-         import nest_asyncio
-
-         try:
-             # Check if we're already in an event loop
-             loop = asyncio.get_running_loop()
-
-             # Apply nest_asyncio to allow nested event loops
-             nest_asyncio.apply()
-
-             # Now we can safely run even in an existing event loop
-             return asyncio.run(self.async_run(**inputs))
-
-         except RuntimeError:
-             # No event loop running, we can use asyncio.run() directly
-             return asyncio.run(self.async_run(**inputs))
-
      async def process(self, inputs: dict[str, Any]) -> dict[str, Any]:
          """Async process method for middleware compatibility."""
          return await self.async_run(**inputs)
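
With its run() shim gone, AsyncSQLDatabaseNode relies on the execute()/execute_async() bridge it now inherits from AsyncNode. A sketch of both call paths (the constructor arguments are illustrative and not verified against the node's parameter schema):

    import asyncio

    node = AsyncSQLDatabaseNode(
        name="orders_query",                      # hypothetical configuration
        connection_string="sqlite:///orders.db",  # parameter names assumed
        query="SELECT * FROM orders",
    )

    # Synchronous callers: AsyncNode.execute() spins up a fresh event loop
    result = node.execute()

    # Asynchronous callers: await the validated async path directly
    async def main():
        return await node.execute_async()

    result = asyncio.run(main())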