kailash 0.6.6__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. kailash/__init__.py +35 -5
  2. kailash/access_control.py +64 -46
  3. kailash/adapters/__init__.py +5 -0
  4. kailash/adapters/mcp_platform_adapter.py +273 -0
  5. kailash/api/workflow_api.py +34 -3
  6. kailash/channels/__init__.py +21 -0
  7. kailash/channels/api_channel.py +409 -0
  8. kailash/channels/base.py +271 -0
  9. kailash/channels/cli_channel.py +661 -0
  10. kailash/channels/event_router.py +496 -0
  11. kailash/channels/mcp_channel.py +648 -0
  12. kailash/channels/session.py +423 -0
  13. kailash/mcp_server/discovery.py +57 -18
  14. kailash/middleware/communication/api_gateway.py +23 -3
  15. kailash/middleware/communication/realtime.py +83 -0
  16. kailash/middleware/core/agent_ui.py +1 -1
  17. kailash/middleware/gateway/storage_backends.py +393 -0
  18. kailash/middleware/mcp/enhanced_server.py +22 -16
  19. kailash/nexus/__init__.py +21 -0
  20. kailash/nexus/cli/__init__.py +5 -0
  21. kailash/nexus/cli/__main__.py +6 -0
  22. kailash/nexus/cli/main.py +176 -0
  23. kailash/nexus/factory.py +413 -0
  24. kailash/nexus/gateway.py +545 -0
  25. kailash/nodes/__init__.py +8 -5
  26. kailash/nodes/ai/iterative_llm_agent.py +988 -17
  27. kailash/nodes/ai/llm_agent.py +29 -9
  28. kailash/nodes/api/__init__.py +2 -2
  29. kailash/nodes/api/monitoring.py +1 -1
  30. kailash/nodes/base.py +29 -5
  31. kailash/nodes/base_async.py +54 -14
  32. kailash/nodes/code/async_python.py +1 -1
  33. kailash/nodes/code/python.py +50 -6
  34. kailash/nodes/data/async_sql.py +90 -0
  35. kailash/nodes/data/bulk_operations.py +939 -0
  36. kailash/nodes/data/query_builder.py +373 -0
  37. kailash/nodes/data/query_cache.py +512 -0
  38. kailash/nodes/monitoring/__init__.py +10 -0
  39. kailash/nodes/monitoring/deadlock_detector.py +964 -0
  40. kailash/nodes/monitoring/performance_anomaly.py +1078 -0
  41. kailash/nodes/monitoring/race_condition_detector.py +1151 -0
  42. kailash/nodes/monitoring/transaction_metrics.py +790 -0
  43. kailash/nodes/monitoring/transaction_monitor.py +931 -0
  44. kailash/nodes/security/behavior_analysis.py +414 -0
  45. kailash/nodes/system/__init__.py +17 -0
  46. kailash/nodes/system/command_parser.py +820 -0
  47. kailash/nodes/transaction/__init__.py +48 -0
  48. kailash/nodes/transaction/distributed_transaction_manager.py +983 -0
  49. kailash/nodes/transaction/saga_coordinator.py +652 -0
  50. kailash/nodes/transaction/saga_state_storage.py +411 -0
  51. kailash/nodes/transaction/saga_step.py +467 -0
  52. kailash/nodes/transaction/transaction_context.py +756 -0
  53. kailash/nodes/transaction/two_phase_commit.py +978 -0
  54. kailash/nodes/transform/processors.py +17 -1
  55. kailash/nodes/validation/__init__.py +21 -0
  56. kailash/nodes/validation/test_executor.py +532 -0
  57. kailash/nodes/validation/validation_nodes.py +447 -0
  58. kailash/resources/factory.py +1 -1
  59. kailash/runtime/access_controlled.py +9 -7
  60. kailash/runtime/async_local.py +84 -21
  61. kailash/runtime/local.py +21 -2
  62. kailash/runtime/parameter_injector.py +187 -31
  63. kailash/runtime/runner.py +6 -4
  64. kailash/runtime/testing.py +1 -1
  65. kailash/security.py +22 -3
  66. kailash/servers/__init__.py +32 -0
  67. kailash/servers/durable_workflow_server.py +430 -0
  68. kailash/servers/enterprise_workflow_server.py +522 -0
  69. kailash/servers/gateway.py +183 -0
  70. kailash/servers/workflow_server.py +293 -0
  71. kailash/utils/data_validation.py +192 -0
  72. kailash/workflow/builder.py +382 -15
  73. kailash/workflow/cyclic_runner.py +102 -10
  74. kailash/workflow/validation.py +144 -8
  75. kailash/workflow/visualization.py +99 -27
  76. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/METADATA +3 -2
  77. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/RECORD +81 -40
  78. kailash/workflow/builder_improvements.py +0 -207
  79. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/WHEEL +0 -0
  80. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/entry_points.txt +0 -0
  81. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/licenses/LICENSE +0 -0
  82. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/llm_agent.py CHANGED
@@ -363,6 +363,13 @@ class LLMAgentNode(Node):
                 default={},
                 description="Configuration for tool execution behavior",
             ),
+            "use_real_mcp": NodeParameter(
+                name="use_real_mcp",
+                type=bool,
+                required=False,
+                default=True,
+                description="Use real MCP tool execution instead of mock execution",
+            ),
         }
 
     def run(self, **kwargs) -> dict[str, Any]:
@@ -629,12 +636,14 @@ class LLMAgentNode(Node):
         )
 
         # Retrieve MCP context if configured
-        mcp_context_data = self._retrieve_mcp_context(mcp_servers, mcp_context)
+        mcp_context_data = self._retrieve_mcp_context(
+            mcp_servers, mcp_context, kwargs
+        )
 
         # Discover MCP tools if enabled
         discovered_mcp_tools = []
         if auto_discover_tools and mcp_servers:
-            discovered_mcp_tools = self._discover_mcp_tools(mcp_servers)
+            discovered_mcp_tools = self._discover_mcp_tools(mcp_servers, kwargs)
             # Merge MCP tools with existing tools
             tools = self._merge_tools(tools, discovered_mcp_tools)
 
@@ -976,7 +985,7 @@ class LLMAgentNode(Node):
         return asyncio.run(coro)
 
     def _retrieve_mcp_context(
-        self, mcp_servers: list[dict], mcp_context: list[str]
+        self, mcp_servers: list[dict], mcp_context: list[str], kwargs: dict = None
     ) -> list[dict[str, Any]]:
         """
         Retrieve context from Model Context Protocol (MCP) servers.
@@ -1043,7 +1052,7 @@ class LLMAgentNode(Node):
         context_data = []
 
         # Check if we should use real MCP implementation
-        use_real_mcp = hasattr(self, "_mcp_client") or self._should_use_real_mcp()
+        use_real_mcp = hasattr(self, "_mcp_client") or self._should_use_real_mcp(kwargs)
 
         if use_real_mcp:
             # Use internal MCP client for real implementation
@@ -1224,14 +1233,25 @@ class LLMAgentNode(Node):
 
         return context_data
 
-    def _should_use_real_mcp(self) -> bool:
+    def _should_use_real_mcp(self, kwargs: dict = None) -> bool:
         """Check if real MCP implementation should be used."""
-        # Check environment variable or configuration
         import os
 
-        return os.environ.get("KAILASH_USE_REAL_MCP", "false").lower() == "true"
+        # 1. Check explicit parameter first (highest priority)
+        if kwargs and "use_real_mcp" in kwargs:
+            return kwargs["use_real_mcp"]
 
-    def _discover_mcp_tools(self, mcp_servers: list[dict]) -> list[dict[str, Any]]:
+        # 2. Check environment variable (fallback)
+        env_value = os.environ.get("KAILASH_USE_REAL_MCP", "").lower()
+        if env_value in ("true", "false"):
+            return env_value == "true"
+
+        # 3. Default to True (real MCP execution)
+        return True
+
+    def _discover_mcp_tools(
+        self, mcp_servers: list[dict], kwargs: dict = None
+    ) -> list[dict[str, Any]]:
         """
         Discover available tools from MCP servers.
 
@@ -1244,7 +1264,7 @@ class LLMAgentNode(Node):
         discovered_tools = []
 
         # Check if we should use real MCP implementation
-        use_real_mcp = hasattr(self, "_mcp_client") or self._should_use_real_mcp()
+        use_real_mcp = hasattr(self, "_mcp_client") or self._should_use_real_mcp(kwargs)
 
         if use_real_mcp:
             try:
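Taken together, these hunks give the new use_real_mcp switch a three-level precedence: an explicit runtime parameter wins, the KAILASH_USE_REAL_MCP environment variable is the fallback, and the default in 0.8.0 is real MCP execution (0.6.6 effectively defaulted to mock execution). A minimal standalone sketch of that resolution order; the helper name below is illustrative and not part of the library:

import os


def resolve_use_real_mcp(runtime_kwargs: dict = None) -> bool:
    # 1. Explicit runtime parameter wins.
    if runtime_kwargs and "use_real_mcp" in runtime_kwargs:
        return bool(runtime_kwargs["use_real_mcp"])
    # 2. Environment variable is the fallback, only when set to a recognized value.
    env_value = os.environ.get("KAILASH_USE_REAL_MCP", "").lower()
    if env_value in ("true", "false"):
        return env_value == "true"
    # 3. New default in 0.8.0: real MCP execution.
    return True


print(resolve_use_real_mcp({"use_real_mcp": False}))  # False - explicit parameter wins
print(resolve_use_real_mcp())  # True, unless KAILASH_USE_REAL_MCP says otherwise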
kailash/nodes/api/__init__.py CHANGED
@@ -23,7 +23,7 @@ import warnings
 from .auth import APIKeyNode, BasicAuthNode, OAuth2Node
 from .graphql import AsyncGraphQLClientNode, GraphQLClientNode
 from .http import AsyncHTTPRequestNode, HTTPRequestNode
-from .monitoring import HealthCheckNode
+from .monitoring import APIHealthCheckNode
 from .rate_limiting import (
     AsyncRateLimitedAPINode,
     RateLimitConfig,
@@ -71,7 +71,7 @@ __all__ = [
     "AsyncRateLimitedAPINode",
     "create_rate_limiter",
     # Monitoring and Security
-    "HealthCheckNode",
+    "APIHealthCheckNode",
     "SecurityScannerNode",
     # Backwards compatibility
     "HTTPClientNode",  # Deprecated alias
kailash/nodes/api/monitoring.py CHANGED
@@ -13,7 +13,7 @@ from kailash.nodes.base import Node, NodeParameter, register_node
 
 
 @register_node()
-class HealthCheckNode(Node):
+class APIHealthCheckNode(Node):
     """
     Performs health checks on various system components and services.
 
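Because kailash.nodes.api re-exports the class (see the __init__.py hunk above), downstream code only needs to switch to the new name; a minimal migration sketch:

# kailash 0.6.6 (old name, no longer exported):
#   from kailash.nodes.api import HealthCheckNode
# kailash 0.8.0 (renamed class):
from kailash.nodes.api import APIHealthCheckNode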
kailash/nodes/base.py CHANGED
@@ -1427,11 +1427,35 @@ class NodeRegistry:
         - Factory methods: Dynamic node creation
         """
         if node_name not in cls._nodes:
-            available_nodes = ", ".join(sorted(cls._nodes.keys()))
-            raise NodeConfigurationError(
-                f"Node '{node_name}' not found in registry. "
-                f"Available nodes: {available_nodes}"
-            )
+            available_nodes = sorted(cls._nodes.keys())
+
+            # Provide more helpful error message based on registry state
+            if not available_nodes:
+                # Registry is empty - likely a test isolation or import issue
+                raise NodeConfigurationError(
+                    f"Node '{node_name}' not found in registry. "
+                    f"The node registry is empty. This usually means:\n"
+                    f"  1. Nodes haven't been imported yet (try: import kailash.nodes)\n"
+                    f"  2. Test isolation cleared the registry without re-importing\n"
+                    f"  3. The node module containing '{node_name}' wasn't imported\n"
+                    f"Common nodes: PythonCodeNode, CSVReaderNode, SQLDatabaseNode, HTTPRequestNode"
+                )
+            else:
+                # Registry has nodes but not the requested one
+                nodes_list = ", ".join(available_nodes)
+
+                # Try to suggest similar node names
+                similar = [
+                    n
+                    for n in available_nodes
+                    if node_name.lower() in n.lower() or n.lower() in node_name.lower()
+                ]
+                suggestion = f"\nDid you mean: {', '.join(similar)}?" if similar else ""
+
+                raise NodeConfigurationError(
+                    f"Node '{node_name}' not found in registry. "
+                    f"Available nodes: {nodes_list}{suggestion}"
+                )
         return cls._nodes[node_name]
 
     @classmethod
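The "Did you mean" suggestion is a case-insensitive substring match in both directions; a standalone sketch of the same heuristic (illustrative function, not the registry API):

def suggest_similar(node_name: str, available_nodes: list[str]) -> list[str]:
    """Return registered names that contain, or are contained in, the query."""
    return [
        n
        for n in available_nodes
        if node_name.lower() in n.lower() or n.lower() in node_name.lower()
    ]


# A near-miss lookup still surfaces the intended node:
print(suggest_similar("CSVReader", ["CSVReaderNode", "HTTPRequestNode"]))
# ['CSVReaderNode']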
kailash/nodes/base_async.py CHANGED
@@ -46,10 +46,13 @@ class AsyncNode(Node):
     """
 
     def execute(self, **runtime_inputs) -> dict[str, Any]:
-        """Execute the node synchronously by running async code in a new event loop.
+        """Execute the node synchronously by running async code with proper event loop handling.
 
-        This override allows AsyncNode to work with synchronous runtimes like LocalRuntime
-        by wrapping the async execution in a synchronous interface.
+        This enhanced implementation handles all event loop scenarios:
+        1. No event loop: Create new one with asyncio.run()
+        2. Event loop running: Use ThreadPoolExecutor with isolated loop
+        3. Threaded contexts: Proper thread-safe execution
+        4. Windows compatibility: ProactorEventLoopPolicy support
 
         Args:
             **runtime_inputs: Runtime inputs for node execution
@@ -62,7 +65,9 @@ class AsyncNode(Node):
             NodeExecutionError: If execution fails
         """
         import asyncio
+        import concurrent.futures
         import sys
+        import threading
 
         # For sync execution, we always create a new event loop
         # This avoids complexity with nested loops and ensures clean execution
@@ -70,22 +75,57 @@ class AsyncNode(Node):
             # Windows requires special handling
             asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
 
+        # Check if we're in a thread without an event loop
+        current_thread = threading.current_thread()
+        is_main_thread = isinstance(current_thread, threading._MainThread)
+
         # Run the async method - handle existing event loop
         try:
             # Try to get current event loop
             loop = asyncio.get_running_loop()
+            # Event loop is running - need to run in separate thread
+            return self._execute_in_thread(**runtime_inputs)
         except RuntimeError:
-            # No event loop running, safe to use asyncio.run()
-            return asyncio.run(self.execute_async(**runtime_inputs))
-        else:
-            # Event loop is running, create a task
-            import concurrent.futures
-
-            with concurrent.futures.ThreadPoolExecutor() as executor:
-                future = executor.submit(
-                    asyncio.run, self.execute_async(**runtime_inputs)
-                )
-                return future.result()
+            # No event loop running
+            if is_main_thread:
+                # Main thread without loop - safe to use asyncio.run()
+                return asyncio.run(self.execute_async(**runtime_inputs))
+            else:
+                # Non-main thread without loop - create new loop
+                return self._execute_in_new_loop(**runtime_inputs)
+
+    def _execute_in_thread(self, **runtime_inputs) -> dict[str, Any]:
+        """Execute async code in a separate thread with its own event loop."""
+        import asyncio
+        import concurrent.futures
+
+        def run_in_new_loop():
+            """Run async code in a completely new event loop."""
+            # Create fresh event loop for this thread
+            new_loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(new_loop)
+            try:
+                return new_loop.run_until_complete(self.execute_async(**runtime_inputs))
+            finally:
+                new_loop.close()
+                asyncio.set_event_loop(None)
+
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            future = executor.submit(run_in_new_loop)
+            return future.result()
+
+    def _execute_in_new_loop(self, **runtime_inputs) -> dict[str, Any]:
+        """Execute async code by creating a new event loop in current thread."""
+        import asyncio
+
+        # Create and set new event loop for this thread
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        try:
+            return loop.run_until_complete(self.execute_async(**runtime_inputs))
+        finally:
+            loop.close()
+            asyncio.set_event_loop(None)
 
     def run(self, **kwargs) -> dict[str, Any]:
         """Synchronous run is not supported for AsyncNode.
kailash/nodes/code/async_python.py CHANGED
@@ -783,7 +783,7 @@ class AsyncPythonCodeNode(AsyncNode):
             "max_concurrent_tasks",
             "max_memory_mb",
             "imports",
-            "config",
+            # Note: "config" removed - it's a valid runtime parameter name
         }
         runtime_inputs = {k: v for k, v in kwargs.items() if k not in config_params}
 
kailash/nodes/code/python.py CHANGED
@@ -94,6 +94,7 @@ ALLOWED_MODULES = {
     "matplotlib",
     "seaborn",
     "plotly",
+    "array",  # Required by numpy internally
     # File processing modules
     "csv",  # For CSV file processing
     "mimetypes",  # For MIME type detection
@@ -419,12 +420,55 @@ class CodeExecutor:
         }
 
         # Add allowed modules
-        for module_name in self.allowed_modules:
-            try:
-                module = importlib.import_module(module_name)
-                namespace[module_name] = module
-            except ImportError:
-                logger.warning(f"Module {module_name} not available")
+        # Check if we're running under coverage to avoid instrumentation conflicts
+        import sys
+
+        if "coverage" in sys.modules:
+            # Under coverage, use lazy loading for problematic modules
+            problematic_modules = {
+                "numpy",
+                "scipy",
+                "sklearn",
+                "pandas",
+                "matplotlib",
+                "seaborn",
+                "plotly",
+                "array",
+            }
+            safe_modules = self.allowed_modules - problematic_modules
+
+            # Eagerly load safe modules
+            for module_name in safe_modules:
+                try:
+                    module = importlib.import_module(module_name)
+                    namespace[module_name] = module
+                except ImportError:
+                    logger.warning(f"Module {module_name} not available")
+
+            # Add lazy loader for problematic modules
+            class LazyModuleLoader:
+                def __getattr__(self, name):
+                    if name in problematic_modules:
+                        return importlib.import_module(name)
+                    raise AttributeError(f"Module {name} not found")
+
+            # Make problematic modules available through lazy loading
+            for module_name in problematic_modules:
+                try:
+                    # Try to import the module directly
+                    module = importlib.import_module(module_name)
+                    namespace[module_name] = module
+                except ImportError:
+                    # If import fails, use lazy loader as fallback
+                    namespace[module_name] = LazyModuleLoader()
+        else:
+            # Normal operation - eagerly load all modules
+            for module_name in self.allowed_modules:
+                try:
+                    module = importlib.import_module(module_name)
+                    namespace[module_name] = module
+                except ImportError:
+                    logger.warning(f"Module {module_name} not available")
 
         # Add sanitized inputs
         namespace.update(sanitized_inputs)
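A more conventional variant of the same lazy-loading idea, deferring a single module's import until first attribute access; this is a generic sketch of the technique, not the CodeExecutor class above:

import importlib


class LazyModule:
    """Proxy that imports the named module only when an attribute is first accessed."""

    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)


# Import cost (and any coverage instrumentation side effects) is paid only on first use:
math_mod = LazyModule("math")
print(math_mod.sqrt(9))  # 3.0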
kailash/nodes/data/async_sql.py CHANGED
@@ -431,6 +431,7 @@ class PostgreSQLAdapter(DatabaseAdapter):
         fetch_mode: FetchMode = FetchMode.ALL,
         fetch_size: Optional[int] = None,
         transaction: Optional[Any] = None,
+        parameter_types: Optional[dict[str, str]] = None,
     ) -> Any:
         """Execute query and return results."""
         # Convert dict params to positional for asyncpg
@@ -440,8 +441,10 @@ class PostgreSQLAdapter(DatabaseAdapter):
             import json
 
             query_params = []
+            param_names = []  # Track parameter names for type mapping
             for i, (key, value) in enumerate(params.items(), 1):
                 query = query.replace(f":{key}", f"${i}")
+                param_names.append(key)
                 # For PostgreSQL, lists should remain as lists for array operations
                 # Only convert dicts to JSON strings
                 if isinstance(value, dict):
@@ -449,6 +452,24 @@ class PostgreSQLAdapter(DatabaseAdapter):
                     query_params.append(value)
             params = query_params
 
+            # Apply parameter type casts if provided
+            if parameter_types:
+                # Build a query with explicit type casts
+                for i, param_name in enumerate(param_names, 1):
+                    if param_name in parameter_types:
+                        pg_type = parameter_types[param_name]
+                        # Replace $N with $N::type in the query
+                        query = query.replace(f"${i}", f"${i}::{pg_type}")
+
+        else:
+            # For positional params, apply type casts if provided
+            if parameter_types and isinstance(params, (list, tuple)):
+                # Build query with type casts for positional parameters
+                for i, param_type in parameter_types.items():
+                    if isinstance(i, int) and 0 <= i < len(params):
+                        # Replace $N with $N::type
+                        query = query.replace(f"${i+1}", f"${i+1}::{param_type}")
+
         # Ensure params is a list/tuple for asyncpg
         if params is None:
             params = []
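The cast is applied textually: each $N placeholder whose source parameter has a hint is rewritten to $N::<type>. A standalone sketch of the named-parameter path (illustrative helper, not the adapter method):

def apply_type_casts(query, param_names, parameter_types):
    """Append explicit PostgreSQL casts to $N placeholders that have a type hint."""
    for i, param_name in enumerate(param_names, 1):
        if param_name in parameter_types:
            query = query.replace(f"${i}", f"${i}::{parameter_types[param_name]}")
    return query


query = "INSERT INTO user_roles (role_id, metadata) VALUES ($1, $2)"
print(apply_type_casts(query, ["role_id", "metadata"], {"role_id": "text", "metadata": "jsonb"}))
# INSERT INTO user_roles (role_id, metadata) VALUES ($1::text, $2::jsonb)

Note that a plain string replace of "$1" also matches the prefix of "$10" and above, so the purely textual rewrite assumes placeholder numbers that don't overlap.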
@@ -1270,6 +1291,13 @@ class AsyncSQLDatabaseNode(AsyncNode):
                 default=False,
                 description="Whether to allow administrative SQL commands (CREATE, DROP, etc.)",
             ),
+            NodeParameter(
+                name="parameter_types",
+                type=dict,
+                required=False,
+                default=None,
+                description="Optional PostgreSQL type hints for parameters (e.g., {'role_id': 'text', 'metadata': 'jsonb'})",
+            ),
             NodeParameter(
                 name="retry_config",
                 type=Any,
@@ -1532,6 +1560,9 @@ class AsyncSQLDatabaseNode(AsyncNode):
             "result_format", self.config.get("result_format", "dict")
         )
         user_context = inputs.get("user_context")
+        parameter_types = inputs.get(
+            "parameter_types", self.config.get("parameter_types")
+        )
 
         if not query:
             raise NodeExecutionError("No query provided")
@@ -1576,8 +1607,12 @@ class AsyncSQLDatabaseNode(AsyncNode):
                 fetch_mode=fetch_mode,
                 fetch_size=fetch_size,
                 user_context=user_context,
+                parameter_types=parameter_types,
             )
 
+            # Ensure all data is JSON-serializable (safety net for adapter inconsistencies)
+            result = self._ensure_serializable(result)
+
             # Format results based on requested format
             formatted_data = self._format_results(result, result_format)
 
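A hypothetical invocation showing where the new hint lands; only query, params, and parameter_types are taken from the hunks above, and the connection/configuration arguments (omitted below) are not shown in this diff:

import asyncio

from kailash.nodes.data.async_sql import AsyncSQLDatabaseNode


async def main():
    node = AsyncSQLDatabaseNode(name="update_role")  # connection config omitted (assumed to be set elsewhere)
    result = await node.execute_async(
        query="UPDATE user_roles SET metadata = :metadata WHERE role_id = :role_id",
        params={"metadata": {"scope": "admin"}, "role_id": "role-42"},
        parameter_types={"role_id": "text", "metadata": "jsonb"},
    )
    print(result)


# asyncio.run(main())  # requires a reachable, configured database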
@@ -1795,6 +1830,7 @@ class AsyncSQLDatabaseNode(AsyncNode):
         fetch_mode: FetchMode,
         fetch_size: Optional[int],
         user_context: Any = None,
+        parameter_types: Optional[dict[str, str]] = None,
     ) -> Any:
         """Execute query with retry logic for transient failures.
 
@@ -1823,6 +1859,7 @@ class AsyncSQLDatabaseNode(AsyncNode):
                     params=params,
                     fetch_mode=fetch_mode,
                     fetch_size=fetch_size,
+                    parameter_types=parameter_types,
                 )
 
                 # Apply data masking if access control is enabled
@@ -2010,6 +2047,7 @@ class AsyncSQLDatabaseNode(AsyncNode):
         params: Any,
         fetch_mode: FetchMode,
         fetch_size: Optional[int],
+        parameter_types: Optional[dict[str, str]] = None,
     ) -> Any:
         """Execute query with automatic transaction management.
 
@@ -2034,6 +2072,7 @@ class AsyncSQLDatabaseNode(AsyncNode):
                 fetch_mode=fetch_mode,
                 fetch_size=fetch_size,
                 transaction=self._active_transaction,
+                parameter_types=parameter_types,
             )
         elif self._transaction_mode == "auto":
             # Auto-transaction mode
@@ -2045,6 +2084,7 @@ class AsyncSQLDatabaseNode(AsyncNode):
                     fetch_mode=fetch_mode,
                     fetch_size=fetch_size,
                     transaction=transaction,
+                    parameter_types=parameter_types,
                 )
                 await adapter.commit_transaction(transaction)
                 return result
@@ -2058,6 +2098,7 @@ class AsyncSQLDatabaseNode(AsyncNode):
                 params=params,
                 fetch_mode=fetch_mode,
                 fetch_size=fetch_size,
+                parameter_types=parameter_types,
             )
 
     @classmethod
@@ -2460,6 +2501,55 @@ class AsyncSQLDatabaseNode(AsyncNode):
 
         return modified_query, param_dict
 
+    def _ensure_serializable(self, data: Any) -> Any:
+        """Ensure all data types are JSON-serializable.
+
+        This is a safety net for cases where adapter _convert_row might not be called
+        or might miss certain data types. It recursively processes the data structure
+        to ensure datetime objects and other non-JSON-serializable types are converted.
+
+        Args:
+            data: Raw data from database adapter
+
+        Returns:
+            JSON-serializable data structure
+        """
+        if data is None:
+            return None
+        elif isinstance(data, bool):
+            return data
+        elif isinstance(data, (int, float, str)):
+            return data
+        elif isinstance(data, datetime):
+            return data.isoformat()
+        elif isinstance(data, date):
+            return data.isoformat()
+        elif hasattr(data, "total_seconds"):  # timedelta
+            return data.total_seconds()
+        elif isinstance(data, Decimal):
+            return float(data)
+        elif isinstance(data, bytes):
+            import base64
+
+            return base64.b64encode(data).decode("utf-8")
+        elif hasattr(data, "__str__") and hasattr(data, "hex"):  # UUID-like objects
+            return str(data)
+        elif isinstance(data, dict):
+            return {
+                key: self._ensure_serializable(value) for key, value in data.items()
+            }
+        elif isinstance(data, (list, tuple)):
+            return [self._ensure_serializable(item) for item in data]
+        else:
+            # For any other type, try to convert to string as fallback
+            try:
+                # Test if it's already JSON serializable
+                json.dumps(data)
+                return data
+            except (TypeError, ValueError):
+                # Not serializable, convert to string
+                return str(data)
+
     def _format_results(self, data: list[dict], result_format: str) -> Any:
         """Format query results according to specified format.
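As a quick illustration of what the safety net buys, values that json.dumps would otherwise reject serialize cleanly once normalized this way; the function below is a standalone subset of the rules above, not the node method:

import json
import uuid
from datetime import datetime, timezone
from decimal import Decimal


def ensure_serializable(value):
    """Tiny stand-in covering a subset of the conversions shown above."""
    if isinstance(value, datetime):
        return value.isoformat()
    if isinstance(value, Decimal):
        return float(value)
    if isinstance(value, uuid.UUID):
        return str(value)
    if isinstance(value, dict):
        return {k: ensure_serializable(v) for k, v in value.items()}
    if isinstance(value, (list, tuple)):
        return [ensure_serializable(v) for v in value]
    return value


row = {
    "id": uuid.uuid4(),
    "created_at": datetime.now(timezone.utc),
    "balance": Decimal("12.50"),
}
print(json.dumps(ensure_serializable(row)))  # no TypeError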