kailash-0.9.17-py3-none-any.whl → kailash-0.9.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kailash/__init__.py CHANGED
@@ -5,7 +5,6 @@ that align with container-node architecture while allowing rapid prototyping.
5
5
 
6
6
  New in v0.9.17: AsyncSQL per-pool locking eliminates lock contention bottleneck.
7
7
  Achieves 100% success at 300+ concurrent operations (was 50% failure). 85% performance improvement with per-pool locks.
8
- Previous v0.9.14: Code quality improvements and updated dependencies for DataFlow v0.4.6 compatibility.
9
8
  Previous v0.9.13: Fixed WorkflowBuilder parameter validation false positives (Bug 010).
10
9
  Enhanced validation.py to recognize auto_map_from parameters, eliminating spurious warnings.
11
10
  Previous v0.9.12: SQLite Compatibility & Code Quality improvements.
@@ -53,7 +52,7 @@ except ImportError:
53
52
  # For backward compatibility
54
53
  WorkflowGraph = Workflow
55
54
 
56
- __version__ = "0.9.17"
55
+ __version__ = "0.9.18"
57
56
 
58
57
  __all__ = [
59
58
  # Core workflow components
kailash/mcp_server/subscriptions.py CHANGED
@@ -1075,9 +1075,9 @@ class ResourceSubscriptionManager:
1075
1075
  This method should be overridden or configured to fetch actual resource data.
1076
1076
  For now, it returns basic resource information from the monitored state.
1077
1077
  """
1078
- async with self._resource_monitor._lock:
1079
- if uri in self._resource_monitor._resource_states:
1080
- state = self._resource_monitor._resource_states[uri]
1078
+ async with self.resource_monitor._lock:
1079
+ if uri in self.resource_monitor._resource_states:
1080
+ state = self.resource_monitor._resource_states[uri]
1081
1081
  return {
1082
1082
  "uri": uri,
1083
1083
  "content": state.get("content", {}),
kailash/monitoring/__init__.py CHANGED
@@ -8,38 +8,38 @@ AsyncSQL lock contention monitoring.
8
8
 
9
9
  # Original monitoring imports
10
10
  from .alerts import AlertManager, AlertRule, AlertSeverity
11
- from .metrics import PerformanceMetrics, SecurityMetrics, ValidationMetrics
12
11
 
13
12
  # AsyncSQL lock monitoring imports
14
13
  from .asyncsql_metrics import (
14
+ PROMETHEUS_AVAILABLE,
15
15
  AsyncSQLMetrics,
16
- enable_metrics,
17
16
  disable_metrics,
17
+ enable_metrics,
18
18
  get_global_metrics,
19
- set_global_metrics,
19
+ integrate_with_async_sql,
20
20
  record_lock_acquisition,
21
21
  record_pool_operation,
22
22
  set_active_locks,
23
- integrate_with_async_sql,
24
- PROMETHEUS_AVAILABLE
23
+ set_global_metrics,
25
24
  )
25
+ from .metrics import PerformanceMetrics, SecurityMetrics, ValidationMetrics
26
26
 
27
27
  __all__ = [
28
28
  "ValidationMetrics",
29
29
  "SecurityMetrics",
30
- "PerformanceMetrics",
30
+ "PerformanceMetrics",
31
31
  "AlertManager",
32
32
  "AlertRule",
33
33
  "AlertSeverity",
34
34
  # AsyncSQL monitoring
35
35
  "AsyncSQLMetrics",
36
36
  "enable_metrics",
37
- "disable_metrics",
37
+ "disable_metrics",
38
38
  "get_global_metrics",
39
39
  "set_global_metrics",
40
40
  "record_lock_acquisition",
41
41
  "record_pool_operation",
42
42
  "set_active_locks",
43
43
  "integrate_with_async_sql",
44
- "PROMETHEUS_AVAILABLE"
44
+ "PROMETHEUS_AVAILABLE",
45
45
  ]
kailash/monitoring/asyncsql_metrics.py CHANGED
@@ -6,11 +6,12 @@ per-pool locking performance and contention patterns.
6
6
  """
7
7
 
8
8
  import time
9
- from typing import Optional, Dict, Any
10
9
  from contextlib import asynccontextmanager
10
+ from typing import Any, Dict, Optional
11
11
 
12
12
  try:
13
13
  import prometheus_client
14
+
14
15
  PROMETHEUS_AVAILABLE = True
15
16
  except ImportError:
16
17
  PROMETHEUS_AVAILABLE = False
@@ -18,66 +19,85 @@ except ImportError:
18
19
 
19
20
  class AsyncSQLMetrics:
20
21
  """Prometheus metrics collector for AsyncSQL lock contention monitoring."""
21
-
22
- def __init__(self, enabled: bool = True, registry: Optional[prometheus_client.CollectorRegistry] = None):
22
+
23
+ def __init__(
24
+ self,
25
+ enabled: bool = True,
26
+ registry: Optional[prometheus_client.CollectorRegistry] = None,
27
+ ):
23
28
  """
24
29
  Initialize AsyncSQL metrics collector.
25
-
30
+
26
31
  Args:
27
32
  enabled: Whether to collect metrics (disabled if prometheus_client not available)
28
33
  registry: Custom Prometheus registry (uses default if None)
29
34
  """
30
35
  self.enabled = enabled and PROMETHEUS_AVAILABLE
31
36
  self.registry = registry or prometheus_client.REGISTRY
32
-
37
+
33
38
  if not self.enabled:
34
39
  return
35
-
40
+
36
41
  # Lock acquisition counter
37
42
  self.lock_acquisition_counter = prometheus_client.Counter(
38
- 'asyncsql_lock_acquisitions_total',
39
- 'Total number of AsyncSQL lock acquisitions',
40
- ['pool_key', 'status'], # status: success, timeout, error
41
- registry=self.registry
43
+ "asyncsql_lock_acquisitions_total",
44
+ "Total number of AsyncSQL lock acquisitions",
45
+ ["pool_key", "status"], # status: success, timeout, error
46
+ registry=self.registry,
42
47
  )
43
-
44
- # Lock wait time histogram
48
+
49
+ # Lock wait time histogram
45
50
  self.lock_wait_time_histogram = prometheus_client.Histogram(
46
- 'asyncsql_lock_wait_seconds',
47
- 'Time spent waiting for AsyncSQL locks',
48
- ['pool_key'],
49
- buckets=(0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, float('inf')),
50
- registry=self.registry
51
+ "asyncsql_lock_wait_seconds",
52
+ "Time spent waiting for AsyncSQL locks",
53
+ ["pool_key"],
54
+ buckets=(
55
+ 0.001,
56
+ 0.005,
57
+ 0.01,
58
+ 0.025,
59
+ 0.05,
60
+ 0.1,
61
+ 0.25,
62
+ 0.5,
63
+ 1.0,
64
+ 2.5,
65
+ 5.0,
66
+ float("inf"),
67
+ ),
68
+ registry=self.registry,
51
69
  )
52
-
70
+
53
71
  # Active locks gauge
54
72
  self.active_locks_gauge = prometheus_client.Gauge(
55
- 'asyncsql_active_locks',
56
- 'Number of currently active AsyncSQL locks',
57
- ['pool_key'],
58
- registry=self.registry
73
+ "asyncsql_active_locks",
74
+ "Number of currently active AsyncSQL locks",
75
+ ["pool_key"],
76
+ registry=self.registry,
59
77
  )
60
-
78
+
61
79
  # Pool operations counter
62
80
  self.pool_operations_counter = prometheus_client.Counter(
63
- 'asyncsql_pool_operations_total',
64
- 'Total number of AsyncSQL pool operations',
65
- ['pool_key', 'operation'], # operation: create, cleanup, acquire, release
66
- registry=self.registry
81
+ "asyncsql_pool_operations_total",
82
+ "Total number of AsyncSQL pool operations",
83
+ ["pool_key", "operation"], # operation: create, cleanup, acquire, release
84
+ registry=self.registry,
67
85
  )
68
-
86
+
69
87
  # Lock contention summary
70
88
  self.lock_contention_summary = prometheus_client.Summary(
71
- 'asyncsql_lock_contention_seconds',
72
- 'Summary of AsyncSQL lock contention patterns',
73
- ['pool_key'],
74
- registry=self.registry
89
+ "asyncsql_lock_contention_seconds",
90
+ "Summary of AsyncSQL lock contention patterns",
91
+ ["pool_key"],
92
+ registry=self.registry,
75
93
  )
76
-
77
- def record_lock_acquisition(self, pool_key: str, status: str, wait_time: float = 0.0):
94
+
95
+ def record_lock_acquisition(
96
+ self, pool_key: str, status: str, wait_time: float = 0.0
97
+ ):
78
98
  """
79
99
  Record a lock acquisition event.
80
-
100
+
81
101
  Args:
82
102
  pool_key: The pool key for the lock
83
103
  status: 'success', 'timeout', or 'error'
@@ -85,44 +105,46 @@ class AsyncSQLMetrics:
85
105
  """
86
106
  if not self.enabled:
87
107
  return
88
-
108
+
89
109
  self.lock_acquisition_counter.labels(pool_key=pool_key, status=status).inc()
90
-
110
+
91
111
  if wait_time > 0:
92
112
  self.lock_wait_time_histogram.labels(pool_key=pool_key).observe(wait_time)
93
113
  self.lock_contention_summary.labels(pool_key=pool_key).observe(wait_time)
94
-
114
+
95
115
  def set_active_locks(self, pool_key: str, count: int):
96
116
  """
97
117
  Update the count of active locks for a pool.
98
-
118
+
99
119
  Args:
100
120
  pool_key: The pool key
101
121
  count: Number of active locks
102
122
  """
103
123
  if not self.enabled:
104
124
  return
105
-
125
+
106
126
  self.active_locks_gauge.labels(pool_key=pool_key).set(count)
107
-
127
+
108
128
  def record_pool_operation(self, pool_key: str, operation: str):
109
129
  """
110
130
  Record a pool operation event.
111
-
131
+
112
132
  Args:
113
133
  pool_key: The pool key
114
134
  operation: 'create', 'cleanup', 'acquire', 'release'
115
135
  """
116
136
  if not self.enabled:
117
137
  return
118
-
119
- self.pool_operations_counter.labels(pool_key=pool_key, operation=operation).inc()
120
-
138
+
139
+ self.pool_operations_counter.labels(
140
+ pool_key=pool_key, operation=operation
141
+ ).inc()
142
+
121
143
  @asynccontextmanager
122
144
  async def timed_lock_acquisition(self, pool_key: str):
123
145
  """
124
146
  Context manager to time lock acquisition and automatically record metrics.
125
-
147
+
126
148
  Usage:
127
149
  async with metrics.timed_lock_acquisition('my_pool_key'):
128
150
  # Lock acquisition logic here
@@ -131,16 +153,16 @@ class AsyncSQLMetrics:
131
153
  pass
132
154
  """
133
155
  start_time = time.time()
134
- status = 'error'
135
-
156
+ status = "error"
157
+
136
158
  try:
137
159
  yield
138
- status = 'success'
160
+ status = "success"
139
161
  except Exception as e:
140
- if 'timeout' in str(e).lower():
141
- status = 'timeout'
162
+ if "timeout" in str(e).lower():
163
+ status = "timeout"
142
164
  else:
143
- status = 'error'
165
+ status = "error"
144
166
  raise
145
167
  finally:
146
168
  wait_time = time.time() - start_time
@@ -165,13 +187,15 @@ def set_global_metrics(metrics: Optional[AsyncSQLMetrics]):
165
187
  _global_metrics = metrics
166
188
 
167
189
 
168
- def enable_metrics(registry: Optional[prometheus_client.CollectorRegistry] = None) -> AsyncSQLMetrics:
190
+ def enable_metrics(
191
+ registry: Optional[prometheus_client.CollectorRegistry] = None,
192
+ ) -> AsyncSQLMetrics:
169
193
  """
170
194
  Enable global AsyncSQL metrics collection.
171
-
195
+
172
196
  Args:
173
197
  registry: Custom Prometheus registry (uses default if None)
174
-
198
+
175
199
  Returns:
176
200
  The configured metrics instance
177
201
  """
@@ -211,41 +235,41 @@ def set_active_locks(pool_key: str, count: int):
211
235
  def integrate_with_async_sql():
212
236
  """
213
237
  Example of how to integrate metrics with AsyncSQLDatabaseNode.
214
-
238
+
215
239
  This would typically be called during AsyncSQL initialization or
216
240
  through a configuration setting.
217
241
  """
218
242
  if not PROMETHEUS_AVAILABLE:
219
243
  return None
220
-
244
+
221
245
  # Enable metrics
222
246
  metrics = enable_metrics()
223
-
247
+
224
248
  # Example: monkey-patch AsyncSQL methods to include metrics
225
249
  # (This is just an example - actual integration would be cleaner)
226
250
  from kailash.nodes.data.async_sql import AsyncSQLDatabaseNode
227
-
251
+
228
252
  # Store original methods
229
253
  original_get_pool_creation_lock = AsyncSQLDatabaseNode._get_pool_creation_lock
230
254
  original_acquire_lock = AsyncSQLDatabaseNode._acquire_pool_lock_with_timeout
231
-
232
- @classmethod
255
+
256
+ @classmethod
233
257
  def instrumented_get_pool_creation_lock(cls, pool_key: str):
234
258
  """Instrumented version that records pool operations."""
235
- record_pool_operation(pool_key, 'acquire')
259
+ record_pool_operation(pool_key, "acquire")
236
260
  return original_get_pool_creation_lock(pool_key)
237
-
261
+
238
262
  @classmethod
239
263
  async def instrumented_acquire_lock(cls, pool_key: str, timeout: float = 5.0):
240
264
  """Instrumented version that records lock acquisitions."""
241
265
  async with metrics.timed_lock_acquisition(pool_key):
242
266
  async with original_acquire_lock(pool_key, timeout):
243
267
  yield
244
-
268
+
245
269
  # Apply instrumentation
246
270
  AsyncSQLDatabaseNode._get_pool_creation_lock = instrumented_get_pool_creation_lock
247
271
  AsyncSQLDatabaseNode._acquire_pool_lock_with_timeout = instrumented_acquire_lock
248
-
272
+
249
273
  return metrics
250
274
 
251
275
 
@@ -253,23 +277,25 @@ if __name__ == "__main__":
253
277
  # Example usage
254
278
  print("AsyncSQL Metrics Module")
255
279
  print(f"Prometheus available: {PROMETHEUS_AVAILABLE}")
256
-
280
+
257
281
  if PROMETHEUS_AVAILABLE:
258
282
  # Enable metrics
259
283
  metrics = enable_metrics()
260
-
284
+
261
285
  # Simulate some metrics
262
- metrics.record_lock_acquisition('test_pool_1', 'success', 0.005)
263
- metrics.record_lock_acquisition('test_pool_1', 'success', 0.003)
264
- metrics.record_lock_acquisition('test_pool_2', 'timeout', 5.0)
265
- metrics.set_active_locks('test_pool_1', 2)
266
- metrics.record_pool_operation('test_pool_1', 'create')
267
-
286
+ metrics.record_lock_acquisition("test_pool_1", "success", 0.005)
287
+ metrics.record_lock_acquisition("test_pool_1", "success", 0.003)
288
+ metrics.record_lock_acquisition("test_pool_2", "timeout", 5.0)
289
+ metrics.set_active_locks("test_pool_1", 2)
290
+ metrics.record_pool_operation("test_pool_1", "create")
291
+
268
292
  print("Metrics recorded successfully")
269
293
  print("Access metrics at: http://localhost:8000/metrics")
270
294
  print("(Start prometheus_client HTTP server to view metrics)")
271
-
295
+
272
296
  # Start metrics server (for testing)
273
297
  # prometheus_client.start_http_server(8000)
274
298
  else:
275
- print("Install prometheus_client to enable metrics: pip install prometheus_client")
299
+ print(
300
+ "Install prometheus_client to enable metrics: pip install prometheus_client"
301
+ )
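For orientation, a minimal usage sketch of the new metrics module follows. It is not part of the diff; it assumes prometheus_client is installed, and the pool key "users_db" is purely illustrative.

# Hedged usage sketch (not part of the diff) for the asyncsql_metrics module
# shown above. Assumes prometheus_client is installed; "users_db" is illustrative.
import asyncio

from kailash.monitoring.asyncsql_metrics import enable_metrics


async def main() -> None:
    metrics = enable_metrics()  # registers collectors on the default registry
    # Time a (stand-in) lock acquisition; success/timeout/error is recorded on exit.
    async with metrics.timed_lock_acquisition("users_db"):
        await asyncio.sleep(0.001)  # placeholder for real pool-lock acquisition
    metrics.set_active_locks("users_db", 1)
    metrics.record_pool_operation("users_db", "create")


asyncio.run(main())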
kailash/nodes/ai/llm_agent.py CHANGED
@@ -1845,6 +1845,144 @@ class LLMAgentNode(Node):
1845
1845
  "efficiency_score": completion_tokens / max(total_tokens, 1),
1846
1846
  }
1847
1847
 
1848
+ def _extract_tool_call_info(self, tool_call) -> dict[str, Any]:
1849
+ """Extract tool call information from both Pydantic models and dictionaries.
1850
+
1851
+ Handles OpenAI v1.97.1+ Pydantic models and legacy dictionary formats.
1852
+
1853
+ Args:
1854
+ tool_call: Tool call object (either Pydantic model or dict)
1855
+
1856
+ Returns:
1857
+ Dict with normalized tool call information
1858
+
1859
+ Raises:
1860
+ ValueError: If tool_call format is unrecognized or invalid
1861
+ json.JSONDecodeError: If tool arguments contain invalid JSON
1862
+ """
1863
+ if tool_call is None:
1864
+ raise ValueError("tool_call cannot be None")
1865
+
1866
+ # Try to detect OpenAI Pydantic model first (more specific check)
1867
+ try:
1868
+ # Import at runtime to avoid dependency issues
1869
+ from openai.types.chat import ChatCompletionMessageToolCall
1870
+
1871
+ if isinstance(tool_call, ChatCompletionMessageToolCall):
1872
+ # OpenAI Pydantic model format - validated type
1873
+ tool_id = tool_call.id
1874
+ function = tool_call.function
1875
+
1876
+ if not function:
1877
+ raise ValueError(f"Tool call {tool_id} has no function definition")
1878
+
1879
+ tool_name = function.name
1880
+ arguments_str = function.arguments or "{}"
1881
+
1882
+ # Validate required fields
1883
+ if not tool_name:
1884
+ raise ValueError(f"Tool call {tool_id} has no function name")
1885
+
1886
+ # Check for excessively large arguments (10MB limit)
1887
+ if len(arguments_str) > 10 * 1024 * 1024:
1888
+ raise ValueError(
1889
+ f"Tool call {tool_id} arguments too large ({len(arguments_str)} bytes). "
1890
+ f"Maximum allowed is 10MB."
1891
+ )
1892
+
1893
+ # Parse arguments - let JSONDecodeError propagate if invalid
1894
+ try:
1895
+ arguments_dict = json.loads(arguments_str) if arguments_str else {}
1896
+ except json.JSONDecodeError as e:
1897
+ # Log the error with context but still raise it
1898
+ self.logger.error(
1899
+ f"Invalid JSON in tool arguments for {tool_name} (id: {tool_id}): {arguments_str[:100]}... Error: {e}"
1900
+ )
1901
+ raise json.JSONDecodeError(
1902
+ f"Invalid JSON in tool '{tool_name}' arguments: {e.msg}",
1903
+ e.doc,
1904
+ e.pos,
1905
+ )
1906
+
1907
+ self.logger.debug(
1908
+ f"Extracted Pydantic tool call: {tool_name} (id: {tool_id})"
1909
+ )
1910
+
1911
+ return {
1912
+ "id": tool_id,
1913
+ "name": tool_name,
1914
+ "arguments": arguments_str,
1915
+ "arguments_dict": arguments_dict,
1916
+ }
1917
+
1918
+ except ImportError:
1919
+ # OpenAI not installed or old version - fall through to dict handling
1920
+ pass
1921
+ except TypeError:
1922
+ # Not a Pydantic model - fall through to dict handling
1923
+ pass
1924
+
1925
+ # Check if it's a dictionary format
1926
+ if isinstance(tool_call, dict):
1927
+ # Legacy dictionary format
1928
+ tool_id = tool_call.get("id")
1929
+ function = tool_call.get("function", {})
1930
+
1931
+ if not tool_id:
1932
+ raise ValueError("Tool call dictionary missing required 'id' field")
1933
+
1934
+ if not isinstance(function, dict):
1935
+ raise ValueError(
1936
+ f"Tool call {tool_id} 'function' field must be a dictionary"
1937
+ )
1938
+
1939
+ tool_name = function.get("name")
1940
+ arguments_str = function.get("arguments", "{}")
1941
+
1942
+ if not tool_name:
1943
+ raise ValueError(
1944
+ f"Tool call {tool_id} missing required 'function.name' field"
1945
+ )
1946
+
1947
+ # Check for excessively large arguments (10MB limit)
1948
+ if len(arguments_str) > 10 * 1024 * 1024:
1949
+ raise ValueError(
1950
+ f"Tool call {tool_id} arguments too large ({len(arguments_str)} bytes). "
1951
+ f"Maximum allowed is 10MB."
1952
+ )
1953
+
1954
+ # Parse arguments - let JSONDecodeError propagate if invalid
1955
+ try:
1956
+ arguments_dict = json.loads(arguments_str) if arguments_str else {}
1957
+ except json.JSONDecodeError as e:
1958
+ # Log the error with context but still raise it
1959
+ self.logger.error(
1960
+ f"Invalid JSON in tool arguments for {tool_name} (id: {tool_id}): {arguments_str[:100]}... Error: {e}"
1961
+ )
1962
+ raise json.JSONDecodeError(
1963
+ f"Invalid JSON in tool '{tool_name}' arguments: {e.msg}",
1964
+ e.doc,
1965
+ e.pos,
1966
+ )
1967
+
1968
+ self.logger.debug(
1969
+ f"Extracted dictionary tool call: {tool_name} (id: {tool_id})"
1970
+ )
1971
+
1972
+ return {
1973
+ "id": tool_id,
1974
+ "name": tool_name,
1975
+ "arguments": arguments_str,
1976
+ "arguments_dict": arguments_dict,
1977
+ }
1978
+
1979
+ # Unknown format - raise informative error
1980
+ raise ValueError(
1981
+ f"Unrecognized tool_call format: {type(tool_call)}. "
1982
+ f"Expected OpenAI ChatCompletionMessageToolCall or dict with 'id' and 'function' fields. "
1983
+ f"Got: {repr(tool_call)[:200]}..."
1984
+ )
1985
+
1848
1986
  async def _execute_mcp_tool_call(
1849
1987
  self, tool_call: dict, mcp_tools: list[dict]
1850
1988
  ) -> dict[str, Any]:
@@ -1857,8 +1995,10 @@ class LLMAgentNode(Node):
1857
1995
  Returns:
1858
1996
  Tool execution result
1859
1997
  """
1860
- tool_name = tool_call.get("function", {}).get("name", "")
1861
- tool_args = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
1998
+ # Handle both OpenAI Pydantic models and dictionary formats
1999
+ tool_info = self._extract_tool_call_info(tool_call)
2000
+ tool_name = tool_info["name"]
2001
+ tool_args = tool_info["arguments_dict"]
1862
2002
 
1863
2003
  # Find the MCP tool definition
1864
2004
  mcp_tool = None
@@ -1922,8 +2062,10 @@ class LLMAgentNode(Node):
1922
2062
 
1923
2063
  for tool_call in tool_calls:
1924
2064
  try:
1925
- tool_name = tool_call.get("function", {}).get("name")
1926
- tool_id = tool_call.get("id")
2065
+ # Handle both OpenAI Pydantic models and dictionary formats
2066
+ tool_info = self._extract_tool_call_info(tool_call)
2067
+ tool_name = tool_info["name"]
2068
+ tool_id = tool_info["id"]
1927
2069
 
1928
2070
  # Check if this is an MCP tool
1929
2071
  if tool_name in mcp_tool_names:
@@ -1947,13 +2089,36 @@ class LLMAgentNode(Node):
1947
2089
  }
1948
2090
  )
1949
2091
 
2092
+ except (ValueError, json.JSONDecodeError) as e:
2093
+ # Handle extraction errors specifically
2094
+ self.logger.error(f"Tool call extraction failed: {e}")
2095
+ # Try to get minimal info for error reporting
2096
+ if isinstance(tool_call, dict):
2097
+ tool_id = tool_call.get("id", "unknown")
2098
+ tool_name = tool_call.get("function", {}).get("name", "unknown")
2099
+ else:
2100
+ tool_id = getattr(tool_call, "id", "unknown")
2101
+ tool_name = "unknown"
2102
+
2103
+ tool_results.append(
2104
+ {
2105
+ "tool_call_id": tool_id,
2106
+ "content": json.dumps(
2107
+ {
2108
+ "error": f"Invalid tool call format: {str(e)}",
2109
+ "tool": tool_name,
2110
+ "status": "failed",
2111
+ }
2112
+ ),
2113
+ }
2114
+ )
1950
2115
  except Exception as e:
1951
- # Format error result
1952
- tool_name = tool_call.get("function", {}).get("name", "unknown")
2116
+ # Handle other execution errors
2117
+ # Tool info was already extracted successfully if we got here
1953
2118
  self.logger.error(f"Tool execution failed for {tool_name}: {e}")
1954
2119
  tool_results.append(
1955
2120
  {
1956
- "tool_call_id": tool_call.get("id", "unknown"),
2121
+ "tool_call_id": tool_id,
1957
2122
  "content": json.dumps(
1958
2123
  {"error": str(e), "tool": tool_name, "status": "failed"}
1959
2124
  ),
@@ -1974,8 +2139,10 @@ class LLMAgentNode(Node):
1974
2139
  Returns:
1975
2140
  Tool execution result
1976
2141
  """
1977
- tool_name = tool_call.get("function", {}).get("name")
1978
- tool_args = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
2142
+ # Handle both OpenAI Pydantic models and dictionary formats
2143
+ tool_info = self._extract_tool_call_info(tool_call)
2144
+ tool_name = tool_info["name"]
2145
+ tool_args = tool_info["arguments_dict"]
1979
2146
 
1980
2147
  # For now, return a mock result
1981
2148
  # In future, this could execute actual Python functions
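For reference, the two tool-call shapes that _extract_tool_call_info normalizes look roughly like the sketch below. The dictionary is constructed for illustration, and "agent" stands in for an LLMAgentNode instance; neither appears in the diff itself.

# Illustration (not from the diff): the legacy dictionary shape accepted by
# _extract_tool_call_info, mirroring the OpenAI Chat Completions wire format.
legacy_tool_call = {
    "id": "call_123",
    "type": "function",
    "function": {
        "name": "search_documents",
        "arguments": '{"query": "per-pool locking", "limit": 5}',
    },
}

# agent._extract_tool_call_info(legacy_tool_call) would return, per the code above:
# {
#     "id": "call_123",
#     "name": "search_documents",
#     "arguments": '{"query": "per-pool locking", "limit": 5}',
#     "arguments_dict": {"query": "per-pool locking", "limit": 5},
# }
# An openai ChatCompletionMessageToolCall instance takes the Pydantic branch
# instead, and malformed JSON in "arguments" raises json.JSONDecodeError.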
kailash/nodes/data/async_sql.py CHANGED
@@ -1530,15 +1530,28 @@ class SQLiteAdapter(DatabaseAdapter):
1530
1530
 
1531
1531
  if fetch_mode == FetchMode.ONE:
1532
1532
  row = await cursor.fetchone()
1533
- return self._convert_row(dict(row)) if row else None
1533
+ result = self._convert_row(dict(row)) if row else None
1534
1534
  elif fetch_mode == FetchMode.ALL:
1535
1535
  rows = await cursor.fetchall()
1536
- return [self._convert_row(dict(row)) for row in rows]
1536
+ result = [self._convert_row(dict(row)) for row in rows]
1537
1537
  elif fetch_mode == FetchMode.MANY:
1538
1538
  if not fetch_size:
1539
1539
  raise ValueError("fetch_size required for MANY mode")
1540
1540
  rows = await cursor.fetchmany(fetch_size)
1541
- return [self._convert_row(dict(row)) for row in rows]
1541
+ result = [self._convert_row(dict(row)) for row in rows]
1542
+ else:
1543
+ result = []
1544
+
1545
+ # Check if this was an INSERT and capture lastrowid for SQLite
1546
+ if query.strip().upper().startswith("INSERT") and (
1547
+ not result or result == [] or result is None
1548
+ ):
1549
+ # For INSERT without RETURNING, capture lastrowid
1550
+ lastrowid = cursor.lastrowid if hasattr(cursor, "lastrowid") else None
1551
+ if lastrowid is not None:
1552
+ return {"lastrowid": lastrowid}
1553
+
1554
+ return result
1542
1555
  else:
1543
1556
  # Create new connection for non-transactional queries
1544
1557
  if self._is_memory_db:
@@ -1557,6 +1570,19 @@ class SQLiteAdapter(DatabaseAdapter):
1557
1570
  raise ValueError("fetch_size required for MANY mode")
1558
1571
  rows = await cursor.fetchmany(fetch_size)
1559
1572
  result = [self._convert_row(dict(row)) for row in rows]
1573
+ else:
1574
+ result = []
1575
+
1576
+ # Check if this was an INSERT and capture lastrowid for SQLite
1577
+ if query.strip().upper().startswith("INSERT") and (
1578
+ not result or result == []
1579
+ ):
1580
+ # For INSERT without RETURNING, capture lastrowid
1581
+ lastrowid = (
1582
+ cursor.lastrowid if hasattr(cursor, "lastrowid") else None
1583
+ )
1584
+ if lastrowid is not None:
1585
+ result = {"lastrowid": lastrowid}
1560
1586
 
1561
1587
  # Commit for memory databases (needed for INSERT/UPDATE/DELETE)
1562
1588
  await db.commit()
@@ -1577,9 +1603,24 @@ class SQLiteAdapter(DatabaseAdapter):
1577
1603
  if not fetch_size:
1578
1604
  raise ValueError("fetch_size required for MANY mode")
1579
1605
  rows = await cursor.fetchmany(fetch_size)
1580
- return [self._convert_row(dict(row)) for row in rows]
1606
+ result = [self._convert_row(dict(row)) for row in rows]
1607
+ else:
1608
+ result = []
1581
1609
 
1582
- await db.commit()
1610
+ # Check if this was an INSERT and capture lastrowid for SQLite
1611
+ if query.strip().upper().startswith("INSERT") and (
1612
+ not result or result == []
1613
+ ):
1614
+ # For INSERT without RETURNING, capture lastrowid
1615
+ lastrowid = (
1616
+ cursor.lastrowid if hasattr(cursor, "lastrowid") else None
1617
+ )
1618
+ if lastrowid is not None:
1619
+ await db.commit() # Commit before returning
1620
+ return {"lastrowid": lastrowid}
1621
+
1622
+ await db.commit()
1623
+ return result
1583
1624
 
1584
1625
  async def execute_many(
1585
1626
  self,
@@ -3421,28 +3462,37 @@ class AsyncSQLDatabaseNode(AsyncNode):
3421
3462
  parameter_types=parameter_types,
3422
3463
  )
3423
3464
 
3424
- # Ensure all data is JSON-serializable (safety net for adapter inconsistencies)
3425
- result = self._ensure_serializable(result)
3465
+ # Check for special SQLite lastrowid result
3466
+ if isinstance(result, dict) and "lastrowid" in result:
3467
+ # This is a special SQLite INSERT result
3468
+ formatted_data = result # Keep as-is
3469
+ row_count = 1 # One row was inserted
3470
+ else:
3471
+ # Ensure all data is JSON-serializable (safety net for adapter inconsistencies)
3472
+ result = self._ensure_serializable(result)
3426
3473
 
3427
- # Format results based on requested format
3428
- formatted_data = self._format_results(result, result_format)
3474
+ # Format results based on requested format
3475
+ formatted_data = self._format_results(result, result_format)
3476
+ row_count = None # Will be calculated below
3429
3477
 
3430
3478
  # For DataFrame, we need special handling for row count
3431
- row_count = 0
3432
- if result_format == "dataframe":
3433
- try:
3434
- row_count = len(formatted_data)
3435
- except:
3436
- # If pandas isn't available, formatted_data is still a list
3479
+ if row_count is None: # Only calculate if not already set
3480
+ if result_format == "dataframe":
3481
+ try:
3482
+ row_count = len(formatted_data)
3483
+ except:
3484
+ # If pandas isn't available, formatted_data is still a list
3485
+ row_count = (
3486
+ len(result)
3487
+ if isinstance(result, list)
3488
+ else (1 if result else 0)
3489
+ )
3490
+ else:
3437
3491
  row_count = (
3438
3492
  len(result)
3439
3493
  if isinstance(result, list)
3440
3494
  else (1 if result else 0)
3441
3495
  )
3442
- else:
3443
- row_count = (
3444
- len(result) if isinstance(result, list) else (1 if result else 0)
3445
- )
3446
3496
 
3447
3497
  # Extract column names if available
3448
3498
  columns = []
@@ -4677,13 +4727,30 @@ class AsyncSQLDatabaseNode(AsyncNode):
4677
4727
  self._adapter = None
4678
4728
 
4679
4729
  def __del__(self):
4680
- """Ensure connections are closed."""
4730
+ """Ensure connections are closed safely."""
4681
4731
  if self._adapter and self._connected:
4682
- # Schedule cleanup in the event loop if it exists
4732
+ # Try to schedule cleanup, but be resilient to event loop issues
4683
4733
  try:
4684
- loop = asyncio.get_event_loop()
4685
- if not loop.is_closed():
4686
- loop.create_task(self.cleanup())
4687
- except RuntimeError:
4688
- # No event loop, can't clean up async resources
4734
+ import asyncio
4735
+
4736
+ # Check if there's a running event loop that's not closed
4737
+ try:
4738
+ loop = asyncio.get_running_loop()
4739
+ if loop and not loop.is_closed():
4740
+ # Create cleanup task only if loop is healthy
4741
+ try:
4742
+ loop.create_task(self.cleanup())
4743
+ except RuntimeError as e:
4744
+ # Loop might be closing, ignore gracefully
4745
+ logger.debug(f"Could not schedule cleanup task: {e}")
4746
+ else:
4747
+ logger.debug("Event loop is closed, skipping async cleanup")
4748
+ except RuntimeError:
4749
+ # No running event loop - this is normal during shutdown
4750
+ logger.debug(
4751
+ "No running event loop for cleanup, connections will be cleaned by GC"
4752
+ )
4753
+ except Exception as e:
4754
+ # Complete fallback - any unexpected error should not crash __del__
4755
+ logger.debug(f"Error during connection cleanup: {e}")
4689
4756
  pass
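To make the lastrowid behaviour above concrete, here is a small stdlib-only sketch; the table and values are illustrative, and it mirrors rather than calls the adapter code.

# Illustrative only: the SQLiteAdapter change above makes an INSERT without
# RETURNING yield {"lastrowid": <rowid>} instead of an empty result set.
# This stdlib snippet reproduces the same capture logic.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
cur = conn.execute("INSERT INTO users (name) VALUES (?)", ("alice",))

result = {"lastrowid": cur.lastrowid} if cur.lastrowid is not None else []
print(result)  # {'lastrowid': 1}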
kailash/runtime/local.py CHANGED
@@ -2333,7 +2333,10 @@ class LocalRuntime:
2333
2333
  else:
2334
2334
  # Standard node execution (backward compatibility)
2335
2335
  try:
2336
- if hasattr(node, "async_run"):
2336
+ if hasattr(node, "execute_async"):
2337
+ # For AsyncNode and its subclasses, use execute_async which handles event loop properly
2338
+ node_result = await node.execute_async(**inputs)
2339
+ elif hasattr(node, "async_run"):
2337
2340
  node_result = await node.async_run(**inputs)
2338
2341
  else:
2339
2342
  node_result = node.execute(**inputs)
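The node dispatch order introduced above amounts to the following, sketched with a stand-in node object rather than the actual LocalRuntime internals.

# Sketch of the dispatch added in LocalRuntime: prefer execute_async
# (AsyncNode and subclasses), then async_run, then synchronous execute.
async def run_node(node, **inputs):
    if hasattr(node, "execute_async"):
        return await node.execute_async(**inputs)
    if hasattr(node, "async_run"):
        return await node.async_run(**inputs)
    return node.execute(**inputs)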
kailash-0.9.17.dist-info/METADATA → kailash-0.9.19.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kailash
3
- Version: 0.9.17
3
+ Version: 0.9.19
4
4
  Summary: Python SDK for the Kailash container-node architecture
5
5
  Home-page: https://github.com/integrum/kailash-python-sdk
6
6
  Author: Integrum
kailash-0.9.17.dist-info/RECORD → kailash-0.9.19.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
1
- kailash/__init__.py,sha256=ojLQWITpkyPY6cwQ6jbWBeKWxa6m6nO4h6sdugKCMkQ,3032
1
+ kailash/__init__.py,sha256=UXdg3RRcDvHRZQ_btbIDsx1ekoYuJmtoVuRJKbF-sRo,2928
2
2
  kailash/__main__.py,sha256=vr7TVE5o16V6LsTmRFKG6RDKUXHpIWYdZ6Dok2HkHnI,198
3
3
  kailash/access_control.py,sha256=MjKtkoQ2sg1Mgfe7ovGxVwhAbpJKvaepPWr8dxOueMA,26058
4
4
  kailash/access_control_abac.py,sha256=FPfa_8PuDP3AxTjdWfiH3ntwWO8NodA0py9W8SE5dno,30263
@@ -93,7 +93,7 @@ kailash/mcp_server/oauth.py,sha256=GFC2O2ueiTTI6V-91Huevhc3K8CxrHe22knuHfuCTqY,5
93
93
  kailash/mcp_server/protocol.py,sha256=NIdEwJT21JT9ItajXniPNvCbZtTbpqyOC_ZezqsguGE,35694
94
94
  kailash/mcp_server/registry_integration.py,sha256=B8CSLq_O1ea3cXrbVjC3bB_OFgHIP-KS9dk77mNM02I,19791
95
95
  kailash/mcp_server/server.py,sha256=yFp1F4QQl6gkTY_9JJWmiMiwfT-zACLJLubz-NR5sCw,108675
96
- kailash/mcp_server/subscriptions.py,sha256=J0FXg6_8lOffQ5SSwm2_DmqIS2pOii6ikxORANLCy1o,58589
96
+ kailash/mcp_server/subscriptions.py,sha256=UK0Ssjab-lHJ16DFPi6lmvLh5xMYNRZia0DgYb6aQ60,58586
97
97
  kailash/mcp_server/transports.py,sha256=fBa7CTVYTDb0ZbBQTsZ2d8rKvcVuqBIteczq8eqarr4,49919
98
98
  kailash/mcp_server/servers/ai_registry.py,sha256=IdF_keUuJlMsvjLjSAykxxbm46K4qA7eCj7T-lYSrzk,10007
99
99
  kailash/mcp_server/utils/__init__.py,sha256=R20N-iiKXUPxc9MOh6vPO1vIfkPmwhEQ5KNFgGd4xSs,771
@@ -151,9 +151,9 @@ kailash/migration/tests/test_compatibility_checker.py,sha256=Gx_lTedk1K-1sIhGDap
151
151
  kailash/migration/tests/test_integration.py,sha256=-3j3LZdoaZ5HUcwY99wVM30FrE473rHjSH3i_tu3xNY,17202
152
152
  kailash/migration/tests/test_migration_assistant.py,sha256=H0td6dL3Xkw8ivImFcQP_Cuh0WeqDRpbEKJFzuQ1LEc,14615
153
153
  kailash/migration/tests/test_performance_comparator.py,sha256=cQgX4DHfqXYGmcKrl77qtlMBRYDs7xjaFxTih0M3XdE,15257
154
- kailash/monitoring/__init__.py,sha256=41M8uKmU-rWOwNqaDG3Y3uhp0coy6JZE4riMjUMQru4,1167
154
+ kailash/monitoring/__init__.py,sha256=w7We20bpBdcYR3PTfN9lkep8fPEc3T2eenUkNwjdw_s,1167
155
155
  kailash/monitoring/alerts.py,sha256=Hk3Xs0EEkOIBH2ZhlejJBOsLYaPlvRejAAEGqNQISc0,21400
156
- kailash/monitoring/asyncsql_metrics.py,sha256=Wlw8Ypo_WYOsAdjc7YVc3JOxsW4D0ImuZcehKFMLfRs,9487
156
+ kailash/monitoring/asyncsql_metrics.py,sha256=jj9M8D5qHoS3zEFfZYsUCWsy5kb-J5-iYVacmNUaGjE,9577
157
157
  kailash/monitoring/metrics.py,sha256=SiAnL3o6K0QaJHgfAuWBa-0pTkW5zymhuPEsj4bgOgM,22022
158
158
  kailash/nodes/__init__.py,sha256=zn4M0f-sIPAq8bG5golQIxmEY8lG5d55Kzg8UNL2lAY,6392
159
159
  kailash/nodes/__init___original.py,sha256=p2KSo0dyUBCLClU123qpQ0tyv5S_36PTxosNyW58nyY,1031
@@ -184,7 +184,7 @@ kailash/nodes/ai/embedding_generator.py,sha256=akGCzz7zLRSziqEQCiPwL2qWhRWxuM_1R
184
184
  kailash/nodes/ai/hybrid_search.py,sha256=k26uDDP_bwrIpv7Yl7PBCPvWSyQEmTlBjI1IpbgDsO4,35446
185
185
  kailash/nodes/ai/intelligent_agent_orchestrator.py,sha256=LvBqMKc64zSxFWVCjbLKKel2QwEzoTeJAEgna7rZw00,83097
186
186
  kailash/nodes/ai/iterative_llm_agent.py,sha256=h8iP1KFhB_eCDs7UvmY_9y0OUBuprYMj2MLM6dR0W2c,100287
187
- kailash/nodes/ai/llm_agent.py,sha256=NeNJZbV_VOUbULug2LASwyzLyoUO5wi58Bc9sXTubuc,90181
187
+ kailash/nodes/ai/llm_agent.py,sha256=p7_WFXrkvezUleU8mLPE6JzGd3qRhWCqFIBBiMRnGYA,96943
188
188
  kailash/nodes/ai/models.py,sha256=wsEeUTuegy87mnLtKgSTg7ggCXvC1n3MsL-iZ4qujHs,16393
189
189
  kailash/nodes/ai/self_organizing.py,sha256=B7NwKaBW8OHQBf5b0F9bSs8Wm-5BDJ9IjIkxS9h00mg,62885
190
190
  kailash/nodes/ai/semantic_memory.py,sha256=ZTXIgxwMheux712cN__cNrQ3VgHaKcDyfQv_Gto7MRM,18644
@@ -220,7 +220,7 @@ kailash/nodes/compliance/data_retention.py,sha256=90bH_eGwlcDzUdklAJeXQM-RcuLUGQ
220
220
  kailash/nodes/compliance/gdpr.py,sha256=ZMoHZjAo4QtGwtFCzGMrAUBFV3TbZOnJ5DZGZS87Bas,70548
221
221
  kailash/nodes/data/__init__.py,sha256=f0h4ysvXxlyFcNJLvDyXrgJ0ixwDF1cS0pJ2QNPakhg,5213
222
222
  kailash/nodes/data/async_connection.py,sha256=wfArHs9svU48bxGZIiixSV2YVn9cukNgEjagwTRu6J4,17250
223
- kailash/nodes/data/async_sql.py,sha256=dhDBn5Ont0XBLnZz0_gG8s_8dossj50J0upuvanU7fw,185523
223
+ kailash/nodes/data/async_sql.py,sha256=9C-XRTDrzpVwFRrI13ym539UajT0Qgh9jmIjisDPi28,188864
224
224
  kailash/nodes/data/async_vector.py,sha256=HtwQLO25IXu8Vq80qzU8rMkUAKPQ2qM0x8YxjXHlygU,21005
225
225
  kailash/nodes/data/bulk_operations.py,sha256=WVopmosVkIlweFxVt3boLdCPc93EqpYyQ1Ez9mCIt0c,34453
226
226
  kailash/nodes/data/directory.py,sha256=fbfLqD_ijRubk-4xew3604QntPsyDxqaF4k6TpfyjDg,9923
@@ -341,7 +341,7 @@ kailash/runtime/async_local.py,sha256=sYNggSU0R-oo8cCvU5ayodDBqASzUhxu994ZvZxDSC
341
341
  kailash/runtime/compatibility_reporter.py,sha256=TOQD0ODnJdsxEPyNSYOV_zQxu60X_yvHeu26seFOMEA,19807
342
342
  kailash/runtime/docker.py,sha256=sZknVl1PCGfAZeyc0-exTuKlllSyjYlFIgJoiB3CRNs,23500
343
343
  kailash/runtime/hierarchical_switch_executor.py,sha256=k6aPGbpf6z2m6dTbHrEyuDR8ZCvOqUanBGYp70arQn0,20782
344
- kailash/runtime/local.py,sha256=6c2RtT3YisYPnBAOho6lbd4fYyMiJG8EyUBOGmcei_U,201159
344
+ kailash/runtime/local.py,sha256=nIQRWUwSHVg2Daafq_JggBLf-zTDBaGMcwObBzVI0po,201389
345
345
  kailash/runtime/parallel.py,sha256=-M9VVG36RxnrrmdbcBe9IjQWb58tAEEo76RQQ2uIXaE,21084
346
346
  kailash/runtime/parallel_cyclic.py,sha256=yANZHnePjhCPuCFbq3lFQA1K6jbCv5Of5-vIKbCsmZk,19863
347
347
  kailash/runtime/parameter_injection.py,sha256=kG4GhmarsRr5t3VDFbc2G1HSbsZJg6UmienHCE2Ru7o,14852
@@ -424,10 +424,10 @@ kailash/workflow/templates.py,sha256=XQMAKZXC2dlxgMMQhSEOWAF3hIbe9JJt9j_THchhAm8
424
424
  kailash/workflow/type_inference.py,sha256=i1F7Yd_Z3elTXrthsLpqGbOnQBIVVVEjhRpI0HrIjd0,24492
425
425
  kailash/workflow/validation.py,sha256=LdbIPQSokCqSLfWTBhJR82pa_0va44pcVu9dpEM4rvY,45177
426
426
  kailash/workflow/visualization.py,sha256=nHBW-Ai8QBMZtn2Nf3EE1_aiMGi9S6Ui_BfpA5KbJPU,23187
427
- kailash-0.9.17.dist-info/licenses/LICENSE,sha256=9GYZHXVUmx6FdFRNzOeE_w7a_aEGeYbqTVmFtJlrbGk,13438
428
- kailash-0.9.17.dist-info/licenses/NOTICE,sha256=9ssIK4LcHSTFqriXGdteMpBPTS1rSLlYtjppZ_bsjZ0,723
429
- kailash-0.9.17.dist-info/METADATA,sha256=xUZBeaugdsC-xcj_U4bEYCVupaxJA02HCER2c9LmldQ,23528
430
- kailash-0.9.17.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
431
- kailash-0.9.17.dist-info/entry_points.txt,sha256=M_q3b8PG5W4XbhSgESzIJjh3_4OBKtZFYFsOdkr2vO4,45
432
- kailash-0.9.17.dist-info/top_level.txt,sha256=z7GzH2mxl66498pVf5HKwo5wwfPtt9Aq95uZUpH6JV0,8
433
- kailash-0.9.17.dist-info/RECORD,,
427
+ kailash-0.9.19.dist-info/licenses/LICENSE,sha256=9GYZHXVUmx6FdFRNzOeE_w7a_aEGeYbqTVmFtJlrbGk,13438
428
+ kailash-0.9.19.dist-info/licenses/NOTICE,sha256=9ssIK4LcHSTFqriXGdteMpBPTS1rSLlYtjppZ_bsjZ0,723
429
+ kailash-0.9.19.dist-info/METADATA,sha256=RY2liVVkhKdErnyayfo4_vH2OyMVKgvtvimhAT7JWvA,23528
430
+ kailash-0.9.19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
431
+ kailash-0.9.19.dist-info/entry_points.txt,sha256=M_q3b8PG5W4XbhSgESzIJjh3_4OBKtZFYFsOdkr2vO4,45
432
+ kailash-0.9.19.dist-info/top_level.txt,sha256=z7GzH2mxl66498pVf5HKwo5wwfPtt9Aq95uZUpH6JV0,8
433
+ kailash-0.9.19.dist-info/RECORD,,