kailash-0.3.0-py3-none-any.whl → kailash-0.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. kailash/access_control.py +40 -39
  2. kailash/api/auth.py +26 -32
  3. kailash/api/custom_nodes.py +29 -29
  4. kailash/api/custom_nodes_secure.py +35 -35
  5. kailash/api/database.py +17 -17
  6. kailash/api/gateway.py +19 -19
  7. kailash/api/mcp_integration.py +24 -23
  8. kailash/api/studio.py +45 -45
  9. kailash/api/workflow_api.py +8 -8
  10. kailash/cli/commands.py +5 -8
  11. kailash/manifest.py +42 -42
  12. kailash/mcp/__init__.py +1 -1
  13. kailash/mcp/ai_registry_server.py +20 -20
  14. kailash/mcp/client.py +9 -11
  15. kailash/mcp/client_new.py +10 -10
  16. kailash/mcp/server.py +1 -2
  17. kailash/mcp/server_enhanced.py +449 -0
  18. kailash/mcp/servers/ai_registry.py +6 -6
  19. kailash/mcp/utils/__init__.py +31 -0
  20. kailash/mcp/utils/cache.py +267 -0
  21. kailash/mcp/utils/config.py +263 -0
  22. kailash/mcp/utils/formatters.py +293 -0
  23. kailash/mcp/utils/metrics.py +418 -0
  24. kailash/nodes/ai/agents.py +9 -9
  25. kailash/nodes/ai/ai_providers.py +33 -34
  26. kailash/nodes/ai/embedding_generator.py +31 -32
  27. kailash/nodes/ai/intelligent_agent_orchestrator.py +62 -66
  28. kailash/nodes/ai/iterative_llm_agent.py +48 -48
  29. kailash/nodes/ai/llm_agent.py +32 -33
  30. kailash/nodes/ai/models.py +13 -13
  31. kailash/nodes/ai/self_organizing.py +44 -44
  32. kailash/nodes/api/auth.py +11 -11
  33. kailash/nodes/api/graphql.py +13 -13
  34. kailash/nodes/api/http.py +19 -19
  35. kailash/nodes/api/monitoring.py +20 -20
  36. kailash/nodes/api/rate_limiting.py +9 -13
  37. kailash/nodes/api/rest.py +29 -29
  38. kailash/nodes/api/security.py +44 -47
  39. kailash/nodes/base.py +21 -23
  40. kailash/nodes/base_async.py +7 -7
  41. kailash/nodes/base_cycle_aware.py +12 -12
  42. kailash/nodes/base_with_acl.py +5 -5
  43. kailash/nodes/code/python.py +66 -57
  44. kailash/nodes/data/directory.py +6 -6
  45. kailash/nodes/data/event_generation.py +10 -10
  46. kailash/nodes/data/file_discovery.py +28 -31
  47. kailash/nodes/data/readers.py +8 -8
  48. kailash/nodes/data/retrieval.py +10 -10
  49. kailash/nodes/data/sharepoint_graph.py +17 -17
  50. kailash/nodes/data/sources.py +5 -5
  51. kailash/nodes/data/sql.py +13 -13
  52. kailash/nodes/data/streaming.py +25 -25
  53. kailash/nodes/data/vector_db.py +22 -22
  54. kailash/nodes/data/writers.py +7 -7
  55. kailash/nodes/logic/async_operations.py +17 -17
  56. kailash/nodes/logic/convergence.py +11 -11
  57. kailash/nodes/logic/loop.py +4 -4
  58. kailash/nodes/logic/operations.py +11 -11
  59. kailash/nodes/logic/workflow.py +8 -9
  60. kailash/nodes/mixins/mcp.py +17 -17
  61. kailash/nodes/mixins.py +8 -10
  62. kailash/nodes/transform/chunkers.py +3 -3
  63. kailash/nodes/transform/formatters.py +7 -7
  64. kailash/nodes/transform/processors.py +10 -10
  65. kailash/runtime/access_controlled.py +18 -18
  66. kailash/runtime/async_local.py +17 -19
  67. kailash/runtime/docker.py +20 -22
  68. kailash/runtime/local.py +16 -16
  69. kailash/runtime/parallel.py +23 -23
  70. kailash/runtime/parallel_cyclic.py +27 -27
  71. kailash/runtime/runner.py +6 -6
  72. kailash/runtime/testing.py +20 -20
  73. kailash/sdk_exceptions.py +0 -58
  74. kailash/security.py +14 -26
  75. kailash/tracking/manager.py +38 -38
  76. kailash/tracking/metrics_collector.py +15 -14
  77. kailash/tracking/models.py +53 -53
  78. kailash/tracking/storage/base.py +7 -17
  79. kailash/tracking/storage/database.py +22 -23
  80. kailash/tracking/storage/filesystem.py +38 -40
  81. kailash/utils/export.py +21 -21
  82. kailash/utils/templates.py +2 -3
  83. kailash/visualization/api.py +30 -34
  84. kailash/visualization/dashboard.py +17 -17
  85. kailash/visualization/performance.py +16 -16
  86. kailash/visualization/reports.py +25 -27
  87. kailash/workflow/builder.py +8 -8
  88. kailash/workflow/convergence.py +13 -12
  89. kailash/workflow/cycle_analyzer.py +30 -32
  90. kailash/workflow/cycle_builder.py +12 -12
  91. kailash/workflow/cycle_config.py +16 -15
  92. kailash/workflow/cycle_debugger.py +40 -40
  93. kailash/workflow/cycle_exceptions.py +29 -29
  94. kailash/workflow/cycle_profiler.py +21 -21
  95. kailash/workflow/cycle_state.py +20 -22
  96. kailash/workflow/cyclic_runner.py +44 -44
  97. kailash/workflow/graph.py +40 -40
  98. kailash/workflow/mermaid_visualizer.py +9 -11
  99. kailash/workflow/migration.py +22 -22
  100. kailash/workflow/mock_registry.py +6 -6
  101. kailash/workflow/runner.py +9 -9
  102. kailash/workflow/safety.py +12 -13
  103. kailash/workflow/state.py +8 -11
  104. kailash/workflow/templates.py +19 -19
  105. kailash/workflow/validation.py +14 -14
  106. kailash/workflow/visualization.py +22 -22
  107. {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/METADATA +53 -5
  108. kailash-0.3.2.dist-info/RECORD +136 -0
  109. kailash-0.3.0.dist-info/RECORD +0 -130
  110. {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/WHEEL +0 -0
  111. {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/entry_points.txt +0 -0
  112. {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/licenses/LICENSE +0 -0
  113. {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/top_level.txt +0 -0
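
The source hunks shown below come from the two ai-node modules in the list above, and nearly every change is the same typing modernization: imports of Dict, List, and Optional from typing are dropped in favor of the builtin generics dict and list (PEP 585) and X | None unions (PEP 604), together with a few lint-style cleanups (iterating a dict directly instead of over .keys(), deleting a redundant pass). A minimal before/after sketch of the pattern; the function names here are hypothetical, not taken from the package:

# Before (0.3.0 style): typing aliases, valid on any Python 3.x
from typing import Any, Dict, List, Optional

def get_stats(history: List[Dict]) -> Dict[str, Any]:
    return {"count": len(history)}

def find_entry(key: Optional[str] = None) -> Optional[Dict]:
    return None

# After (0.3.2 style): builtin generics (PEP 585) and union syntax (PEP 604)
from typing import Any

def get_stats(history: list[dict]) -> dict[str, Any]:
    return {"count": len(history)}

def find_entry(key: str | None = None) -> dict | None:
    return None

Because these annotations are evaluated when each def executes, the str | None form implies a Python 3.10+ floor (dict[str, Any] alone needs only 3.9+) unless a module opts into postponed evaluation with from __future__ import annotations.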
kailash/nodes/ai/intelligent_agent_orchestrator.py
@@ -21,7 +21,7 @@ import time
 import uuid
 from collections import defaultdict, deque
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from typing import Any

 from kailash.nodes.ai.a2a import SharedMemoryPoolNode
 from kailash.nodes.ai.self_organizing import (
@@ -140,7 +140,7 @@ class IntelligentCacheNode(Node):
         self.cost_metrics = {}
         self.query_abstractions = {}

-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "action": NodeParameter(
                 name="action",
@@ -197,7 +197,7 @@ class IntelligentCacheNode(Node):
             ),
         }

-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         """Execute cache operations."""
         action = kwargs.get("action", "get")

@@ -214,7 +214,7 @@ class IntelligentCacheNode(Node):
         else:
             return {"success": False, "error": f"Unknown action: {action}"}

-    def _cache_data(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    def _cache_data(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Cache data with intelligent indexing."""
         cache_key = kwargs.get("cache_key")
         if not cache_key:
@@ -262,7 +262,7 @@ class IntelligentCacheNode(Node):
             "semantic_tags": semantic_tags,
         }

-    def _get_cached(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    def _get_cached(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Retrieve cached data with intelligent matching."""
         cache_key = kwargs.get("cache_key")
         query = kwargs.get("query")
@@ -307,7 +307,7 @@ class IntelligentCacheNode(Node):

         return {"success": True, "hit": False, "cache_key": cache_key, "query": query}

-    def _find_semantic_matches(self, query: str, threshold: float) -> List[Dict]:
+    def _find_semantic_matches(self, query: str, threshold: float) -> list[dict]:
         """Find semantically similar cached entries."""
         matches = []
         query_words = set(query.lower().split())
@@ -333,14 +333,14 @@ class IntelligentCacheNode(Node):
         matches.sort(key=lambda x: x["similarity"], reverse=True)
         return matches

-    def _generate_cache_key(self, kwargs: Dict[str, Any]) -> str:
+    def _generate_cache_key(self, kwargs: dict[str, Any]) -> str:
         """Generate cache key from request parameters."""
         data_str = json.dumps(kwargs.get("data", {}), sort_keys=True)
         metadata_str = json.dumps(kwargs.get("metadata", {}), sort_keys=True)
         combined = f"{data_str}_{metadata_str}_{time.time()}"
         return hashlib.md5(combined.encode()).hexdigest()[:16]

-    def _invalidate(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    def _invalidate(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Invalidate cache entries."""
         cache_key = kwargs.get("cache_key")
         pattern = kwargs.get("pattern")
@@ -363,7 +363,7 @@ class IntelligentCacheNode(Node):
             "count": len(invalidated_keys),
         }

-    def _cleanup_expired(self) -> Dict[str, Any]:
+    def _cleanup_expired(self) -> dict[str, Any]:
         """Remove expired cache entries."""
         current_time = time.time()
         expired_keys = []
@@ -384,7 +384,7 @@ class IntelligentCacheNode(Node):
             "remaining_entries": len(self.cache),
         }

-    def _get_stats(self) -> Dict[str, Any]:
+    def _get_stats(self) -> dict[str, Any]:
         """Get cache statistics."""
         current_time = time.time()
         active_entries = sum(
@@ -488,7 +488,7 @@ class MCPAgentNode(SelfOrganizingAgentNode):
         self.tool_registry = {}
         self.call_history = deque(maxlen=100)

-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         params = super().get_parameters()

         params.update(
@@ -525,7 +525,7 @@ class MCPAgentNode(SelfOrganizingAgentNode):

         return params

-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         """Execute MCP-enhanced self-organizing agent."""
         # Set up MCP servers
         mcp_servers = kwargs.get("mcp_servers", [])
@@ -556,7 +556,7 @@ class MCPAgentNode(SelfOrganizingAgentNode):

         return result

-    def _setup_mcp_clients(self, servers: List[Dict]):
+    def _setup_mcp_clients(self, servers: list[dict]):
         """Set up MCP clients for configured servers.

         NOTE: MCP is now a built-in capability of LLM agents. This method
@@ -581,7 +581,7 @@ class MCPAgentNode(SelfOrganizingAgentNode):
             except Exception as e:
                 print(f"Failed to register MCP server {server_name}: {e}")

-    def _enhance_task_with_tools(self, task: str, kwargs: Dict) -> str:
+    def _enhance_task_with_tools(self, task: str, kwargs: dict) -> str:
         """Enhance task description with available tools."""
         list(self.tool_registry.keys())

@@ -613,8 +613,8 @@ class MCPAgentNode(SelfOrganizingAgentNode):
         return context

     def _process_tool_calls(
-        self, content: str, cache_node_id: Optional[str]
-    ) -> List[Dict]:
+        self, content: str, cache_node_id: str | None
+    ) -> list[dict]:
         """Process any tool calls mentioned in the agent's response."""
         tool_results = []

@@ -655,15 +655,13 @@ class MCPAgentNode(SelfOrganizingAgentNode):

         return tool_results

-    def _check_cache_for_tool(
-        self, tool_name: str, cache_node_id: str
-    ) -> Optional[Dict]:
+    def _check_cache_for_tool(self, tool_name: str, cache_node_id: str) -> dict | None:
         """Check cache for tool call results."""
         # This would interact with the cache node in a real workflow
         # For now, return None to indicate no cache
         return None

-    def _execute_tool_call(self, tool_name: str, arguments: Dict) -> Dict[str, Any]:
+    def _execute_tool_call(self, tool_name: str, arguments: dict) -> dict[str, Any]:
         """Execute a tool call through MCP."""
         tool_info = self.tool_registry.get(tool_name)
         if not tool_info:
@@ -698,10 +696,9 @@ class MCPAgentNode(SelfOrganizingAgentNode):
         except Exception as e:
             return {"success": False, "error": str(e)}

-    def _cache_tool_result(self, tool_name: str, result: Dict, cache_node_id: str):
+    def _cache_tool_result(self, tool_name: str, result: dict, cache_node_id: str):
         """Cache tool call result."""
         # This would interact with the cache node in a real workflow
-        pass


 @register_node()
@@ -827,7 +824,7 @@ class QueryAnalysisNode(Node):
             },
         }

-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "query": NodeParameter(
                 name="query",
@@ -858,7 +855,7 @@ class QueryAnalysisNode(Node):
             ),
         }

-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         """Analyze query and determine optimal solving approach."""
         query = kwargs.get("query")
         if not query:
@@ -907,7 +904,7 @@ class QueryAnalysisNode(Node):
             },
         }

-    def _analyze_patterns(self, query: str) -> Dict[str, Any]:
+    def _analyze_patterns(self, query: str) -> dict[str, Any]:
         """Analyze query against known patterns."""
         query_lower = query.lower()
         matches = {}
@@ -930,15 +927,14 @@ class QueryAnalysisNode(Node):

         return matches

-    def _assess_complexity(self, query: str, context: Dict, patterns: Dict) -> float:
+    def _assess_complexity(self, query: str, context: dict, patterns: dict) -> float:
         """Assess query complexity."""
         base_complexity = 0.5

         # Pattern-based complexity
         if patterns:
             max_pattern_complexity = max(
-                self.query_patterns[pattern]["complexity"]
-                for pattern in patterns.keys()
+                self.query_patterns[pattern]["complexity"] for pattern in patterns
             )
             base_complexity = max(base_complexity, max_pattern_complexity)

@@ -961,12 +957,12 @@ class QueryAnalysisNode(Node):

         return max(0.1, min(1.0, base_complexity))

-    def _determine_capabilities(self, patterns: Dict, context: Dict) -> List[str]:
+    def _determine_capabilities(self, patterns: dict, context: dict) -> list[str]:
         """Determine required capabilities."""
         capabilities = set()

         # Pattern-based capabilities
-        for pattern_name in patterns.keys():
+        for pattern_name in patterns:
             pattern_info = self.query_patterns[pattern_name]
             capabilities.update(pattern_info["required_capabilities"])

@@ -980,10 +976,10 @@ class QueryAnalysisNode(Node):

         return list(capabilities)

-    def _analyze_mcp_needs(self, query: str, patterns: Dict, mcp_servers: List) -> Dict:
+    def _analyze_mcp_needs(self, query: str, patterns: dict, mcp_servers: list) -> dict:
         """Analyze MCP tool requirements."""
         mcp_needed = any(
-            self.query_patterns[pattern]["mcp_likely"] for pattern in patterns.keys()
+            self.query_patterns[pattern]["mcp_likely"] for pattern in patterns
         )

         # Check for specific tool indicators
@@ -1009,8 +1005,8 @@ class QueryAnalysisNode(Node):
         }

     def _suggest_team_composition(
-        self, capabilities: List[str], complexity: float, agents: List
-    ) -> Dict:
+        self, capabilities: list[str], complexity: float, agents: list
+    ) -> dict:
         """Suggest optimal team composition."""
         # Basic team size estimation
         base_size = max(2, len(capabilities) // 2)
@@ -1027,8 +1023,8 @@ class QueryAnalysisNode(Node):
         }

     def _determine_strategy(
-        self, patterns: Dict, complexity: float, context: Dict
-    ) -> Dict:
+        self, patterns: dict, complexity: float, context: dict
+    ) -> dict:
         """Determine solution strategy."""
         if complexity < 0.4:
             approach = "single_agent"
@@ -1053,7 +1049,7 @@ class QueryAnalysisNode(Node):
             "iterative_refinement": complexity > 0.5,
         }

-    def _estimate_solution_requirements(self, complexity: float, context: Dict) -> Dict:
+    def _estimate_solution_requirements(self, complexity: float, context: dict) -> dict:
         """Estimate solution requirements."""
         # Base estimates
         estimated_time = 30 + int(complexity * 120)  # 30-150 minutes
@@ -1157,7 +1153,7 @@ class OrchestrationManagerNode(Node):
         self.session_id = str(uuid.uuid4())
         self.orchestration_history = deque(maxlen=50)

-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "query": NodeParameter(
                 name="query",
@@ -1216,7 +1212,7 @@ class OrchestrationManagerNode(Node):
             ),
         }

-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         """Execute complete orchestrated solution workflow."""
         start_time = time.time()

@@ -1330,14 +1326,14 @@ class OrchestrationManagerNode(Node):

         return final_result

-    def _analyze_query(self, query: str, context: Dict, mcp_servers: List) -> Dict:
+    def _analyze_query(self, query: str, context: dict, mcp_servers: list) -> dict:
         """Analyze the incoming query."""
         analyzer = QueryAnalysisNode()
         return analyzer.run(query=query, context=context, mcp_servers=mcp_servers)

     def _setup_infrastructure(
-        self, pool_size: int, mcp_servers: List, enable_caching: bool
-    ) -> Dict:
+        self, pool_size: int, mcp_servers: list, enable_caching: bool
+    ) -> dict:
         """Set up core infrastructure components."""
         infrastructure = {}

@@ -1362,8 +1358,8 @@ class OrchestrationManagerNode(Node):
         return infrastructure

     def _create_agent_pool(
-        self, query_analysis: Dict, infrastructure: Dict, mcp_servers: List
-    ) -> List[Dict]:
+        self, query_analysis: dict, infrastructure: dict, mcp_servers: list
+    ) -> list[dict]:
         """Create specialized agent pool based on query analysis."""
         analysis = query_analysis["analysis"]
         analysis["required_capabilities"]
@@ -1458,8 +1454,8 @@ class OrchestrationManagerNode(Node):
         return agent_pool

     def _form_team(
-        self, query_analysis: Dict, agent_pool: List, iteration: int
-    ) -> Dict:
+        self, query_analysis: dict, agent_pool: list, iteration: int
+    ) -> dict:
         """Form optimal team for current iteration."""
         analysis = query_analysis["analysis"]

@@ -1484,11 +1480,11 @@ class OrchestrationManagerNode(Node):
     def _collaborative_solve(
         self,
         query: str,
-        context: Dict,
-        team_result: Dict,
-        infrastructure: Dict,
+        context: dict,
+        team_result: dict,
+        infrastructure: dict,
         iteration: int,
-    ) -> Dict:
+    ) -> dict:
         """Execute collaborative problem solving."""
         team = team_result["team"]
         solution_memory = infrastructure["solution_memory"]
@@ -1579,11 +1575,11 @@ class OrchestrationManagerNode(Node):

     def _simulate_agent_work(
         self,
-        agent: Dict,
+        agent: dict,
         task: str,
-        cache: Optional[IntelligentCacheNode],
-        context_info: List = None,
-    ) -> Dict:
+        cache: IntelligentCacheNode | None,
+        context_info: list = None,
+    ) -> dict:
         """Simulate agent performing work (with caching)."""
         agent_id = agent["id"]
         capabilities = agent["capabilities"]
@@ -1640,7 +1636,7 @@ class OrchestrationManagerNode(Node):
         return result

     def _calculate_solution_confidence(
-        self, info_results: List, analysis_results: List, synthesis_results: List
+        self, info_results: list, analysis_results: list, synthesis_results: list
     ) -> float:
         """Calculate overall solution confidence."""
         all_results = info_results + analysis_results + synthesis_results
@@ -1652,11 +1648,11 @@ class OrchestrationManagerNode(Node):

     def _evaluate_solution(
         self,
-        solution: Dict,
-        query_analysis: Dict,
+        solution: dict,
+        query_analysis: dict,
         quality_threshold: float,
         iteration: int,
-    ) -> Dict:
+    ) -> dict:
         """Evaluate solution quality."""
         evaluator = SolutionEvaluatorNode()

@@ -1674,11 +1670,11 @@ class OrchestrationManagerNode(Node):
     def _finalize_results(
         self,
         query: str,
-        final_solution: Dict,
-        history: List,
+        final_solution: dict,
+        history: list,
         total_time: float,
-        infrastructure: Dict,
-    ) -> Dict:
+        infrastructure: dict,
+    ) -> dict:
         """Finalize and format results."""
         # Get cache statistics
         cache_stats = {}
@@ -1833,7 +1829,7 @@ class ConvergenceDetectorNode(Node):
         super().__init__()
         self.convergence_history = deque(maxlen=100)

-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "solution_history": NodeParameter(
                 name="solution_history",
@@ -1886,7 +1882,7 @@ class ConvergenceDetectorNode(Node):
             ),
         }

-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         """Determine if solution has converged and iteration should stop."""
         solution_history = kwargs.get("solution_history", [])
         quality_threshold = kwargs.get("quality_threshold", 0.8)
@@ -2029,7 +2025,7 @@ class ConvergenceDetectorNode(Node):
             ),
         }

-    def _calculate_improvement_trend(self, history: List[Dict]) -> Dict:
+    def _calculate_improvement_trend(self, history: list[dict]) -> dict:
         """Calculate the trend in solution improvement."""
         if len(history) < 2:
             return {"trend": "insufficient_data", "rate": 0.0}
@@ -2075,7 +2071,7 @@ class ConvergenceDetectorNode(Node):
             "consistency": 1.0 - (max(scores) - min(scores)) / max(max(scores), 0.1),
         }

-    def _generate_recommendations(self, signals: Dict, iteration: int) -> List[str]:
+    def _generate_recommendations(self, signals: dict, iteration: int) -> list[str]:
         """Generate recommendations based on convergence signals."""
         recommendations = []

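The hunks that follow apply the same modernization to the iterative agent module, including the IterationState and MCPToolCapability dataclasses. One caveat worth noting for dataclasses: without from __future__ import annotations, field annotations are evaluated while the class body executes, so float | None in a field also requires Python 3.10+ at import time. An abridged sketch mirroring the first dataclass hunk below:

from dataclasses import dataclass, field
from typing import Any

@dataclass
class IterationState:
    # Abridged: a few representative fields from the hunk shown below.
    iteration: int
    start_time: float
    end_time: float | None = None  # was Optional[float] = None
    discoveries: dict[str, Any] = field(default_factory=dict)  # was Dict[str, Any]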
kailash/nodes/ai/iterative_llm_agent.py
@@ -3,7 +3,7 @@
 import time
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from typing import Any

 from kailash.nodes.ai.llm_agent import LLMAgentNode
 from kailash.nodes.base import NodeParameter, register_node
@@ -16,16 +16,16 @@ class IterationState:
     iteration: int
     phase: str  # discovery, planning, execution, reflection, convergence, synthesis
     start_time: float
-    end_time: Optional[float] = None
-    discoveries: Dict[str, Any] = field(default_factory=dict)
-    plan: Dict[str, Any] = field(default_factory=dict)
-    execution_results: Dict[str, Any] = field(default_factory=dict)
-    reflection: Dict[str, Any] = field(default_factory=dict)
-    convergence_decision: Dict[str, Any] = field(default_factory=dict)
+    end_time: float | None = None
+    discoveries: dict[str, Any] = field(default_factory=dict)
+    plan: dict[str, Any] = field(default_factory=dict)
+    execution_results: dict[str, Any] = field(default_factory=dict)
+    reflection: dict[str, Any] = field(default_factory=dict)
+    convergence_decision: dict[str, Any] = field(default_factory=dict)
     success: bool = False
-    error: Optional[str] = None
+    error: str | None = None

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """Convert to dictionary for serialization."""
         return {
             "iteration": self.iteration,
@@ -50,15 +50,15 @@ class MCPToolCapability:
     name: str
     description: str
     primary_function: str
-    input_requirements: List[str]
+    input_requirements: list[str]
     output_format: str
     domain: str
     complexity: str  # simple, medium, complex
-    dependencies: List[str]
+    dependencies: list[str]
     confidence: float
     server_source: str

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             "name": self.name,
             "description": self.description,
@@ -111,7 +111,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
         ... )
     """

-    def get_parameters(self) -> Dict[str, NodeParameter]:
+    def get_parameters(self) -> dict[str, NodeParameter]:
         """Get parameters for iterative LLM agent configuration."""
         base_params = super().get_parameters()

@@ -189,7 +189,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
         base_params.update(iterative_params)
         return base_params

-    def run(self, **kwargs) -> Dict[str, Any]:
+    def run(self, **kwargs) -> dict[str, Any]:
         """
         Execute iterative LLM agent with 6-phase process.

@@ -220,7 +220,7 @@ class IterativeLLMAgentNode(LLMAgentNode):

         # Initialize iterative execution state
         start_time = time.time()
-        iterations: List[IterationState] = []
+        iterations: list[IterationState] = []
         global_discoveries = {
             "servers": {},
             "tools": {},
@@ -354,11 +354,11 @@ class IterativeLLMAgentNode(LLMAgentNode):

     def _phase_discovery(
         self,
-        kwargs: Dict[str, Any],
-        global_discoveries: Dict[str, Any],
+        kwargs: dict[str, Any],
+        global_discoveries: dict[str, Any],
         discovery_mode: str,
-        discovery_budget: Dict[str, Any],
-    ) -> Dict[str, Any]:
+        discovery_budget: dict[str, Any],
+    ) -> dict[str, Any]:
         """
         Phase 1: Discover MCP servers, tools, and resources.

@@ -450,8 +450,8 @@ class IterativeLLMAgentNode(LLMAgentNode):
         return discoveries

     def _discover_server_tools(
-        self, server_config: Any, budget: Dict[str, Any]
-    ) -> List[Dict[str, Any]]:
+        self, server_config: Any, budget: dict[str, Any]
+    ) -> list[dict[str, Any]]:
         """Discover tools from a specific MCP server."""
         # Use existing MCP tool discovery from parent class
         try:
@@ -497,8 +497,8 @@ class IterativeLLMAgentNode(LLMAgentNode):
         return []

     def _discover_server_resources(
-        self, server_config: Any, budget: Dict[str, Any]
-    ) -> List[Dict[str, Any]]:
+        self, server_config: Any, budget: dict[str, Any]
+    ) -> list[dict[str, Any]]:
         """Discover resources from a specific MCP server."""
         # Mock implementation - in real version would use MCP resource discovery
         try:
@@ -532,7 +532,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
         return []

     def _analyze_tool_capability(
-        self, tool: Dict[str, Any], server_id: str
+        self, tool: dict[str, Any], server_id: str
     ) -> MCPToolCapability:
         """Analyze tool description to understand semantic capabilities."""
         # Extract tool information
@@ -593,11 +593,11 @@ class IterativeLLMAgentNode(LLMAgentNode):

     def _phase_planning(
         self,
-        kwargs: Dict[str, Any],
-        discoveries: Dict[str, Any],
-        global_discoveries: Dict[str, Any],
-        previous_iterations: List[IterationState],
-    ) -> Dict[str, Any]:
+        kwargs: dict[str, Any],
+        discoveries: dict[str, Any],
+        global_discoveries: dict[str, Any],
+        previous_iterations: list[IterationState],
+    ) -> dict[str, Any]:
         """Phase 2: Create execution plan based on discoveries."""
         messages = kwargs.get("messages", [])
         user_query = ""
@@ -659,8 +659,8 @@ class IterativeLLMAgentNode(LLMAgentNode):
         return plan

     def _phase_execution(
-        self, kwargs: Dict[str, Any], plan: Dict[str, Any], discoveries: Dict[str, Any]
-    ) -> Dict[str, Any]:
+        self, kwargs: dict[str, Any], plan: dict[str, Any], discoveries: dict[str, Any]
+    ) -> dict[str, Any]:
         """Phase 3: Execute the planned actions."""
         execution_results = {
             "steps_completed": [],
@@ -710,10 +710,10 @@ class IterativeLLMAgentNode(LLMAgentNode):

     def _phase_reflection(
         self,
-        kwargs: Dict[str, Any],
-        execution_results: Dict[str, Any],
-        previous_iterations: List[IterationState],
-    ) -> Dict[str, Any]:
+        kwargs: dict[str, Any],
+        execution_results: dict[str, Any],
+        previous_iterations: list[IterationState],
+    ) -> dict[str, Any]:
         """Phase 4: Reflect on execution results and assess progress."""
         reflection = {
             "quality_assessment": {},
@@ -794,12 +794,12 @@ class IterativeLLMAgentNode(LLMAgentNode):

     def _phase_convergence(
         self,
-        kwargs: Dict[str, Any],
+        kwargs: dict[str, Any],
         iteration_state: IterationState,
-        previous_iterations: List[IterationState],
-        convergence_criteria: Dict[str, Any],
-        global_discoveries: Dict[str, Any],
-    ) -> Dict[str, Any]:
+        previous_iterations: list[IterationState],
+        convergence_criteria: dict[str, Any],
+        global_discoveries: dict[str, Any],
+    ) -> dict[str, Any]:
         """Phase 5: Decide whether to continue iterating or stop."""
         convergence_result = {
             "should_stop": False,
@@ -1140,9 +1140,9 @@ class IterativeLLMAgentNode(LLMAgentNode):

     def _phase_synthesis(
         self,
-        kwargs: Dict[str, Any],
-        iterations: List[IterationState],
-        global_discoveries: Dict[str, Any],
+        kwargs: dict[str, Any],
+        iterations: list[IterationState],
+        global_discoveries: dict[str, Any],
     ) -> str:
         """Phase 6: Synthesize results from all iterations into final response."""
         messages = kwargs.get("messages", [])
@@ -1203,7 +1203,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
         return synthesis

     def _update_global_discoveries(
-        self, global_discoveries: Dict[str, Any], new_discoveries: Dict[str, Any]
+        self, global_discoveries: dict[str, Any], new_discoveries: dict[str, Any]
     ) -> None:
         """Update global discoveries with new findings."""
         # Update servers
@@ -1229,9 +1229,9 @@ class IterativeLLMAgentNode(LLMAgentNode):

     def _adapt_strategy(
         self,
-        kwargs: Dict[str, Any],
+        kwargs: dict[str, Any],
         iteration_state: IterationState,
-        previous_iterations: List[IterationState],
+        previous_iterations: list[IterationState],
     ) -> None:
         """Adapt strategy for next iteration based on results."""
         # Simple adaptation logic (in real implementation, use more sophisticated ML)
@@ -1249,8 +1249,8 @@ class IterativeLLMAgentNode(LLMAgentNode):
         )

     def _calculate_resource_usage(
-        self, iterations: List[IterationState]
-    ) -> Dict[str, Any]:
+        self, iterations: list[IterationState]
+    ) -> dict[str, Any]:
         """Calculate resource usage across all iterations."""
         total_duration = sum(
             (iter_state.end_time - iter_state.start_time)