kailash 0.1.5__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +740 -0
  3. kailash/api/__main__.py +6 -0
  4. kailash/api/auth.py +668 -0
  5. kailash/api/custom_nodes.py +285 -0
  6. kailash/api/custom_nodes_secure.py +377 -0
  7. kailash/api/database.py +620 -0
  8. kailash/api/studio.py +915 -0
  9. kailash/api/studio_secure.py +893 -0
  10. kailash/mcp/__init__.py +53 -0
  11. kailash/mcp/__main__.py +13 -0
  12. kailash/mcp/ai_registry_server.py +712 -0
  13. kailash/mcp/client.py +447 -0
  14. kailash/mcp/client_new.py +334 -0
  15. kailash/mcp/server.py +293 -0
  16. kailash/mcp/server_new.py +336 -0
  17. kailash/mcp/servers/__init__.py +12 -0
  18. kailash/mcp/servers/ai_registry.py +289 -0
  19. kailash/nodes/__init__.py +4 -2
  20. kailash/nodes/ai/__init__.py +2 -0
  21. kailash/nodes/ai/a2a.py +714 -67
  22. kailash/nodes/ai/intelligent_agent_orchestrator.py +31 -37
  23. kailash/nodes/ai/iterative_llm_agent.py +1280 -0
  24. kailash/nodes/ai/llm_agent.py +324 -1
  25. kailash/nodes/ai/self_organizing.py +5 -6
  26. kailash/nodes/base.py +15 -2
  27. kailash/nodes/base_async.py +45 -0
  28. kailash/nodes/base_cycle_aware.py +374 -0
  29. kailash/nodes/base_with_acl.py +338 -0
  30. kailash/nodes/code/python.py +135 -27
  31. kailash/nodes/data/readers.py +16 -6
  32. kailash/nodes/data/writers.py +16 -6
  33. kailash/nodes/logic/__init__.py +8 -0
  34. kailash/nodes/logic/convergence.py +642 -0
  35. kailash/nodes/logic/loop.py +153 -0
  36. kailash/nodes/logic/operations.py +187 -27
  37. kailash/nodes/mixins/__init__.py +11 -0
  38. kailash/nodes/mixins/mcp.py +228 -0
  39. kailash/nodes/mixins.py +387 -0
  40. kailash/runtime/__init__.py +2 -1
  41. kailash/runtime/access_controlled.py +458 -0
  42. kailash/runtime/local.py +106 -33
  43. kailash/runtime/parallel_cyclic.py +529 -0
  44. kailash/sdk_exceptions.py +90 -5
  45. kailash/security.py +845 -0
  46. kailash/tracking/manager.py +38 -15
  47. kailash/tracking/models.py +1 -1
  48. kailash/tracking/storage/filesystem.py +30 -2
  49. kailash/utils/__init__.py +8 -0
  50. kailash/workflow/__init__.py +18 -0
  51. kailash/workflow/convergence.py +270 -0
  52. kailash/workflow/cycle_analyzer.py +768 -0
  53. kailash/workflow/cycle_builder.py +573 -0
  54. kailash/workflow/cycle_config.py +709 -0
  55. kailash/workflow/cycle_debugger.py +760 -0
  56. kailash/workflow/cycle_exceptions.py +601 -0
  57. kailash/workflow/cycle_profiler.py +671 -0
  58. kailash/workflow/cycle_state.py +338 -0
  59. kailash/workflow/cyclic_runner.py +985 -0
  60. kailash/workflow/graph.py +500 -39
  61. kailash/workflow/migration.py +768 -0
  62. kailash/workflow/safety.py +365 -0
  63. kailash/workflow/templates.py +744 -0
  64. kailash/workflow/validation.py +693 -0
  65. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/METADATA +256 -12
  66. kailash-0.2.0.dist-info/RECORD +125 -0
  67. kailash/nodes/mcp/__init__.py +0 -11
  68. kailash/nodes/mcp/client.py +0 -554
  69. kailash/nodes/mcp/resource.py +0 -682
  70. kailash/nodes/mcp/server.py +0 -577
  71. kailash-0.1.5.dist-info/RECORD +0 -88
  72. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/WHEEL +0 -0
  73. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/entry_points.txt +0 -0
  74. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/licenses/LICENSE +0 -0
  75. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/llm_agent.py CHANGED
@@ -182,6 +182,13 @@ class LLMAgentNode(Node):
                 default=[],
                 description="MCP resource URIs to include as context",
             ),
+            "auto_discover_tools": NodeParameter(
+                name="auto_discover_tools",
+                type=bool,
+                required=False,
+                default=False,
+                description="Automatically discover and use MCP tools",
+            ),
             "rag_config": NodeParameter(
                 name="rag_config",
                 type=dict,
@@ -451,6 +458,7 @@ class LLMAgentNode(Node):
         memory_config = kwargs.get("memory_config", {})
         mcp_servers = kwargs.get("mcp_servers", [])
         mcp_context = kwargs.get("mcp_context", [])
+        auto_discover_tools = kwargs.get("auto_discover_tools", False)
         rag_config = kwargs.get("rag_config", {})
         generation_config = kwargs.get("generation_config", {})
         streaming = kwargs.get("streaming", False)
@@ -469,6 +477,12 @@ class LLMAgentNode(Node):
         # Retrieve MCP context if configured
         mcp_context_data = self._retrieve_mcp_context(mcp_servers, mcp_context)
 
+        # Discover MCP tools if enabled
+        if auto_discover_tools and mcp_servers:
+            mcp_tools = self._discover_mcp_tools(mcp_servers)
+            # Merge MCP tools with existing tools
+            tools = self._merge_tools(tools, mcp_tools)
+
         # Perform RAG retrieval if configured
         rag_context = self._perform_rag_retrieval(
             messages, rag_config, mcp_context_data
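
Taken together, these three hunks wire one new opt-in flag through the node. A minimal usage sketch, assuming typical LLMAgentNode inputs; the provider, model, and server entries below are illustrative assumptions, not taken from this diff:

    # Hypothetical invocation; only auto_discover_tools and mcp_servers come from this diff.
    from kailash.nodes.ai.llm_agent import LLMAgentNode

    agent = LLMAgentNode(name="assistant")
    result = agent.execute(
        provider="openai",                      # assumed value
        model="gpt-4",                          # assumed value
        messages=[{"role": "user", "content": "What resources do you see?"}],
        mcp_servers=[{"name": "ai_registry"}],  # assumed minimal server config
        auto_discover_tools=True,  # new in 0.2.0: discovered MCP tools merge into `tools`
    )
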
@@ -727,7 +741,125 @@ class LLMAgentNode(Node):
 
         context_data = []
 
-        # Mock MCP context retrieval
+        # Check if we should use real MCP implementation
+        use_real_mcp = hasattr(self, "_mcp_client") or self._should_use_real_mcp()
+
+        if use_real_mcp:
+            # Use internal MCP client for real implementation
+            try:
+                import asyncio
+                from datetime import datetime
+
+                from kailash.mcp import MCPClient
+
+                # Initialize MCP client if not already done
+                if not hasattr(self, "_mcp_client"):
+                    self._mcp_client = MCPClient()
+
+                # Process each server
+                for server_config in mcp_servers:
+                    try:
+                        # List resources from server
+                        resources = asyncio.run(
+                            self._mcp_client.list_resources(server_config)
+                        )
+
+                        # Read specific resources if requested
+                        for uri in mcp_context:
+                            try:
+                                resource_data = asyncio.run(
+                                    self._mcp_client.read_resource(server_config, uri)
+                                )
+
+                                if resource_data:
+                                    # Extract content from resource data
+                                    content = resource_data
+                                    if isinstance(resource_data, dict):
+                                        content = resource_data.get(
+                                            "content", resource_data
+                                        )
+
+                                    # Handle different content formats
+                                    if isinstance(content, list):
+                                        # MCP returns content as array of items
+                                        text_content = ""
+                                        for item in content:
+                                            if (
+                                                isinstance(item, dict)
+                                                and item.get("type") == "text"
+                                            ):
+                                                text_content += item.get("text", "")
+                                            elif isinstance(item, str):
+                                                text_content += item
+                                        content = text_content
+
+                                    context_data.append(
+                                        {
+                                            "uri": uri,
+                                            "content": str(content),
+                                            "source": server_config.get(
+                                                "name", "mcp_server"
+                                            ),
+                                            "retrieved_at": datetime.now().isoformat(),
+                                            "relevance_score": 0.95,  # High score for explicitly requested
+                                            "metadata": (
+                                                resource_data
+                                                if isinstance(resource_data, dict)
+                                                else {}
+                                            ),
+                                        }
+                                    )
+                            except Exception as e:
+                                self.logger.debug(f"Failed to read resource {uri}: {e}")
+
+                        # Auto-discover and include relevant resources
+                        if resources and isinstance(resources, list):
+                            for resource in resources[
+                                :3
+                            ]:  # Limit auto-discovered resources
+                                resource_dict = (
+                                    resource
+                                    if isinstance(resource, dict)
+                                    else {"uri": str(resource)}
+                                )
+                                context_data.append(
+                                    {
+                                        "uri": resource_dict.get("uri", ""),
+                                        "content": f"Auto-discovered: {resource_dict.get('name', '')} - {resource_dict.get('description', '')}",
+                                        "source": server_config.get(
+                                            "name", "mcp_server"
+                                        ),
+                                        "retrieved_at": datetime.now().isoformat(),
+                                        "relevance_score": 0.75,
+                                        "metadata": resource_dict,
+                                    }
+                                )
+
+                    except Exception as e:
+                        self.logger.debug(f"MCP server connection failed: {e}")
+                        # Fall back to mock for this server
+                        context_data.append(
+                            {
+                                "uri": f"mcp://{server_config.get('name', 'unknown')}/fallback",
+                                "content": "Connection failed, using fallback content",
+                                "source": server_config.get("name", "unknown"),
+                                "retrieved_at": datetime.now().isoformat(),
+                                "relevance_score": 0.5,
+                                "metadata": {"error": str(e)},
+                            }
+                        )
+
+                # If we got real data, return it
+                if context_data:
+                    return context_data
+
+            except ImportError:
+                # MCPClient not available, fall back to mock
+                pass
+            except Exception as e:
+                self.logger.debug(f"MCP retrieval error: {e}")
+
+        # Fallback to mock implementation
         for uri in mcp_context:
             context_data.append(
                 {
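
The server configuration is only ever read with .get() in this hunk, so a dict with a name key is enough to exercise it. A sketch of the input shapes and of each record appended to context_data; all values are illustrative:

    # Illustrative shapes, inferred from the .get() calls and dict literals above.
    mcp_servers = [{"name": "ai_registry"}]            # one config dict per server
    mcp_context = ["mcp://ai_registry/models/latest"]  # explicitly requested URIs

    # Each context_data record has this shape:
    record = {
        "uri": "mcp://ai_registry/models/latest",
        "content": "...",                       # text flattened from MCP content items
        "source": "ai_registry",                # server_config["name"]
        "retrieved_at": "2024-01-01T00:00:00",  # datetime.now().isoformat()
        "relevance_score": 0.95,                # 0.95 explicit, 0.75 auto-discovered, 0.5 fallback
        "metadata": {},
    }
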
@@ -754,6 +886,146 @@ class LLMAgentNode(Node):
 
         return context_data
 
+    def _should_use_real_mcp(self) -> bool:
+        """Check if real MCP implementation should be used."""
+        # Check environment variable or configuration
+        import os
+
+        return os.environ.get("KAILASH_USE_REAL_MCP", "false").lower() == "true"
+
+    def _discover_mcp_tools(self, mcp_servers: List[dict]) -> List[Dict[str, Any]]:
+        """
+        Discover available tools from MCP servers.
+
+        Args:
+            mcp_servers: List of MCP server configurations
+
+        Returns:
+            List of tool definitions in OpenAI function calling format
+        """
+        discovered_tools = []
+
+        # Check if we should use real MCP implementation
+        use_real_mcp = hasattr(self, "_mcp_client") or self._should_use_real_mcp()
+
+        if use_real_mcp:
+            try:
+                import asyncio
+
+                from kailash.mcp import MCPClient
+
+                # Initialize MCP client if not already done
+                if not hasattr(self, "_mcp_client"):
+                    self._mcp_client = MCPClient()
+
+                # Discover tools from each server
+                for server_config in mcp_servers:
+                    try:
+                        # Discover tools asynchronously
+                        tools = asyncio.run(
+                            self._mcp_client.discover_tools(server_config)
+                        )
+
+                        # Convert MCP tools to OpenAI function calling format
+                        if isinstance(tools, list):
+                            for tool in tools:
+                                tool_dict = (
+                                    tool
+                                    if isinstance(tool, dict)
+                                    else {"name": str(tool)}
+                                )
+                                # Extract tool info
+                                function_def = {
+                                    "name": tool_dict.get("name", "unknown"),
+                                    "description": tool_dict.get("description", ""),
+                                    "parameters": tool_dict.get(
+                                        "inputSchema", tool_dict.get("parameters", {})
+                                    ),
+                                }
+                                # Add MCP metadata
+                                function_def["mcp_server"] = server_config.get(
+                                    "name", "mcp_server"
+                                )
+                                function_def["mcp_server_config"] = server_config
+
+                                discovered_tools.append(
+                                    {"type": "function", "function": function_def}
+                                )
+
+                    except Exception as e:
+                        self.logger.debug(
+                            f"Failed to discover tools from {server_config.get('name', 'unknown')}: {e}"
+                        )
+
+            except ImportError:
+                # MCPClient not available, use mock tools
+                pass
+            except Exception as e:
+                self.logger.debug(f"MCP tool discovery error: {e}")
+
+        # If no real tools discovered, provide minimal generic tools
+        if not discovered_tools:
+            # Provide minimal generic tools for each server
+            for server_config in mcp_servers:
+                server_name = server_config.get("name", "mcp_server")
+                discovered_tools.extend(
+                    [
+                        {
+                            "type": "function",
+                            "function": {
+                                "name": f"mcp_{server_name}_search",
+                                "description": f"Search for information in {server_name}",
+                                "parameters": {
+                                    "type": "object",
+                                    "properties": {
+                                        "query": {
+                                            "type": "string",
+                                            "description": "Search query",
+                                        }
+                                    },
+                                    "required": ["query"],
+                                },
+                                "mcp_server": server_name,
+                                "mcp_server_config": server_config,
+                            },
+                        }
+                    ]
+                )
+
+        return discovered_tools
+
+    def _merge_tools(
+        self, existing_tools: List[dict], mcp_tools: List[dict]
+    ) -> List[dict]:
+        """
+        Merge MCP discovered tools with existing tools, avoiding duplicates.
+
+        Args:
+            existing_tools: Tools already defined
+            mcp_tools: Tools discovered from MCP servers
+
+        Returns:
+            Merged list of tools
+        """
+        # Create a set of existing tool names for deduplication
+        existing_names = set()
+        for tool in existing_tools:
+            if isinstance(tool, dict) and "function" in tool:
+                existing_names.add(tool["function"].get("name", ""))
+            elif isinstance(tool, dict) and "name" in tool:
+                existing_names.add(tool["name"])
+
+        # Add MCP tools that don't conflict
+        merged_tools = existing_tools.copy()
+        for mcp_tool in mcp_tools:
+            if isinstance(mcp_tool, dict) and "function" in mcp_tool:
+                tool_name = mcp_tool["function"].get("name", "")
+                if tool_name and tool_name not in existing_names:
+                    merged_tools.append(mcp_tool)
+                    existing_names.add(tool_name)
+
+        return merged_tools
+
     def _perform_rag_retrieval(
         self, messages: List[dict], rag_config: dict, mcp_context: List[dict]
     ) -> Dict[str, Any]:
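
Two behaviors in these helpers are easy to miss: the real MCP path is opt-in via an environment variable, and _merge_tools deduplicates by function name with existing tools taking precedence. A short sketch of both; the tool names are illustrative:

    import os

    # Opt in to the real MCP client; otherwise the mock/fallback paths are used.
    os.environ["KAILASH_USE_REAL_MCP"] = "true"

    # Merge semantics: the first occurrence of a name wins, so existing tools shadow MCP tools.
    existing = [{"type": "function", "function": {"name": "search"}}]
    discovered = [
        {"type": "function", "function": {"name": "search"}},               # skipped (duplicate)
        {"type": "function", "function": {"name": "mcp_registry_search"}},  # appended
    ]
    # agent._merge_tools(existing, discovered) -> ["search", "mcp_registry_search"]
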
@@ -1159,3 +1431,54 @@ class LLMAgentNode(Node):
             "provider": provider,
             "efficiency_score": completion_tokens / max(total_tokens, 1),
         }
+
+    async def _execute_mcp_tool_call(
+        self, tool_call: dict, mcp_tools: List[dict]
+    ) -> Dict[str, Any]:
+        """Execute an MCP tool call.
+
+        Args:
+            tool_call: Tool call from LLM response
+            mcp_tools: List of discovered MCP tools
+
+        Returns:
+            Tool execution result
+        """
+        tool_name = tool_call.get("function", {}).get("name", "")
+        tool_args = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
+
+        # Find the MCP tool definition
+        mcp_tool = None
+        for tool in mcp_tools:
+            if tool.get("function", {}).get("name") == tool_name:
+                mcp_tool = tool
+                break
+
+        if not mcp_tool:
+            return {"error": f"MCP tool '{tool_name}' not found", "success": False}
+
+        # Get server config from tool
+        server_config = mcp_tool.get("function", {}).get("mcp_server_config", {})
+
+        try:
+            from kailash.mcp import MCPClient
+
+            # Initialize MCP client if not already done
+            if not hasattr(self, "_mcp_client"):
+                self._mcp_client = MCPClient()
+
+            # Call the tool
+            result = await self._mcp_client.call_tool(
+                server_config, tool_name, tool_args
+            )
+
+            return {
+                "result": result,
+                "success": True,
+                "tool_name": tool_name,
+                "server": server_config.get("name", "unknown"),
+            }
+
+        except Exception as e:
+            self.logger.error(f"MCP tool execution failed: {e}")
+            return {"error": str(e), "success": False, "tool_name": tool_name}
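
The tool call passed in follows the OpenAI function-calling shape, with arguments as a JSON string (json.loads is applied above). An illustrative call, with assumed names:

    # Illustrative input/output for _execute_mcp_tool_call.
    tool_call = {
        "function": {
            "name": "mcp_ai_registry_search",
            "arguments": '{"query": "vision models"}',  # JSON string, not a dict
        }
    }
    # result = await agent._execute_mcp_tool_call(tool_call, discovered_tools)
    # success      -> {"result": ..., "success": True, "tool_name": ..., "server": ...}
    # unknown name -> {"error": "MCP tool '...' not found", "success": False}
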
kailash/nodes/ai/self_organizing.py CHANGED
@@ -9,11 +9,10 @@ import random
 import time
 import uuid
 from collections import defaultdict, deque
-from datetime import datetime
 from enum import Enum
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple
+from typing import Any, Dict, List, Set
 
-from kailash.nodes.ai.a2a import A2AAgentNode, SharedMemoryPoolNode
+from kailash.nodes.ai.a2a import A2AAgentNode
 from kailash.nodes.base import Node, NodeParameter, register_node
 
 
@@ -536,7 +535,7 @@ class ProblemAnalyzerNode(Node):
         problem_description = kwargs["problem_description"]
         context = kwargs.get("context", {})
         strategy = kwargs.get("decomposition_strategy", "hierarchical")
-        depth = kwargs.get("analysis_depth", "standard")
+        kwargs.get("analysis_depth", "standard")
 
         # Extract key terms and requirements
         problem_lower = problem_description.lower()
@@ -952,7 +951,7 @@ class TeamFormationNode(Node):
     ) -> List[Dict]:
         """Form team using swarm intelligence principles."""
         required_capabilities = set(problem.get("required_capabilities", []))
-        complexity = problem.get("complexity_score", 0.5)
+        problem.get("complexity_score", 0.5)
 
         # Calculate attraction scores between agents
         attraction_matrix = {}
@@ -1306,7 +1305,7 @@ class SelfOrganizingAgentNode(A2AAgentNode):
 
         # Add self-organization instructions to system prompt
         so_prompt = f"""You are a self-organizing agent with capabilities: {', '.join(capabilities)}.
-
+
 Current team context: {json.dumps(team_context, indent=2)}
 Collaboration mode: {collaboration_mode}
 
kailash/nodes/base.py CHANGED
@@ -60,7 +60,8 @@ class NodeMetadata(BaseModel):
     version: str = Field("1.0.0", description="Node version")
     author: str = Field("", description="Node author")
     created_at: datetime = Field(
-        default_factory=datetime.utcnow, description="Node creation date"
+        default_factory=lambda: datetime.now(timezone.utc),
+        description="Node creation date",
     )
     tags: Set[str] = Field(default_factory=set, description="Node tags")
 
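
This swaps the deprecated naive datetime.utcnow for a timezone-aware default; the practical difference:

    from datetime import datetime, timezone

    naive = datetime.utcnow()           # deprecated since Python 3.12; naive (tzinfo is None)
    aware = datetime.now(timezone.utc)  # replacement used above; carries tzinfo=timezone.utc
    assert naive.tzinfo is None
    assert aware.tzinfo is timezone.utc
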
@@ -185,7 +186,19 @@ class Node(ABC):
                 ),
             )
             self.logger = logging.getLogger(f"kailash.nodes.{self.id}")
-            self.config = kwargs
+
+            # Filter out internal fields from config
+            internal_fields = {
+                "id",
+                "name",
+                "description",
+                "version",
+                "author",
+                "tags",
+                "metadata",
+            }
+            self.config = {k: v for k, v in kwargs.items() if k not in internal_fields}
+
             self._validate_config()
         except ValidationError as e:
             raise NodeConfigurationError(f"Invalid node metadata: {e}") from e
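
The effect is that metadata-style keyword arguments still feed NodeMetadata but no longer leak into node.config. A sketch with a hypothetical subclass:

    # MyNode is a hypothetical Node subclass, for illustration only.
    node = MyNode(name="reader", description="reads input", file_path="in.csv")
    # Before 0.2.0: node.config == {"name": "reader", "description": "reads input", "file_path": "in.csv"}
    # After 0.2.0:  node.config == {"file_path": "in.csv"}  # metadata keys filtered out
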
kailash/nodes/base_async.py CHANGED
@@ -45,6 +45,51 @@ class AsyncNode(Node):
     - TaskManager: Tracks node execution status
     """
 
+    def execute(self, **runtime_inputs) -> Dict[str, Any]:
+        """Execute the node synchronously by running async code in a new event loop.
+
+        This override allows AsyncNode to work with synchronous runtimes like LocalRuntime.
+        It creates a new event loop to run the async code if needed.
+
+        Args:
+            **runtime_inputs: Runtime inputs for node execution
+
+        Returns:
+            Dictionary of validated outputs
+
+        Raises:
+            NodeValidationError: If inputs or outputs are invalid
+            NodeExecutionError: If execution fails
+        """
+        import asyncio
+        import sys
+
+        # Check if we're already in an event loop
+        try:
+            asyncio.get_running_loop()
+            # We're in an event loop - this is problematic for sync execution
+            # Try to use nest_asyncio if available
+            try:
+                import nest_asyncio
+
+                nest_asyncio.apply()
+                return asyncio.run(self.execute_async(**runtime_inputs))
+            except ImportError:
+                # Fall back to running in a thread pool
+                import concurrent.futures
+
+                with concurrent.futures.ThreadPoolExecutor() as executor:
+                    future = executor.submit(
+                        asyncio.run, self.execute_async(**runtime_inputs)
+                    )
+                    return future.result()
+        except RuntimeError:
+            # No event loop running, we can create one
+            if sys.platform == "win32":
+                # Windows requires special handling
+                asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
+            return asyncio.run(self.execute_async(**runtime_inputs))
+
     async def async_run(self, **kwargs) -> Dict[str, Any]:
         """Asynchronous execution method for the node.
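
A minimal sketch of what this enables: an AsyncNode subclass can now be driven by a synchronous runtime through plain execute(). The subclass below is hypothetical, and get_parameters is stubbed on the assumption that Node requires it:

    import asyncio
    from typing import Any, Dict

    from kailash.nodes.base_async import AsyncNode  # module path per the file list above


    class SleepNode(AsyncNode):  # hypothetical subclass for illustration
        def get_parameters(self):  # assumed abstract hook on Node; empty for this sketch
            return {}

        async def async_run(self, **kwargs) -> Dict[str, Any]:
            await asyncio.sleep(0.01)  # stand-in for real async I/O
            return {"ok": True}


    node = SleepNode(name="sleep")
    print(node.execute())  # no loop running -> asyncio.run(self.execute_async(...)) -> {'ok': True}
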