kailash 0.9.5__py3-none-any.whl → 0.9.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kailash/__init__.py CHANGED
@@ -50,7 +50,7 @@ except ImportError:
  # For backward compatibility
  WorkflowGraph = Workflow

- __version__ = "0.9.5"
+ __version__ = "0.9.7"

  __all__ = [
      # Core workflow components
kailash/nodes/ai/iterative_llm_agent.py CHANGED
@@ -310,6 +310,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
          convergence_reason = "max_iterations_reached"

          try:
+
              # Main iterative loop
              for iteration_num in range(1, max_iterations + 1):
                  iteration_state = IterationState(
@@ -694,7 +695,21 @@ class IterativeLLMAgentNode(LLMAgentNode):
              global_discoveries.get("tools", {}).values()
          )

-         # Simple planning logic (in real implementation, use LLM for planning)
+         # If we have no tools available, create a plan for direct LLM response
+         if not available_tools:
+             return {
+                 "user_query": user_query,
+                 "selected_tools": [],
+                 "execution_steps": [
+                     {"step": 1, "action": "direct_llm_response", "tools": []}
+                 ],
+                 "expected_outcomes": ["direct_response"],
+                 "resource_requirements": {},
+                 "success_criteria": {"response_generated": True},
+                 "planning_mode": "direct_llm",
+             }
+
+         # Create plan with available tools
          plan = {
              "user_query": user_query,
              "selected_tools": [],
@@ -702,6 +717,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
              "expected_outcomes": [],
              "resource_requirements": {},
              "success_criteria": {},
+             "planning_mode": "tool_based",
          }

          # Select relevant tools
@@ -711,26 +727,38 @@ class IterativeLLMAgentNode(LLMAgentNode):
            elif isinstance(tool, dict):
                plan["selected_tools"].append(tool.get("name", "unknown"))

-         # Create execution steps
+         # Create execution steps based on query and available tools
          if "analyze" in user_query.lower():
+             # For analysis queries, create multi-step plan
              plan["execution_steps"] = [
                  {
                      "step": 1,
                      "action": "gather_data",
-                     "tools": plan["selected_tools"][:1],
+                     "tools": (
+                         plan["selected_tools"][:1] if plan["selected_tools"] else []
+                     ),
                  },
                  {
                      "step": 2,
                      "action": "perform_analysis",
-                     "tools": plan["selected_tools"][1:2],
+                     "tools": (
+                         plan["selected_tools"][1:2]
+                         if len(plan["selected_tools"]) > 1
+                         else []
+                     ),
                  },
                  {
                      "step": 3,
                      "action": "generate_insights",
-                     "tools": plan["selected_tools"][2:3],
+                     "tools": (
+                         plan["selected_tools"][2:3]
+                         if len(plan["selected_tools"]) > 2
+                         else []
+                     ),
                  },
              ]
          else:
+             # For other queries, single step execution
              plan["execution_steps"] = [
                  {"step": 1, "action": "execute_query", "tools": plan["selected_tools"]}
              ]
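The two planning hunks above boil down to one behavioural change: when tool discovery comes back empty, the planner now returns a single "direct_llm_response" step instead of an empty tool plan, and the tool slices are written so that missing tools yield empty lists. The following standalone sketch condenses that decision logic for readers who want it outside the diff; the function name build_plan, its simplified inputs and the reduced set of plan keys are illustrative only, not the SDK's internal signature.

# Standalone sketch (not the SDK API): mirrors the planning fallback shown above.
from typing import Any


def build_plan(user_query: str, available_tools: list[str]) -> dict[str, Any]:
    """Return a 'direct_llm' plan when no tools exist, else a 'tool_based' plan."""
    if not available_tools:
        # No discovered tools: answer the query with a single direct LLM step.
        return {
            "user_query": user_query,
            "selected_tools": [],
            "execution_steps": [{"step": 1, "action": "direct_llm_response", "tools": []}],
            "planning_mode": "direct_llm",
        }

    # Tools exist: for "analyze" queries build a three-step plan; later slices
    # simply come back empty when fewer tools are available, matching the
    # guarded expressions in the diff.
    steps = (
        [
            {"step": 1, "action": "gather_data", "tools": available_tools[:1]},
            {"step": 2, "action": "perform_analysis", "tools": available_tools[1:2]},
            {"step": 3, "action": "generate_insights", "tools": available_tools[2:3]},
        ]
        if "analyze" in user_query.lower()
        else [{"step": 1, "action": "execute_query", "tools": available_tools}]
    )
    return {
        "user_query": user_query,
        "selected_tools": list(available_tools),
        "execution_steps": steps,
        "planning_mode": "tool_based",
    }


print(build_plan("Summarize the report", [])["planning_mode"])               # direct_llm
print(build_plan("Analyze sales data", ["sql", "charts"])["planning_mode"])  # tool_based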
@@ -754,6 +782,45 @@ class IterativeLLMAgentNode(LLMAgentNode):
          # Check if we should use real MCP tool execution
          use_real_mcp = kwargs.get("use_real_mcp", True)

+         # Handle direct LLM response mode
+         if plan.get("planning_mode") == "direct_llm":
+             try:
+                 llm_response = super().run(**kwargs)
+
+                 if llm_response.get("success") and llm_response.get("response"):
+                     content = llm_response["response"].get("content", "")
+                     step_result = {
+                         "step": 1,
+                         "action": "direct_llm_response",
+                         "tools_used": [],
+                         "output": content,
+                         "success": True,
+                         "duration": 2.0,  # Estimate for LLM call
+                         "llm_response": llm_response,
+                     }
+
+                     execution_results["steps_completed"].append(step_result)
+                     execution_results["intermediate_results"].append(content)
+                     execution_results["tool_outputs"]["llm_response"] = content
+                 else:
+                     raise Exception(
+                         f"LLM response failed: {llm_response.get('error', 'Unknown error')}"
+                     )
+
+             except Exception as e:
+                 error_result = {
+                     "step": 1,
+                     "action": "direct_llm_response",
+                     "tools_used": [],
+                     "error": str(e),
+                     "success": False,
+                 }
+                 execution_results["steps_completed"].append(error_result)
+                 execution_results["errors"].append(str(e))
+                 execution_results["success"] = False
+
+             return execution_results
+
          # Execute each step in the plan
          for step in plan.get("execution_steps", []):
              step_num = step.get("step", 0)
@@ -761,12 +828,12 @@ class IterativeLLMAgentNode(LLMAgentNode):
              tools = step.get("tools", [])

              try:
-                 if use_real_mcp:
+                 if use_real_mcp and tools:
                      # Real MCP tool execution
                      step_result = self._execute_tools_with_mcp(
                          step_num, action, tools, discoveries, kwargs
                      )
-                 else:
+                 elif tools:
                      # Mock tool execution for backward compatibility
                      step_result = {
                          "step": step_num,
@@ -776,20 +843,52 @@ class IterativeLLMAgentNode(LLMAgentNode):
                          "success": True,
                          "duration": 1.5,
                      }
+                 else:
+                     # No tools available, try direct LLM call for this step
+                     self.logger.info(
+                         f"No tools for step {step_num}, using direct LLM call"
+                     )
+                     step_messages = [
+                         {
+                             "role": "user",
+                             "content": f"Please {action}: {plan.get('user_query', '')}",
+                         }
+                     ]
+                     step_kwargs = {**kwargs, "messages": step_messages}
+                     llm_response = super().run(**step_kwargs)
+
+                     if llm_response.get("success") and llm_response.get("response"):
+                         content = llm_response["response"].get("content", "")
+                         step_result = {
+                             "step": step_num,
+                             "action": action,
+                             "tools_used": [],
+                             "output": content,
+                             "success": True,
+                             "duration": 2.0,
+                         }
+                     else:
+                         raise Exception(f"LLM call failed for step {step_num}")

                  execution_results["steps_completed"].append(step_result)
                  execution_results["intermediate_results"].append(step_result["output"])

                  # Store tool outputs
-                 for tool in tools:
-                     if step_result["success"]:
-                         execution_results["tool_outputs"][tool] = step_result.get(
-                             "tool_outputs", {}
-                         ).get(tool, step_result["output"])
-                     else:
-                         execution_results["tool_outputs"][
-                             tool
-                         ] = f"Error executing {tool}: {step_result.get('error', 'Unknown error')}"
+                 if tools:
+                     for tool in tools:
+                         if step_result["success"]:
+                             execution_results["tool_outputs"][tool] = step_result.get(
+                                 "tool_outputs", {}
+                             ).get(tool, step_result["output"])
+                         else:
+                             execution_results["tool_outputs"][
+                                 tool
+                             ] = f"Error executing {tool}: {step_result.get('error', 'Unknown error')}"
+                 else:
+                     # Store LLM response output
+                     execution_results["tool_outputs"][f"step_{step_num}_llm"] = (
+                         step_result["output"]
+                     )

              except Exception as e:
                  error_result = {
@@ -872,11 +971,60 @@ class IterativeLLMAgentNode(LLMAgentNode):
                  self.logger.error(f"Tool execution failed for {tool_name}: {e}")

          # Combine all tool outputs
-         step_result["output"] = (
-             "\n".join(tool_results)
-             if tool_results
-             else f"No tools executed for action: {action}"
-         )
+         if tool_results:
+             step_result["output"] = "\n".join(tool_results)
+         else:
+             # No tools were executed - fall back to LLM for this action
+             self.logger.info(
+                 f"No MCP tools available for action: {action}, using LLM fallback"
+             )
+
+             # Extract user query from kwargs
+             messages = kwargs.get("messages", [])
+             user_query = ""
+             for msg in reversed(messages):
+                 if msg.get("role") == "user":
+                     user_query = msg.get("content", "")
+                     break
+
+             # Create a prompt for the LLM to handle this action
+             action_prompt = f"Please {action} for the following request: {user_query}"
+             llm_messages = [
+                 {
+                     "role": "system",
+                     "content": kwargs.get(
+                         "system_prompt", "You are a helpful AI assistant."
+                     ),
+                 },
+                 {"role": "user", "content": action_prompt},
+             ]
+
+             # Use parent's LLM capabilities
+             try:
+                 llm_kwargs = {
+                     "provider": kwargs.get("provider", "openai"),
+                     "model": kwargs.get("model", "gpt-4"),
+                     "messages": llm_messages,
+                     "temperature": kwargs.get("temperature", 0.7),
+                     "max_tokens": kwargs.get("max_tokens", 1000),
+                 }
+
+                 llm_response = super().run(**llm_kwargs)
+
+                 if llm_response.get("success") and llm_response.get("response"):
+                     content = llm_response["response"].get("content", "")
+                     step_result["output"] = f"LLM Response for {action}: {content}"
+                     step_result["success"] = True
+                 else:
+                     step_result["output"] = (
+                         f"Failed to execute {action}: {llm_response.get('error', 'Unknown error')}"
+                     )
+                     step_result["success"] = False
+             except Exception as e:
+                 self.logger.error(f"LLM fallback failed for action {action}: {e}")
+                 step_result["output"] = f"Error executing {action}: {str(e)}"
+                 step_result["success"] = False
+
          step_result["duration"] = time.time() - start_time

          # Mark as failed if no tools executed successfully
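The execution-side hunks above all follow the same pattern: use MCP tool output when any tool actually ran, otherwise rebuild a prompt from the latest user message and let the parent LLM agent answer. A condensed, standalone sketch of that pattern follows; finish_step, call_llm and the stubbed response are illustrative stand-ins for the node's internal flow (the real code calls super().run(**llm_kwargs) on LLMAgentNode instead).

# Standalone sketch (not the SDK API): the "tool output or LLM fallback" pattern
# added above. `call_llm` stands in for the parent LLMAgentNode's run().
from typing import Callable


def finish_step(
    action: str,
    tool_results: list[str],
    messages: list[dict],
    call_llm: Callable[[list[dict]], dict],
) -> dict:
    if tool_results:
        # At least one MCP tool produced output: join it, as before.
        return {"output": "\n".join(tool_results), "success": True}

    # No tool output: recover the latest user message and ask the LLM directly.
    user_query = next(
        (m.get("content", "") for m in reversed(messages) if m.get("role") == "user"),
        "",
    )
    response = call_llm(
        [
            {"role": "system", "content": "You are a helpful AI assistant."},
            {"role": "user", "content": f"Please {action} for the following request: {user_query}"},
        ]
    )
    if response.get("success"):
        content = response.get("response", {}).get("content", "")
        return {"output": f"LLM Response for {action}: {content}", "success": True}
    return {"output": f"Failed to execute {action}", "success": False}


# Example with a stubbed LLM call standing in for the real provider:
stub = lambda msgs: {"success": True, "response": {"content": "42 open tickets"}}
print(finish_step("perform_analysis", [], [{"role": "user", "content": "Ticket stats?"}], stub))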
@@ -1520,7 +1668,58 @@ class IterativeLLMAgentNode(LLMAgentNode):
          )
          all_insights.extend(goals_achieved)

-         # Create synthesized response
+         # If we have no meaningful results from iterations, fall back to base LLM agent
+         if not all_results and not all_insights:
+             self.logger.info(
+                 "No iterative results found, falling back to base LLM response"
+             )
+             try:
+                 # Use parent's run method to get a proper LLM response
+                 base_response = super().run(**kwargs)
+                 if base_response.get("success") and base_response.get("response"):
+                     return base_response["response"].get("content", "")
+             except Exception as e:
+                 self.logger.warning(f"Base LLM fallback failed: {e}")
+
+         # Create synthesized response using LLM
+         synthesis_messages = [
+             {
+                 "role": "system",
+                 "content": """You are an AI assistant synthesizing results from an iterative analysis process.
+ Create a comprehensive, helpful response based on the findings from multiple iterations of analysis.""",
+             },
+             {
+                 "role": "user",
+                 "content": f"""Original query: {user_query}
+
+ Results from {len(iterations)} iterations:
+ {chr(10).join(all_results[:10]) if all_results else "No specific results generated"}
+
+ Insights achieved:
+ {chr(10).join(all_insights[:5]) if all_insights else "No specific insights achieved"}
+
+ Please provide a comprehensive response to the original query based on these findings. If the findings are limited,
+ provide your best analysis of the query directly.""",
+             },
+         ]
+
+         try:
+             # Use the parent's LLM capabilities to generate synthesis
+             synthesis_kwargs = {
+                 "provider": kwargs.get("provider", "openai"),
+                 "model": kwargs.get("model", "gpt-4"),
+                 "messages": synthesis_messages,
+                 "temperature": kwargs.get("temperature", 0.7),
+                 "max_tokens": kwargs.get("max_tokens", 1000),
+             }
+
+             synthesis_response = super().run(**synthesis_kwargs)
+             if synthesis_response.get("success") and synthesis_response.get("response"):
+                 return synthesis_response["response"].get("content", "")
+         except Exception as e:
+             self.logger.warning(f"LLM synthesis failed: {e}")
+
+         # Fallback to basic synthesis if LLM fails
          synthesis = f"## Analysis Results for: {user_query}\n\n"

          if all_results:
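One small detail in the synthesis prompt added above: the f-string joins results with chr(10) rather than "\n" because backslash escapes are not allowed inside f-string expressions before Python 3.12. A minimal illustration (the sample values are made up):

# Why chr(10) instead of "\n" inside the f-string expression above: before
# Python 3.12, a backslash in an f-string expression is a SyntaxError, so a
# literal newline has to come from chr(10) or a variable.
results = ["finding one", "finding two"]
newline = chr(10)  # same character as "\n"
print(f"Results:{newline}{chr(10).join(results)}")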
@@ -1539,20 +1738,6 @@ class IterativeLLMAgentNode(LLMAgentNode):
              f"- {successful_iterations}/{len(iterations)} iterations successful\n\n"
          )

-         # Add confidence and evidence
-         final_confidence = 0.8  # Mock final confidence
-         synthesis += f"### Confidence: {final_confidence:.1%}\n"
-         synthesis += f"Based on analysis using {len(global_discoveries.get('tools', {}))} MCP tools and comprehensive iterative processing.\n\n"
-
-         # Add recommendations if analysis-focused
-         if "analyze" in user_query.lower() or "recommend" in user_query.lower():
-             synthesis += "### Recommendations:\n"
-             synthesis += (
-                 "1. Continue monitoring key metrics identified in this analysis\n"
-             )
-             synthesis += "2. Consider implementing suggested improvements\n"
-             synthesis += "3. Review findings with stakeholders for validation\n"
-
          return synthesis

      def _update_global_discoveries(
kailash/nodes/base.py CHANGED
@@ -1068,6 +1068,9 @@ class Node(ABC):
          if an object can be serialized. Used by validate_outputs()
          to identify problematic values.

+         Enhanced to recognize objects with .to_dict() methods and validate their output.
+         This resolves platform-specific serialization issues between LocalRuntime and Nexus.
+
          Args:
              obj: Any object to test for JSON serializability

@@ -1081,6 +1084,16 @@ class Node(ABC):
              json.dumps(obj)
              return True
          except (TypeError, ValueError):
+             # Check if object has .to_dict() method for enhanced validation
+             if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")):
+                 try:
+                     dict_result = obj.to_dict()
+                     # Validate that .to_dict() result is actually serializable
+                     json.dumps(dict_result)
+                     return True
+                 except (TypeError, ValueError, AttributeError):
+                     # If .to_dict() fails or returns non-serializable data, fall back to False
+                     return False
              return False

      def execute(self, **runtime_inputs) -> dict[str, Any]:
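The base.py change above widens the JSON-serializability check: an object that json.dumps() rejects is now still accepted if it exposes a callable .to_dict() whose result is itself serializable. A minimal, self-contained illustration of that rule follows; is_json_safe and the Report class are illustrative only, not SDK code.

# Minimal illustration of the widened check; `is_json_safe` mirrors the logic
# added above and `Report` is a made-up class, not part of the SDK.
import json


def is_json_safe(obj) -> bool:
    try:
        json.dumps(obj)
        return True
    except (TypeError, ValueError):
        # Fall back to .to_dict() and require its result to be serializable too.
        if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")):
            try:
                json.dumps(obj.to_dict())
                return True
            except (TypeError, ValueError, AttributeError):
                return False
        return False


class Report:
    def __init__(self, title: str):
        self.title = title

    def to_dict(self) -> dict:
        return {"title": self.title}


print(is_json_safe(Report("Q3")))  # True: json.dumps(Report(...)) fails, but to_dict() serializes
print(is_json_safe({1, 2, 3}))     # False: sets are not JSON-serializable and have no to_dict()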
{kailash-0.9.5.dist-info → kailash-0.9.7.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kailash
- Version: 0.9.5
+ Version: 0.9.7
  Summary: Python SDK for the Kailash container-node architecture
  Home-page: https://github.com/integrum/kailash-python-sdk
  Author: Integrum
{kailash-0.9.5.dist-info → kailash-0.9.7.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
- kailash/__init__.py,sha256=iwNjVBZF7kF09CsoOP8EUdmUe5uHc-Fdnl8RhXS2OYo,2771
+ kailash/__init__.py,sha256=E3eRrjccUAH1875857yUosDh9qAHt-Inu_Q383PARDA,2771
  kailash/__main__.py,sha256=vr7TVE5o16V6LsTmRFKG6RDKUXHpIWYdZ6Dok2HkHnI,198
  kailash/access_control.py,sha256=MjKtkoQ2sg1Mgfe7ovGxVwhAbpJKvaepPWr8dxOueMA,26058
  kailash/access_control_abac.py,sha256=FPfa_8PuDP3AxTjdWfiH3ntwWO8NodA0py9W8SE5dno,30263
@@ -140,7 +140,7 @@ kailash/monitoring/__init__.py,sha256=C5WmkNpk_mmAScqMWiCfkUbjhM5W16dsnRnc3Ial-U
  kailash/monitoring/alerts.py,sha256=eKX4ooPw1EicumPuswlR_nU18UgRETWvFg8FzCW5pVU,21416
  kailash/monitoring/metrics.py,sha256=SiAnL3o6K0QaJHgfAuWBa-0pTkW5zymhuPEsj4bgOgM,22022
  kailash/nodes/__init__.py,sha256=p2KSo0dyUBCLClU123qpQ0tyv5S_36PTxosNyW58nyY,1031
- kailash/nodes/base.py,sha256=Mre45ucevCJo8y9gt1i_Ed4QsmemGeAZgA1jfBuTIbI,81870
+ kailash/nodes/base.py,sha256=3KPCp2PDLCPGm4VHSHt8QSONLTX9y3UhQ-3ldQf4oUg,82623
  kailash/nodes/base_async.py,sha256=whxepCiVplrltfzEQuabmnGCpEV5WgfqwgxbLdCyiDk,8864
  kailash/nodes/base_cycle_aware.py,sha256=Xpze9xZzLepWeLpi9Y3tMn1dm2LVv-omr5TSQuGTtWo,13377
  kailash/nodes/base_with_acl.py,sha256=ZfrkLPgrEBcNbG0LKvtq6glDxyOYOMRw3VXX4vWX6bI,11852
@@ -166,7 +166,7 @@ kailash/nodes/ai/ai_providers.py,sha256=egfiOZzPmZ10d3wBCJ6ST4tRFrrtq0kt1VyCqxVp
  kailash/nodes/ai/embedding_generator.py,sha256=akGCzz7zLRSziqEQCiPwL2qWhRWxuM_1RQh-YtVEddw,31879
  kailash/nodes/ai/hybrid_search.py,sha256=k26uDDP_bwrIpv7Yl7PBCPvWSyQEmTlBjI1IpbgDsO4,35446
  kailash/nodes/ai/intelligent_agent_orchestrator.py,sha256=LvBqMKc64zSxFWVCjbLKKel2QwEzoTeJAEgna7rZw00,83097
- kailash/nodes/ai/iterative_llm_agent.py,sha256=T_Rtmz6E5gB0HugT2Q8FHZE9Giqy5WiBB0UCJ4PflNw,93308
+ kailash/nodes/ai/iterative_llm_agent.py,sha256=G6pQnvSJcMBxloBvLBletFdiIRZGntNaMaVx2no0igY,101273
  kailash/nodes/ai/llm_agent.py,sha256=NeNJZbV_VOUbULug2LASwyzLyoUO5wi58Bc9sXTubuc,90181
  kailash/nodes/ai/models.py,sha256=wsEeUTuegy87mnLtKgSTg7ggCXvC1n3MsL-iZ4qujHs,16393
  kailash/nodes/ai/self_organizing.py,sha256=B7NwKaBW8OHQBf5b0F9bSs8Wm-5BDJ9IjIkxS9h00mg,62885
@@ -403,9 +403,9 @@ kailash/workflow/templates.py,sha256=XQMAKZXC2dlxgMMQhSEOWAF3hIbe9JJt9j_THchhAm8
  kailash/workflow/type_inference.py,sha256=i1F7Yd_Z3elTXrthsLpqGbOnQBIVVVEjhRpI0HrIjd0,24492
  kailash/workflow/validation.py,sha256=r2zApGiiG8UEn7p5Ji842l8OR1_KftzDkWc7gg0cac0,44675
  kailash/workflow/visualization.py,sha256=nHBW-Ai8QBMZtn2Nf3EE1_aiMGi9S6Ui_BfpA5KbJPU,23187
- kailash-0.9.5.dist-info/licenses/LICENSE,sha256=Axe6g7bTrJkToK9h9j2SpRUKKNaDZDCo2lQ2zPxCE6s,1065
- kailash-0.9.5.dist-info/METADATA,sha256=CdfS313HOOe7TPgBqpJaWlrycgXFgPXz_PI3FLihVwc,22298
- kailash-0.9.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- kailash-0.9.5.dist-info/entry_points.txt,sha256=M_q3b8PG5W4XbhSgESzIJjh3_4OBKtZFYFsOdkr2vO4,45
- kailash-0.9.5.dist-info/top_level.txt,sha256=z7GzH2mxl66498pVf5HKwo5wwfPtt9Aq95uZUpH6JV0,8
- kailash-0.9.5.dist-info/RECORD,,
+ kailash-0.9.7.dist-info/licenses/LICENSE,sha256=Axe6g7bTrJkToK9h9j2SpRUKKNaDZDCo2lQ2zPxCE6s,1065
+ kailash-0.9.7.dist-info/METADATA,sha256=yo7vzKt8ikuCZCQBcsJeDPMm8VRW421kg8-U64whL0w,22298
+ kailash-0.9.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ kailash-0.9.7.dist-info/entry_points.txt,sha256=M_q3b8PG5W4XbhSgESzIJjh3_4OBKtZFYFsOdkr2vO4,45
+ kailash-0.9.7.dist-info/top_level.txt,sha256=z7GzH2mxl66498pVf5HKwo5wwfPtt9Aq95uZUpH6JV0,8
+ kailash-0.9.7.dist-info/RECORD,,