kailash 0.9.4__py3-none-any.whl → 0.9.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/nodes/ai/iterative_llm_agent.py +151 -27
- kailash/nodes/base.py +13 -0
- {kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/METADATA +17 -18
- {kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/RECORD +9 -9
- {kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/WHEEL +0 -0
- {kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/entry_points.txt +0 -0
- {kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/top_level.txt +0 -0
kailash/__init__.py
CHANGED

kailash/nodes/ai/iterative_llm_agent.py
CHANGED
@@ -310,6 +310,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
         convergence_reason = "max_iterations_reached"

         try:
+
             # Main iterative loop
             for iteration_num in range(1, max_iterations + 1):
                 iteration_state = IterationState(
@@ -694,7 +695,21 @@ class IterativeLLMAgentNode(LLMAgentNode):
             global_discoveries.get("tools", {}).values()
         )

-        #
+        # If we have no tools available, create a plan for direct LLM response
+        if not available_tools:
+            return {
+                "user_query": user_query,
+                "selected_tools": [],
+                "execution_steps": [
+                    {"step": 1, "action": "direct_llm_response", "tools": []}
+                ],
+                "expected_outcomes": ["direct_response"],
+                "resource_requirements": {},
+                "success_criteria": {"response_generated": True},
+                "planning_mode": "direct_llm",
+            }
+
+        # Create plan with available tools
         plan = {
             "user_query": user_query,
             "selected_tools": [],
@@ -702,6 +717,7 @@ class IterativeLLMAgentNode(LLMAgentNode):
             "expected_outcomes": [],
             "resource_requirements": {},
             "success_criteria": {},
+            "planning_mode": "tool_based",
         }

         # Select relevant tools
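The two hunks above introduce a `planning_mode` flag with two values. The following is a minimal, self-contained sketch of that branching, for orientation only; `build_execution_plan` and its signature are illustrative stand-ins, not the SDK's actual internal method.

```python
from typing import Any


def build_execution_plan(user_query: str, available_tools: list) -> dict[str, Any]:
    """Illustrative: mirror the no-tools short-circuit added in the diff above."""
    if not available_tools:
        # No MCP tools discovered: plan a single step answered directly by the LLM.
        return {
            "user_query": user_query,
            "selected_tools": [],
            "execution_steps": [{"step": 1, "action": "direct_llm_response", "tools": []}],
            "expected_outcomes": ["direct_response"],
            "resource_requirements": {},
            "success_criteria": {"response_generated": True},
            "planning_mode": "direct_llm",
        }
    # Tools were discovered: continue with the normal tool-based plan skeleton.
    return {
        "user_query": user_query,
        "selected_tools": [],
        "execution_steps": [],
        "expected_outcomes": [],
        "resource_requirements": {},
        "success_criteria": {},
        "planning_mode": "tool_based",
    }


assert build_execution_plan("Summarize recent metrics", [])["planning_mode"] == "direct_llm"
assert build_execution_plan("Summarize recent metrics", [{"name": "search"}])["planning_mode"] == "tool_based"
```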
@@ -754,6 +770,45 @@ class IterativeLLMAgentNode(LLMAgentNode):
         # Check if we should use real MCP tool execution
         use_real_mcp = kwargs.get("use_real_mcp", True)

+        # Handle direct LLM response mode
+        if plan.get("planning_mode") == "direct_llm":
+            try:
+                llm_response = super().run(**kwargs)
+
+                if llm_response.get("success") and llm_response.get("response"):
+                    content = llm_response["response"].get("content", "")
+                    step_result = {
+                        "step": 1,
+                        "action": "direct_llm_response",
+                        "tools_used": [],
+                        "output": content,
+                        "success": True,
+                        "duration": 2.0,  # Estimate for LLM call
+                        "llm_response": llm_response,
+                    }
+
+                    execution_results["steps_completed"].append(step_result)
+                    execution_results["intermediate_results"].append(content)
+                    execution_results["tool_outputs"]["llm_response"] = content
+                else:
+                    raise Exception(
+                        f"LLM response failed: {llm_response.get('error', 'Unknown error')}"
+                    )
+
+            except Exception as e:
+                error_result = {
+                    "step": 1,
+                    "action": "direct_llm_response",
+                    "tools_used": [],
+                    "error": str(e),
+                    "success": False,
+                }
+                execution_results["steps_completed"].append(error_result)
+                execution_results["errors"].append(str(e))
+                execution_results["success"] = False
+
+            return execution_results
+
         # Execute each step in the plan
         for step in plan.get("execution_steps", []):
             step_num = step.get("step", 0)
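For readers skimming the hunk above: when the plan carries `planning_mode == "direct_llm"`, execution now performs one base-LLM call (via `super().run(**kwargs)`), records it as step 1, and returns before the per-step tool loop. A condensed sketch of that control flow is shown below, with a stub `call_llm` standing in for the parent `LLMAgentNode.run`; names other than the dictionary keys are assumptions.

```python
from typing import Any, Callable


def execute_direct_llm(plan: dict[str, Any], call_llm: Callable[[], dict[str, Any]]) -> dict[str, Any] | None:
    """Illustrative short-circuit: return execution results for a direct_llm plan, else None."""
    if plan.get("planning_mode") != "direct_llm":
        return None  # caller falls through to the tool-based step loop

    results: dict[str, Any] = {
        "steps_completed": [],
        "intermediate_results": [],
        "tool_outputs": {},
        "errors": [],
        "success": True,
    }
    response = call_llm()  # stands in for super().run(**kwargs)
    if response.get("success") and response.get("response"):
        content = response["response"].get("content", "")
        results["steps_completed"].append(
            {"step": 1, "action": "direct_llm_response", "tools_used": [], "output": content, "success": True}
        )
        results["intermediate_results"].append(content)
        results["tool_outputs"]["llm_response"] = content
    else:
        results["errors"].append(response.get("error", "Unknown error"))
        results["success"] = False
    return results


fake_llm = lambda: {"success": True, "response": {"content": "direct answer"}}
print(execute_direct_llm({"planning_mode": "direct_llm"}, fake_llm)["tool_outputs"])
```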
@@ -761,12 +816,12 @@ class IterativeLLMAgentNode(LLMAgentNode):
             tools = step.get("tools", [])

             try:
-                if use_real_mcp:
+                if use_real_mcp and tools:
                     # Real MCP tool execution
                     step_result = self._execute_tools_with_mcp(
                         step_num, action, tools, discoveries, kwargs
                     )
-
+                elif tools:
                     # Mock tool execution for backward compatibility
                     step_result = {
                         "step": step_num,
@@ -776,20 +831,52 @@ class IterativeLLMAgentNode(LLMAgentNode):
                         "success": True,
                         "duration": 1.5,
                     }
+                else:
+                    # No tools available, try direct LLM call for this step
+                    self.logger.info(
+                        f"No tools for step {step_num}, using direct LLM call"
+                    )
+                    step_messages = [
+                        {
+                            "role": "user",
+                            "content": f"Please {action}: {plan.get('user_query', '')}",
+                        }
+                    ]
+                    step_kwargs = {**kwargs, "messages": step_messages}
+                    llm_response = super().run(**step_kwargs)
+
+                    if llm_response.get("success") and llm_response.get("response"):
+                        content = llm_response["response"].get("content", "")
+                        step_result = {
+                            "step": step_num,
+                            "action": action,
+                            "tools_used": [],
+                            "output": content,
+                            "success": True,
+                            "duration": 2.0,
+                        }
+                    else:
+                        raise Exception(f"LLM call failed for step {step_num}")

                 execution_results["steps_completed"].append(step_result)
                 execution_results["intermediate_results"].append(step_result["output"])

                 # Store tool outputs
-
-
-
-                    "tool_outputs"
-
-
-
-
-
+                if tools:
+                    for tool in tools:
+                        if step_result["success"]:
+                            execution_results["tool_outputs"][tool] = step_result.get(
+                                "tool_outputs", {}
+                            ).get(tool, step_result["output"])
+                        else:
+                            execution_results["tool_outputs"][
+                                tool
+                            ] = f"Error executing {tool}: {step_result.get('error', 'Unknown error')}"
+                else:
+                    # Store LLM response output
+                    execution_results["tool_outputs"][f"step_{step_num}_llm"] = (
+                        step_result["output"]
+                    )

             except Exception as e:
                 error_result = {
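The second half of the hunk above replaces the removed tool-output bookkeeping. The sketch below summarizes the new keying scheme using plain dicts (no SDK objects; the helper name is illustrative): tool-backed steps store one entry per tool name, while tool-less steps answered by the LLM are stored under a synthetic `step_<n>_llm` key.

```python
from typing import Any


def record_step_outputs(tool_outputs: dict[str, Any], step_num: int, tools: list[str], step_result: dict[str, Any]) -> None:
    """Illustrative: mirror how the diff keys execution_results["tool_outputs"]."""
    if tools:
        for tool in tools:
            if step_result["success"]:
                # Prefer a per-tool output if the step produced one, else the step's combined output.
                tool_outputs[tool] = step_result.get("tool_outputs", {}).get(tool, step_result["output"])
            else:
                tool_outputs[tool] = f"Error executing {tool}: {step_result.get('error', 'Unknown error')}"
    else:
        # Step had no tools and was answered by a direct LLM call.
        tool_outputs[f"step_{step_num}_llm"] = step_result["output"]


outputs: dict[str, Any] = {}
record_step_outputs(outputs, 1, [], {"success": True, "output": "LLM text"})
record_step_outputs(outputs, 2, ["search"], {"success": True, "output": "search results"})
print(outputs)  # {'step_1_llm': 'LLM text', 'search': 'search results'}
```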
@@ -1520,7 +1607,58 @@ class IterativeLLMAgentNode(LLMAgentNode):
             )
             all_insights.extend(goals_achieved)

-        #
+        # If we have no meaningful results from iterations, fall back to base LLM agent
+        if not all_results and not all_insights:
+            self.logger.info(
+                "No iterative results found, falling back to base LLM response"
+            )
+            try:
+                # Use parent's run method to get a proper LLM response
+                base_response = super().run(**kwargs)
+                if base_response.get("success") and base_response.get("response"):
+                    return base_response["response"].get("content", "")
+            except Exception as e:
+                self.logger.warning(f"Base LLM fallback failed: {e}")
+
+        # Create synthesized response using LLM
+        synthesis_messages = [
+            {
+                "role": "system",
+                "content": """You are an AI assistant synthesizing results from an iterative analysis process.
+Create a comprehensive, helpful response based on the findings from multiple iterations of analysis.""",
+            },
+            {
+                "role": "user",
+                "content": f"""Original query: {user_query}
+
+Results from {len(iterations)} iterations:
+{chr(10).join(all_results[:10]) if all_results else "No specific results generated"}
+
+Insights achieved:
+{chr(10).join(all_insights[:5]) if all_insights else "No specific insights achieved"}
+
+Please provide a comprehensive response to the original query based on these findings. If the findings are limited,
+provide your best analysis of the query directly.""",
+            },
+        ]
+
+        try:
+            # Use the parent's LLM capabilities to generate synthesis
+            synthesis_kwargs = {
+                "provider": kwargs.get("provider", "openai"),
+                "model": kwargs.get("model", "gpt-4"),
+                "messages": synthesis_messages,
+                "temperature": kwargs.get("temperature", 0.7),
+                "max_tokens": kwargs.get("max_tokens", 1000),
+            }
+
+            synthesis_response = super().run(**synthesis_kwargs)
+            if synthesis_response.get("success") and synthesis_response.get("response"):
+                return synthesis_response["response"].get("content", "")
+        except Exception as e:
+            self.logger.warning(f"LLM synthesis failed: {e}")
+
+        # Fallback to basic synthesis if LLM fails
         synthesis = f"## Analysis Results for: {user_query}\n\n"

         if all_results:
@@ -1539,20 +1677,6 @@ class IterativeLLMAgentNode(LLMAgentNode):
                 f"- {successful_iterations}/{len(iterations)} iterations successful\n\n"
             )

-        # Add confidence and evidence
-        final_confidence = 0.8  # Mock final confidence
-        synthesis += f"### Confidence: {final_confidence:.1%}\n"
-        synthesis += f"Based on analysis using {len(global_discoveries.get('tools', {}))} MCP tools and comprehensive iterative processing.\n\n"
-
-        # Add recommendations if analysis-focused
-        if "analyze" in user_query.lower() or "recommend" in user_query.lower():
-            synthesis += "### Recommendations:\n"
-            synthesis += (
-                "1. Continue monitoring key metrics identified in this analysis\n"
-            )
-            synthesis += "2. Consider implementing suggested improvements\n"
-            synthesis += "3. Review findings with stakeholders for validation\n"
-
         return synthesis

     def _update_global_discoveries(
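These two hunks turn synthesis into a three-stage fallback: answer directly via the base agent when the iterations produced nothing, otherwise ask the LLM to synthesize the collected findings, and only then fall back to the plain markdown summary (the hard-coded confidence figure and canned recommendations were removed). A compressed sketch of that cascade follows, with `call_llm` as a stub for `super().run()` and a much shorter prompt than the real one.

```python
from typing import Any, Callable


def synthesize(user_query: str, all_results: list[str], all_insights: list[str],
               call_llm: Callable[[dict[str, Any]], dict[str, Any]]) -> str:
    """Illustrative three-stage fallback matching the structure of the diff above."""
    # 1) Nothing useful from the iterations: let the base LLM answer the query directly.
    if not all_results and not all_insights:
        direct = call_llm({"messages": [{"role": "user", "content": user_query}]})
        if direct.get("success") and direct.get("response"):
            return direct["response"].get("content", "")

    # 2) Ask the LLM to synthesize whatever findings were collected.
    prompt = f"Original query: {user_query}\nFindings:\n" + "\n".join(all_results[:10])
    synth = call_llm({"messages": [{"role": "user", "content": prompt}]})
    if synth.get("success") and synth.get("response"):
        return synth["response"].get("content", "")

    # 3) Last resort: a plain markdown summary built from the raw results.
    return f"## Analysis Results for: {user_query}\n\n" + "\n".join(f"- {r}" for r in all_results)


offline = lambda _request: {"success": False}
print(synthesize("What changed?", ["iteration 1 found X"], [], offline))
```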
kailash/nodes/base.py
CHANGED
@@ -1068,6 +1068,9 @@ class Node(ABC):
         if an object can be serialized. Used by validate_outputs()
         to identify problematic values.

+        Enhanced to recognize objects with .to_dict() methods and validate their output.
+        This resolves platform-specific serialization issues between LocalRuntime and Nexus.
+
         Args:
             obj: Any object to test for JSON serializability

@@ -1081,6 +1084,16 @@ class Node(ABC):
             json.dumps(obj)
             return True
         except (TypeError, ValueError):
+            # Check if object has .to_dict() method for enhanced validation
+            if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")):
+                try:
+                    dict_result = obj.to_dict()
+                    # Validate that .to_dict() result is actually serializable
+                    json.dumps(dict_result)
+                    return True
+                except (TypeError, ValueError, AttributeError):
+                    # If .to_dict() fails or returns non-serializable data, fall back to False
+                    return False
             return False

     def execute(self, **runtime_inputs) -> dict[str, Any]:
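As a standalone illustration of the check above (a sketch, not the SDK method itself): an object that fails `json.dumps` directly is now still considered serializable if it exposes a callable `to_dict()` whose result does serialize.

```python
import json
from typing import Any


def is_serializable(obj: Any) -> bool:
    """Sketch of the extended check: direct json.dumps, then a to_dict() fallback."""
    try:
        json.dumps(obj)
        return True
    except (TypeError, ValueError):
        if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")):
            try:
                json.dumps(obj.to_dict())
                return True
            except (TypeError, ValueError, AttributeError):
                return False
        return False


class Report:
    """Hypothetical node output object that knows how to serialize itself."""

    def __init__(self, score: float) -> None:
        self.score = score

    def to_dict(self) -> dict[str, float]:
        return {"score": self.score}


print(is_serializable(Report(0.93)))  # True: to_dict() output is JSON-serializable
print(is_serializable(object()))      # False: not serializable and no to_dict()
```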
{kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kailash
-Version: 0.9.4
+Version: 0.9.6
 Summary: Python SDK for the Kailash container-node architecture
 Home-page: https://github.com/integrum/kailash-python-sdk
 Author: Integrum
@@ -117,28 +117,27 @@ Dynamic: requires-python

 ---

-## 🔥 Latest Release: v0.9.
+## 🔥 Latest Release: v0.9.5 (July 31, 2025)

-**
+**PythonCodeNode Serialization Stability & Testing**

-###
-- **
-- **
-- **
-- **Impact**:
+### 🧪 Comprehensive Regression Test Suite (TODO-129)
+- **Added**: 33 unit tests validating PythonCodeNode serialization behavior and JSON compatibility
+- **Added**: 6 integration tests with real infrastructure using only allowed modules
+- **Added**: 6 E2E tests for complete business workflows and multi-channel consistency
+- **Impact**: Ensures serialization fix from commit 2fcf8591 (June 11, 2025) stays stable

-###
-- **
-- **
-- **
-- **Config**: `LocalRuntime(content_aware_success_detection=True)`
+### 📚 Documentation Clarifications
+- **Updated**: PythonCodeNode output structure documentation (all outputs wrapped in "result" key)
+- **Enhanced**: Serialization consistency notes for multi-channel deployment
+- **Improved**: Testing patterns for node serialization validation

-###
--
--
--
+### 🛡️ Stability Improvements
+- **Focus**: Testing and documentation release to ensure PythonCodeNode serialization remains stable
+- **Validation**: Complete test coverage for serialization edge cases
+- **Quality**: Comprehensive regression prevention for critical workflow components

-[Full Changelog](sdk-users/6-reference/changelogs/releases/v0.9.
+[Full Changelog](sdk-users/6-reference/changelogs/releases/v0.9.5-2025-07-31.md) | [Core SDK 0.9.5](https://pypi.org/project/kailash/0.9.5/) | [DataFlow 0.3.4](https://pypi.org/project/kailash-dataflow/0.3.4/)

 ## 🎯 What Makes Kailash Different

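The "wrapped in 'result' key" note above refers to how PythonCodeNode outputs appear in runtime results. The snippet below is a hypothetical illustration of that shape only; the node ID and payload are made up, and the dict is checked with `json.dumps` because JSON serializability is the property the regression tests guard.

```python
import json

# Hypothetical shape: whatever the node's code assigns to `result` is reported
# under a single "result" key for that node's ID in the runtime results dict.
results = {"my_code_node": {"result": {"value": 2, "label": "sum"}}}
assert results["my_code_node"]["result"]["value"] == 2

# Multi-channel consistency hinges on this payload staying JSON-serializable.
json.dumps(results)
```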
{kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-kailash/__init__.py,sha256=
+kailash/__init__.py,sha256=4FAW32FbWRGxkLCykiHb-3-1ri0JkYDO_5s0fkjshX0,2771
 kailash/__main__.py,sha256=vr7TVE5o16V6LsTmRFKG6RDKUXHpIWYdZ6Dok2HkHnI,198
 kailash/access_control.py,sha256=MjKtkoQ2sg1Mgfe7ovGxVwhAbpJKvaepPWr8dxOueMA,26058
 kailash/access_control_abac.py,sha256=FPfa_8PuDP3AxTjdWfiH3ntwWO8NodA0py9W8SE5dno,30263
@@ -140,7 +140,7 @@ kailash/monitoring/__init__.py,sha256=C5WmkNpk_mmAScqMWiCfkUbjhM5W16dsnRnc3Ial-U
 kailash/monitoring/alerts.py,sha256=eKX4ooPw1EicumPuswlR_nU18UgRETWvFg8FzCW5pVU,21416
 kailash/monitoring/metrics.py,sha256=SiAnL3o6K0QaJHgfAuWBa-0pTkW5zymhuPEsj4bgOgM,22022
 kailash/nodes/__init__.py,sha256=p2KSo0dyUBCLClU123qpQ0tyv5S_36PTxosNyW58nyY,1031
-kailash/nodes/base.py,sha256=
+kailash/nodes/base.py,sha256=3KPCp2PDLCPGm4VHSHt8QSONLTX9y3UhQ-3ldQf4oUg,82623
 kailash/nodes/base_async.py,sha256=whxepCiVplrltfzEQuabmnGCpEV5WgfqwgxbLdCyiDk,8864
 kailash/nodes/base_cycle_aware.py,sha256=Xpze9xZzLepWeLpi9Y3tMn1dm2LVv-omr5TSQuGTtWo,13377
 kailash/nodes/base_with_acl.py,sha256=ZfrkLPgrEBcNbG0LKvtq6glDxyOYOMRw3VXX4vWX6bI,11852
@@ -166,7 +166,7 @@ kailash/nodes/ai/ai_providers.py,sha256=egfiOZzPmZ10d3wBCJ6ST4tRFrrtq0kt1VyCqxVp
 kailash/nodes/ai/embedding_generator.py,sha256=akGCzz7zLRSziqEQCiPwL2qWhRWxuM_1RQh-YtVEddw,31879
 kailash/nodes/ai/hybrid_search.py,sha256=k26uDDP_bwrIpv7Yl7PBCPvWSyQEmTlBjI1IpbgDsO4,35446
 kailash/nodes/ai/intelligent_agent_orchestrator.py,sha256=LvBqMKc64zSxFWVCjbLKKel2QwEzoTeJAEgna7rZw00,83097
-kailash/nodes/ai/iterative_llm_agent.py,sha256=
+kailash/nodes/ai/iterative_llm_agent.py,sha256=Ed5hOuIrUev7hR5XzAfpfNHRjcZkJYSO_aOZsvvQDkI,98619
 kailash/nodes/ai/llm_agent.py,sha256=NeNJZbV_VOUbULug2LASwyzLyoUO5wi58Bc9sXTubuc,90181
 kailash/nodes/ai/models.py,sha256=wsEeUTuegy87mnLtKgSTg7ggCXvC1n3MsL-iZ4qujHs,16393
 kailash/nodes/ai/self_organizing.py,sha256=B7NwKaBW8OHQBf5b0F9bSs8Wm-5BDJ9IjIkxS9h00mg,62885
@@ -403,9 +403,9 @@ kailash/workflow/templates.py,sha256=XQMAKZXC2dlxgMMQhSEOWAF3hIbe9JJt9j_THchhAm8
 kailash/workflow/type_inference.py,sha256=i1F7Yd_Z3elTXrthsLpqGbOnQBIVVVEjhRpI0HrIjd0,24492
 kailash/workflow/validation.py,sha256=r2zApGiiG8UEn7p5Ji842l8OR1_KftzDkWc7gg0cac0,44675
 kailash/workflow/visualization.py,sha256=nHBW-Ai8QBMZtn2Nf3EE1_aiMGi9S6Ui_BfpA5KbJPU,23187
-kailash-0.9.
-kailash-0.9.
-kailash-0.9.
-kailash-0.9.
-kailash-0.9.
-kailash-0.9.
+kailash-0.9.6.dist-info/licenses/LICENSE,sha256=Axe6g7bTrJkToK9h9j2SpRUKKNaDZDCo2lQ2zPxCE6s,1065
+kailash-0.9.6.dist-info/METADATA,sha256=ITS5v3xASITLatpJV0zFwF24A7gv4afVqPU00beunMQ,22298
+kailash-0.9.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+kailash-0.9.6.dist-info/entry_points.txt,sha256=M_q3b8PG5W4XbhSgESzIJjh3_4OBKtZFYFsOdkr2vO4,45
+kailash-0.9.6.dist-info/top_level.txt,sha256=z7GzH2mxl66498pVf5HKwo5wwfPtt9Aq95uZUpH6JV0,8
+kailash-0.9.6.dist-info/RECORD,,
{kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/WHEEL: file without changes
{kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/entry_points.txt: file without changes
{kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/licenses/LICENSE: file without changes
{kailash-0.9.4.dist-info → kailash-0.9.6.dist-info}/top_level.txt: file without changes