kite-agent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kite/__init__.py +46 -0
- kite/ab_testing.py +384 -0
- kite/agent.py +556 -0
- kite/agents/__init__.py +3 -0
- kite/agents/plan_execute.py +191 -0
- kite/agents/react_agent.py +509 -0
- kite/agents/reflective_agent.py +90 -0
- kite/agents/rewoo.py +119 -0
- kite/agents/tot.py +151 -0
- kite/conversation.py +125 -0
- kite/core.py +974 -0
- kite/data_loaders.py +111 -0
- kite/embedding_providers.py +372 -0
- kite/llm_providers.py +1278 -0
- kite/memory/__init__.py +6 -0
- kite/memory/advanced_rag.py +333 -0
- kite/memory/graph_rag.py +719 -0
- kite/memory/session_memory.py +423 -0
- kite/memory/vector_memory.py +579 -0
- kite/monitoring.py +611 -0
- kite/observers.py +107 -0
- kite/optimization/__init__.py +9 -0
- kite/optimization/resource_router.py +80 -0
- kite/persistence.py +42 -0
- kite/pipeline/__init__.py +5 -0
- kite/pipeline/deterministic_pipeline.py +323 -0
- kite/pipeline/reactive_pipeline.py +171 -0
- kite/pipeline_manager.py +15 -0
- kite/routing/__init__.py +6 -0
- kite/routing/aggregator_router.py +325 -0
- kite/routing/llm_router.py +149 -0
- kite/routing/semantic_router.py +228 -0
- kite/safety/__init__.py +6 -0
- kite/safety/circuit_breaker.py +360 -0
- kite/safety/guardrails.py +82 -0
- kite/safety/idempotency_manager.py +304 -0
- kite/safety/kill_switch.py +75 -0
- kite/tool.py +183 -0
- kite/tool_registry.py +87 -0
- kite/tools/__init__.py +21 -0
- kite/tools/code_execution.py +53 -0
- kite/tools/contrib/__init__.py +19 -0
- kite/tools/contrib/calculator.py +26 -0
- kite/tools/contrib/datetime_utils.py +20 -0
- kite/tools/contrib/linkedin.py +428 -0
- kite/tools/contrib/web_search.py +30 -0
- kite/tools/mcp/__init__.py +31 -0
- kite/tools/mcp/database_mcp.py +267 -0
- kite/tools/mcp/gdrive_mcp_server.py +503 -0
- kite/tools/mcp/gmail_mcp_server.py +601 -0
- kite/tools/mcp/postgres_mcp_server.py +490 -0
- kite/tools/mcp/slack_mcp_server.py +538 -0
- kite/tools/mcp/stripe_mcp_server.py +219 -0
- kite/tools/search.py +90 -0
- kite/tools/system_tools.py +54 -0
- kite/tools_manager.py +27 -0
- kite_agent-0.1.0.dist-info/METADATA +621 -0
- kite_agent-0.1.0.dist-info/RECORD +61 -0
- kite_agent-0.1.0.dist-info/WHEEL +5 -0
- kite_agent-0.1.0.dist-info/licenses/LICENSE +21 -0
- kite_agent-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Plan-and-Execute Agent
|
|
3
|
+
Decomposes complex goals into steps before execution.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
from typing import List, Dict, Optional, Any
|
|
8
|
+
from ..agent import Agent
|
|
9
|
+
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
|
|
12
|
+
@dataclass
class Plan:
    """A plan with steps.

    Produced by PlanExecuteAgent._create_plan_obj from an LLM response and
    consumed by the execute loop in run_plan.
    """
    # Natural-language description of the overall objective being planned for.
    goal: str
    # Ordered, human-readable step descriptions to execute one at a time.
    steps: List[str]
    # Step -> prerequisite-steps mapping.
    # NOTE(review): never populated or read anywhere in this module — confirm
    # whether dependency tracking is planned or this field can be dropped.
    dependencies: Dict[str, List[str]] = field(default_factory=dict)
|
|
18
|
+
|
|
19
|
+
class PlanExecuteAgent(Agent):
    """
    Agent that implements the Plan-and-Execute pattern.
    1. Plan: Decompose goal into steps upfront.
    2. Execute: Run each step sequentially.
    3. Re-plan: Adjust remaining steps if needed.
    """

    def __init__(self, name, system_prompt, tools, framework, llm=None, max_iterations=10, verbose=False):
        """Initialize the agent; all arguments are forwarded unchanged to Agent.

        Note: max_iterations doubles as the cap on plan length (see run_plan).
        """
        # Thin wrapper — construction is fully delegated to the Agent base class.
        super().__init__(name, system_prompt, tools, framework, llm=llm, max_iterations=max_iterations, verbose=verbose)
|
|
29
|
+
|
|
30
|
+
async def run(self, goal: str, context: Optional[Dict] = None) -> Dict[str, Any]:
|
|
31
|
+
"""Override base run to use plan-and-execute logic for this agent."""
|
|
32
|
+
return await self.run_plan(goal, context)
|
|
33
|
+
|
|
34
|
+
async def run_plan(self, goal: str, context: Optional[Dict] = None) -> Dict[str, Any]:
|
|
35
|
+
"""
|
|
36
|
+
Run the planning and execution loop.
|
|
37
|
+
"""
|
|
38
|
+
print(f"\n[PlanAndExecute] Goal: {goal}")
|
|
39
|
+
|
|
40
|
+
# Step 1: PLAN
|
|
41
|
+
print(" [Step 1] Creating initial plan...")
|
|
42
|
+
plan_obj = await self._create_plan_obj(goal, context)
|
|
43
|
+
|
|
44
|
+
# Limit plan to max_iterations
|
|
45
|
+
if len(plan_obj.steps) > self.max_iterations:
|
|
46
|
+
print(f" [WARNING] Plan has {len(plan_obj.steps)} steps, truncating to {self.max_iterations}")
|
|
47
|
+
plan_obj.steps = plan_obj.steps[:self.max_iterations]
|
|
48
|
+
|
|
49
|
+
print(f" [OK] Generated {len(plan_obj.steps)} steps")
|
|
50
|
+
|
|
51
|
+
results = []
|
|
52
|
+
# Step 2: EXECUTE
|
|
53
|
+
for i, step in enumerate(plan_obj.steps, 1):
|
|
54
|
+
print(f"\n--- Step {i}/{len(plan_obj.steps)}: {step} ---")
|
|
55
|
+
|
|
56
|
+
# Execute step
|
|
57
|
+
result = await self._execute_step(step, results, context)
|
|
58
|
+
results.append(result)
|
|
59
|
+
|
|
60
|
+
# Check if we need to replan
|
|
61
|
+
if result.get("needs_replan"):
|
|
62
|
+
print(" [PlanAndExecute] Replanning required...")
|
|
63
|
+
new_steps = await self._replan(goal, results, plan_obj.steps[i:])
|
|
64
|
+
plan_obj.steps = plan_obj.steps[:i] + new_steps
|
|
65
|
+
print(f" [OK] Updated plan with {len(new_steps)} remaining steps")
|
|
66
|
+
|
|
67
|
+
# Step 3: SYNTHESIZE
|
|
68
|
+
print("\n [Step 3] Synthesizing final answer...")
|
|
69
|
+
final_answer = await self._synthesize_results(goal, results)
|
|
70
|
+
|
|
71
|
+
return {
|
|
72
|
+
"success": all(r.get('success', False) for r in results),
|
|
73
|
+
"goal": goal,
|
|
74
|
+
"plan": plan_obj.steps,
|
|
75
|
+
"results": results,
|
|
76
|
+
"answer": final_answer
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
async def _create_plan_obj(self, goal: str, context: Optional[Dict]) -> Plan:
|
|
80
|
+
"""Create a plan using LLM."""
|
|
81
|
+
tools_desc = "\n".join([f"- {name}: {t.description}" for name, t in self.tools.items()])
|
|
82
|
+
|
|
83
|
+
prompt = f"""You are a strategic planner. Create a step-by-step plan to achieve this goal.
|
|
84
|
+
|
|
85
|
+
Goal: {goal}
|
|
86
|
+
|
|
87
|
+
Available tools:
|
|
88
|
+
{tools_desc}
|
|
89
|
+
|
|
90
|
+
{f"Context: {json.dumps(context)}" if context else "None"}
|
|
91
|
+
|
|
92
|
+
Create a detailed plan with actionable steps.
|
|
93
|
+
Respond with JSON:
|
|
94
|
+
{{
|
|
95
|
+
"reasoning": "Why this plan will work",
|
|
96
|
+
"steps": [
|
|
97
|
+
"Step 1: ...",
|
|
98
|
+
"Step 2: ..."
|
|
99
|
+
]
|
|
100
|
+
}}"""
|
|
101
|
+
|
|
102
|
+
response = await self._llm_complete(prompt)
|
|
103
|
+
|
|
104
|
+
try:
|
|
105
|
+
import re
|
|
106
|
+
json_match = re.search(r'\{.*\}', response, re.DOTALL)
|
|
107
|
+
if json_match:
|
|
108
|
+
data = json.loads(json_match.group(0))
|
|
109
|
+
steps = data.get("steps", [])
|
|
110
|
+
|
|
111
|
+
# Robust step parsing: handle if LLM returns dicts instead of strings
|
|
112
|
+
processed_steps = []
|
|
113
|
+
for s in steps:
|
|
114
|
+
if isinstance(s, dict):
|
|
115
|
+
# Use description or some field if available, else stringify
|
|
116
|
+
processed_steps.append(s.get("description", s.get("step", str(s))))
|
|
117
|
+
else:
|
|
118
|
+
processed_steps.append(str(s))
|
|
119
|
+
|
|
120
|
+
print(f" [Plan Reasoning] {data.get('reasoning', 'No reasoning provided')}")
|
|
121
|
+
return Plan(goal=goal, steps=processed_steps)
|
|
122
|
+
return Plan(goal=goal, steps=[response.strip()])
|
|
123
|
+
except Exception as e:
|
|
124
|
+
print(f" [Error] Failed to parse plan JSON: {e}")
|
|
125
|
+
return Plan(goal=goal, steps=[goal])
|
|
126
|
+
|
|
127
|
+
async def _execute_step(self, step: str, previous_results: List, context: Optional[Dict]) -> Dict:
|
|
128
|
+
"""Execute a single step."""
|
|
129
|
+
# Use history from previous results for context
|
|
130
|
+
history = "\n".join([
|
|
131
|
+
f"Step: {r.get('step')}\nResult: {r.get('result')}"
|
|
132
|
+
for r in previous_results[-3:]
|
|
133
|
+
])
|
|
134
|
+
|
|
135
|
+
# We call the super().run to avoid infinite recursion while still using the agent's core engine
|
|
136
|
+
step_result = await super().run(step, context=context)
|
|
137
|
+
|
|
138
|
+
return {
|
|
139
|
+
"step": step,
|
|
140
|
+
"result": step_result.get('response', 'Error'),
|
|
141
|
+
"success": step_result.get('success', False),
|
|
142
|
+
"needs_replan": "ERROR" in str(step_result.get('response', '')).upper() or not step_result.get('success')
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
async def _replan(self, goal: str, completed_steps: List, remaining_steps: List[str]) -> List[str]:
|
|
146
|
+
"""Replan if something went wrong."""
|
|
147
|
+
prompt = f"""Something went wrong or the environment changed. Create a new plan for the remaining work.
|
|
148
|
+
|
|
149
|
+
Original goal: {goal}
|
|
150
|
+
|
|
151
|
+
Completed steps:
|
|
152
|
+
{json.dumps([{str(r['step']): r['result']} for r in completed_steps], indent=2)}
|
|
153
|
+
|
|
154
|
+
Remaining steps originally planned:
|
|
155
|
+
{json.dumps(remaining_steps, indent=2)}
|
|
156
|
+
|
|
157
|
+
Create a new plan to complete the goal.
|
|
158
|
+
Respond with JSON array of new steps:
|
|
159
|
+
["New Step 1", "New Step 2"]"""
|
|
160
|
+
|
|
161
|
+
response = await self._llm_complete(prompt)
|
|
162
|
+
|
|
163
|
+
try:
|
|
164
|
+
import re
|
|
165
|
+
json_match = re.search(r'\[.*\]', response, re.DOTALL)
|
|
166
|
+
if json_match:
|
|
167
|
+
return json.loads(json_match.group(0))
|
|
168
|
+
return remaining_steps
|
|
169
|
+
except:
|
|
170
|
+
return remaining_steps
|
|
171
|
+
|
|
172
|
+
async def _synthesize_results(self, goal: str, results: List[Dict]) -> str:
|
|
173
|
+
"""Synthesize all results into final answer."""
|
|
174
|
+
prompt = f"""Synthesize the results to answer the goal.
|
|
175
|
+
|
|
176
|
+
Goal: {goal}
|
|
177
|
+
|
|
178
|
+
Execution Results:
|
|
179
|
+
{json.dumps(results, indent=2)}
|
|
180
|
+
|
|
181
|
+
Provide a clear, comprehensive answer to the original goal."""
|
|
182
|
+
|
|
183
|
+
return await self._llm_complete(prompt)
|
|
184
|
+
|
|
185
|
+
async def _llm_complete(self, prompt: str) -> str:
|
|
186
|
+
"""Helper for LLM completion."""
|
|
187
|
+
if hasattr(self.llm, 'complete_async'):
|
|
188
|
+
return await self.llm.complete_async(prompt)
|
|
189
|
+
else:
|
|
190
|
+
import asyncio
|
|
191
|
+
return await asyncio.to_thread(self.llm.complete, prompt)
|