kite-agent 0.1.0 (kite_agent-0.1.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kite/__init__.py +46 -0
- kite/ab_testing.py +384 -0
- kite/agent.py +556 -0
- kite/agents/__init__.py +3 -0
- kite/agents/plan_execute.py +191 -0
- kite/agents/react_agent.py +509 -0
- kite/agents/reflective_agent.py +90 -0
- kite/agents/rewoo.py +119 -0
- kite/agents/tot.py +151 -0
- kite/conversation.py +125 -0
- kite/core.py +974 -0
- kite/data_loaders.py +111 -0
- kite/embedding_providers.py +372 -0
- kite/llm_providers.py +1278 -0
- kite/memory/__init__.py +6 -0
- kite/memory/advanced_rag.py +333 -0
- kite/memory/graph_rag.py +719 -0
- kite/memory/session_memory.py +423 -0
- kite/memory/vector_memory.py +579 -0
- kite/monitoring.py +611 -0
- kite/observers.py +107 -0
- kite/optimization/__init__.py +9 -0
- kite/optimization/resource_router.py +80 -0
- kite/persistence.py +42 -0
- kite/pipeline/__init__.py +5 -0
- kite/pipeline/deterministic_pipeline.py +323 -0
- kite/pipeline/reactive_pipeline.py +171 -0
- kite/pipeline_manager.py +15 -0
- kite/routing/__init__.py +6 -0
- kite/routing/aggregator_router.py +325 -0
- kite/routing/llm_router.py +149 -0
- kite/routing/semantic_router.py +228 -0
- kite/safety/__init__.py +6 -0
- kite/safety/circuit_breaker.py +360 -0
- kite/safety/guardrails.py +82 -0
- kite/safety/idempotency_manager.py +304 -0
- kite/safety/kill_switch.py +75 -0
- kite/tool.py +183 -0
- kite/tool_registry.py +87 -0
- kite/tools/__init__.py +21 -0
- kite/tools/code_execution.py +53 -0
- kite/tools/contrib/__init__.py +19 -0
- kite/tools/contrib/calculator.py +26 -0
- kite/tools/contrib/datetime_utils.py +20 -0
- kite/tools/contrib/linkedin.py +428 -0
- kite/tools/contrib/web_search.py +30 -0
- kite/tools/mcp/__init__.py +31 -0
- kite/tools/mcp/database_mcp.py +267 -0
- kite/tools/mcp/gdrive_mcp_server.py +503 -0
- kite/tools/mcp/gmail_mcp_server.py +601 -0
- kite/tools/mcp/postgres_mcp_server.py +490 -0
- kite/tools/mcp/slack_mcp_server.py +538 -0
- kite/tools/mcp/stripe_mcp_server.py +219 -0
- kite/tools/search.py +90 -0
- kite/tools/system_tools.py +54 -0
- kite/tools_manager.py +27 -0
- kite_agent-0.1.0.dist-info/METADATA +621 -0
- kite_agent-0.1.0.dist-info/RECORD +61 -0
- kite_agent-0.1.0.dist-info/WHEEL +5 -0
- kite_agent-0.1.0.dist-info/licenses/LICENSE +21 -0
- kite_agent-0.1.0.dist-info/top_level.txt +1 -0
kite/agents/rewoo.py
ADDED
@@ -0,0 +1,119 @@
"""
ReWOO Agent - Reasoning WithOut Observation
Executes multiple steps in parallel by pre-planning independent tasks.
"""

import json
import re
import asyncio
from typing import List, Dict, Optional, Any
from ..agent import Agent

class ReWOOAgent(Agent):
    """
    Agent that implements the ReWOO (Reasoning WithOut Observation) pattern.
    1. Plan: Create a graph of tasks with variable placeholders (#E1, #E2).
    2. Execute: Resolve dependencies and run tasks (parallel where possible).
    3. Solver: Combine results for final answer.
    """

    def __init__(self, name, system_prompt, tools, framework, llm=None, max_iterations=10, verbose=False):
        super().__init__(name, system_prompt, tools, framework, llm=llm, max_iterations=max_iterations, verbose=verbose)

    async def run_rewoo(self, goal: str, context: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Run the ReWOO loop.
        """
        print(f"\n[ReWOO] Goal: {goal}")

        # Step 1: Create Execution Plan
        print("  [Step 1] Creating execution graph...")
        plan_str = await self._generate_rewoo_plan(goal, context)
        parsing = self._parse_plan(plan_str)

        # Limit steps to max_iterations
        if len(parsing) > self.max_iterations:
            print(f"  [WARNING] ReWOO plan has {len(parsing)} steps, truncating to {self.max_iterations}")
            parsing = parsing[:self.max_iterations]

        print(f"  [OK] Planned {len(parsing)} steps")

        # Step 2: Execute
        print("  [Step 2] Executing steps...")
        results = await self._execute_plan(parsing, context)

        # Step 3: Solve
        print("  [Step 3] Solving for final answer...")
        final_answer = await self._solve(goal, results)

        return {
            "success": True,
            "goal": goal,
            "plan": parsing,
            "results": results,
            "answer": final_answer
        }

    async def _generate_rewoo_plan(self, goal: str, context: Optional[Dict] = None) -> str:
        tool_desc = "\n".join([f"- {n}: {t.description}" for n, t in self.tools.items()])

        prompt = f"""You are a ReWOO planner. Create a plan to achieve the goal using available tools.
Express the plan as a series of steps using placeholders like #E1, #E2 for results.

Available Tools:
{tool_desc}

Goal: {goal}
{f"Context: {json.dumps(context)}" if context else ""}

Format:
Plan: [reasoning]
#E1 = [tool_name] with [args, can use #E0, etc]
Plan: [more reasoning]
#E2 = [tool_name] with [args]
"""
        if hasattr(self.llm, 'complete_async'):
            return await self.llm.complete_async(prompt)
        return await asyncio.to_thread(self.llm.complete, prompt)

    def _parse_plan(self, plan_str: str) -> List[Dict]:
        steps = []
        # Find lines starting with #E[digit]
        matches = re.findall(r'#E(\d+)\s*=\s*(\w+)\s+(.*)', plan_str)
        for m in matches:
            steps.append({
                "id": f"#E{m[0]}",
                "tool": m[1],
                "args": m[2].strip()
            })
        return steps

    async def _execute_plan(self, steps: List[Dict], context: Optional[Dict] = None) -> Dict[str, Any]:
        results = {}

        for step in steps:
            # Resolve placeholders in args
            resolved_args = step['args']
            for eid, res in results.items():
                if eid in resolved_args:
                    resolved_args = resolved_args.replace(eid, str(res))

            print(f"    Running {step['id']} ({step['tool']})...")
            # Execute step
            step_res = await self.run(f"Use {step['tool']} to {resolved_args}", context=context)
            results[step['id']] = step_res.get('response', 'Error')

        return results

    async def _solve(self, goal: str, results: Dict[str, Any]) -> str:
        prompt = f"""Based on the following execution results, provide a final comprehensive answer for the goal.

Goal: {goal}

Results:
{json.dumps(results, indent=2)}

Final Answer:"""
        if hasattr(self.llm, 'complete_async'):
            return await self.llm.complete_async(prompt)
        return await asyncio.to_thread(self.llm.complete, prompt)
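The snippet below is a standalone sketch, not shipped in the package, of the plan format the ReWOO planner prompt asks for and how _parse_plan and _execute_plan treat it: the regex extracts "#E<n> = <tool> <args>" steps, and earlier results are substituted into later arguments before dispatch. The sample plan text, tool names, and the fake #E1 result are illustrative assumptions.

# Standalone sketch (not part of the package): parse a ReWOO-style plan string
# with the same regex as ReWOOAgent._parse_plan, then resolve an earlier
# result into a later step's args the way _execute_plan does.
import re

plan_str = """Plan: First find the population of France.
#E1 = web_search with "population of France"
Plan: Then double that number.
#E2 = calculator with "#E1 * 2"
"""

# "#E<n> = <tool> <rest-of-line>"  (the literal "with ..." stays inside args)
steps = [
    {"id": f"#E{n}", "tool": tool, "args": args.strip()}
    for n, tool, args in re.findall(r'#E(\d+)\s*=\s*(\w+)\s+(.*)', plan_str)
]
print(steps[0])
# {'id': '#E1', 'tool': 'web_search', 'args': 'with "population of France"'}

# Pretend #E1 already ran; substitute its result into #E2's args before dispatch.
results = {"#E1": "68 million"}
resolved_args = steps[1]["args"]
for eid, res in results.items():
    if eid in resolved_args:
        resolved_args = resolved_args.replace(eid, str(res))
print(resolved_args)
# with "68 million * 2"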
kite/agents/tot.py
ADDED
@@ -0,0 +1,151 @@
"""
Tree-of-Thoughts (ToT) Agent
Implements multi-path reasoning by generating and evaluating multiple "thoughts" at each step.
"""

import json
import asyncio
from typing import List, Dict, Optional, Any
from ..agent import Agent

class TreeOfThoughtsAgent(Agent):
    """
    Agent that implements the Tree-of-Thoughts pattern.
    Explores multiple reasoning paths and selects the best one.
    """

    def __init__(self, name, system_prompt, tools, framework, llm=None, max_iterations=3, branches=3, verbose=False):
        super().__init__(name, system_prompt, tools, framework, llm=llm, max_iterations=max_iterations, verbose=verbose)
        self.branches = branches

    async def run(self, problem: str, context: Optional[Dict] = None) -> Dict[str, Any]:
        """Override base run to use ToT logic."""
        return await self.solve_tot(problem)

    async def solve_tot(self, goal: str, max_steps: Optional[int] = None, num_thoughts: Optional[int] = None) -> Dict[str, Any]:
        """
        Run the ToT search loop.
        """
        depth = max_steps or self.max_iterations
        branches = num_thoughts or self.branches

        print(f"\n[TreeOfThoughts] Exploring {branches} branches, depth {depth}")
        print(f"Goal: {goal}")

        # 1. Generate initial thoughts
        initial_thoughts = await self._generate_thoughts_list(goal, depth=0)

        # 2. Build tree by expanding thoughts recursively
        tree = []
        for i, thought in enumerate(initial_thoughts, 1):
            print(f"  Branch {i}/{len(initial_thoughts)}: {thought[:50]}...")
            path = await self._explore_path(goal, thought, 1, depth, branches)
            tree.append(path)

        # 3. Evaluate and select best path
        print(f"  [ToT] Evaluating {len(tree)} exploration paths...")
        best_path = await self._select_best_path(goal, tree)

        # 4. Final synthesis
        print("  [ToT] Synthesizing final answer from best path...")
        final_answer = await self._generate_answer(goal, best_path)

        return {
            "success": True,
            "goal": goal,
            "explored_paths": len(tree),
            "best_path": best_path,
            "answer": final_answer
        }

    async def _generate_thoughts_list(self, problem: str, depth: int, context: str = "") -> List[str]:
        """Generate N different thoughts/approaches."""
        prompt = f"""Generate {self.branches} different approaches to solve this problem.

Problem: {problem}
Current context: {context if context else "None"}

Generate exactly {self.branches} distinct approaches, each on a new line starting with a number (1., 2., etc.)."""

        response = await self._complete(prompt)

        # Parse thoughts
        thoughts = []
        for line in response.split('\n'):
            line = line.strip()
            if line and (line[0].isdigit() or line.startswith('-')):
                # Remove numbering
                import re
                thought = re.sub(r'^\d+[\.\)\-]\s*', '', line).strip()
                if thought:
                    thoughts.append(thought)

        return thoughts[:self.branches]

    async def _explore_path(self, goal: str, initial_thought: str, current_depth: int, max_depth: int, branches: int) -> List[str]:
        """Recursively explore a reasoning path."""
        path = [initial_thought]

        if current_depth >= max_depth:
            return path

        # Generate next thoughts based on current path
        context = " -> ".join(path)
        next_thoughts = await self._generate_thoughts_list(goal, current_depth, context)

        if next_thoughts:
            # For simplicity, take the first "best"-looking next thought; we could
            # branch here instead for BFS, but we stick to a DFS path.
            best_next = next_thoughts[0]
            rest_of_path = await self._explore_path(goal, best_next, current_depth + 1, max_depth, branches)
            path.extend(rest_of_path)

        return path

    async def _select_best_path(self, goal: str, tree: List[List[str]]) -> List[str]:
        """Evaluate all paths and select the best one."""
        paths_desc = "\n".join([f"Path {i+1}: {' -> '.join(path)}" for i, path in enumerate(tree)])

        prompt = f"""Evaluate these reasoning paths and select the best one.

Goal: {goal}

Reasoning Paths:
{paths_desc}

Which path is most likely to lead to a correct and comprehensive solution?
Respond with ONLY the path number (e.g., 1, 2, or 3)."""

        response = await self._complete(prompt)

        # Parse selection
        try:
            import re
            match = re.search(r'\d+', response)
            if match:
                selection = int(match.group(0)) - 1
                if 0 <= selection < len(tree):
                    return tree[selection]
            return tree[0]
        except:
            return tree[0]

    async def _generate_answer(self, goal: str, path: List[str]) -> str:
        """Generate final answer from best reasoning path."""
        reasoning = " -> ".join(path)

        prompt = f"""Generate a final comprehensive answer based on this reasoning path.

Goal: {goal}
Detailed Reasoning: {reasoning}

Final Answer:"""

        return await self._complete(prompt)

    async def _complete(self, prompt: str) -> str:
        if hasattr(self.llm, 'complete_async'):
            return await self.llm.complete_async(prompt)
        else:
            import asyncio
            return await asyncio.to_thread(self.llm.complete, prompt)
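As a standalone sketch, not part of the package, the following shows the parsing conventions the Tree-of-Thoughts agent relies on: _generate_thoughts_list strips "1." / "2)" style numbering from the LLM's branch list, and _select_best_path reads the first integer in the evaluator's reply as a 1-based path index. The sample responses are made up; branches = 3 mirrors the class default.

# Standalone sketch (not part of the package): the branch-list and path-choice
# parsing used by TreeOfThoughtsAgent, applied to made-up LLM responses.
import re

branches = 3  # class default
response = """Here are three approaches:
1. Break the problem into independent sub-goals and solve each with a tool.
2. Search for prior solutions and adapt the closest match.
3. Reason from first principles and verify each step numerically."""

# Same stripping as _generate_thoughts_list: keep numbered/dashed lines, drop the numbering.
thoughts = []
for line in response.split('\n'):
    line = line.strip()
    if line and (line[0].isdigit() or line.startswith('-')):
        thought = re.sub(r'^\d+[\.\)\-]\s*', '', line).strip()
        if thought:
            thoughts.append(thought)
thoughts = thoughts[:branches]
print(len(thoughts), thoughts[0])
# 3 Break the problem into independent sub-goals and solve each with a tool.

# _select_best_path asks for "ONLY the path number" and takes the first integer
# in the reply as a 1-based index into the explored paths.
reply = "I would pick Path 2, it is the most direct."
match = re.search(r'\d+', reply)
print(int(match.group(0)) - 1)  # 1  (i.e. the second explored path)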
kite/conversation.py
ADDED
@@ -0,0 +1,125 @@
"""
Conversation Manager
Handles multi-turn dialogue, context switching, and collaboration between agents.
"""

import asyncio
import json
from typing import List, Dict, Optional, Any, Callable
from .agent import Agent

class ConversationManager:
    """
    Orchestrates multi-turn dialogue between multiple agents.

    Features:
    - Multi-turn history management.
    - Termination conditions (max_turns, consensus).
    - Collaboration patterns (round-robin).
    """

    def __init__(self,
                 agents: List[Agent],
                 framework,
                 max_turns: int = 10,
                 min_turns: int = 3,
                 termination_condition: str = "consensus"):
        if not agents or len(agents) == 0:
            raise ValueError("Conversation requires at least one agent")
        if max_turns <= 0:
            raise ValueError("max_turns must be greater than 0")

        self.agents = agents
        self.framework = framework
        self.max_turns = max_turns
        self.min_turns = min_turns
        self.termination_condition = termination_condition.lower()
        self.history = []
        self.logger = framework.logger

    async def run(self, initial_input: str) -> Dict[str, Any]:
        """
        Run the multi-turn conversation.
        """
        print(f"\n[Conversation] Starting with input: {initial_input}")

        current_input = initial_input
        turn_count = 0
        consensus_reached = False

        while turn_count < self.max_turns and not consensus_reached:
            turn_count += 1
            # Round-robin turn taking (simplified for now)
            agent_idx = (turn_count - 1) % len(self.agents)
            current_agent = self.agents[agent_idx]

            print(f"  [Turn {turn_count}] Agent: {current_agent.name}")

            # Prepare context with full history
            context = {
                "conversation_history": self.history,
                "current_turn": turn_count,
                "max_turns": self.max_turns
            }

            # Run agent
            res = await current_agent.run(current_input, context=context)

            if not res['success']:
                print(f"  [Error] Agent {current_agent.name} failed: {res.get('error')}")
                break

            response = res['response']
            self.history.append({
                "turn": turn_count,
                "agent": current_agent.name,
                "content": response
            })

            # Check for termination
            if self.termination_condition == "consensus":
                consensus_reached = await self._check_consensus(response, turn_count)
                if consensus_reached:
                    print(f"  [Consensus] Reached by {current_agent.name}")

            # Set input for next agent (usually the response of the current one)
            current_input = response

        print(f"\n[Conversation] Finished after {turn_count} turns.")

        return {
            "success": True,
            "turns": turn_count,
            "history": self.history,
            "final_response": self.history[-1]['content'] if self.history else "No history",
            "termination": "consensus" if consensus_reached else "max_turns"
        }

    async def _check_consensus(self, last_response: str, turn_count: int) -> bool:
        """
        Check if consensus or a final conclusion has been reached.
        Only valid after min_turns.
        """
        if turn_count < self.min_turns:
            return False

        # Stricter declarative markers
        keywords = [
            "final consensus reached",
            "consensus reached",
            "final conclusion:",
            "i agree, let's reach a consensus",
            "we have reached an agreement"
        ]

        response_lower = last_response.lower()
        for kw in keywords:
            if kw in response_lower:
                return True

        return False

    def get_summary(self) -> str:
        """Synthesize a summary of the conversation."""
        summary = "\n".join([f"{h['agent']}: {h['content'][:100]}..." for h in self.history])
        return summary
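To make the termination behaviour easy to see, here is a standalone sketch, not part of the package, of the two rules ConversationManager.run applies each turn: round-robin agent selection and the keyword-based consensus check, which is only honoured after min_turns. The agent names and responses are illustrative assumptions; MIN_TURNS and the keyword list mirror the defaults above.

# Standalone sketch (not part of the package): round-robin turn order and the
# consensus check from ConversationManager, pulled out as plain functions.
MIN_TURNS = 3
CONSENSUS_KEYWORDS = [
    "final consensus reached",
    "consensus reached",
    "final conclusion:",
    "i agree, let's reach a consensus",
    "we have reached an agreement",
]

def pick_agent(turn_count: int, agents: list) -> str:
    # Same rule as the run() loop: turn 1 -> agents[0], turn 2 -> agents[1], ...
    return agents[(turn_count - 1) % len(agents)]

def check_consensus(last_response: str, turn_count: int) -> bool:
    # Consensus is only accepted once min_turns have elapsed.
    if turn_count < MIN_TURNS:
        return False
    response_lower = last_response.lower()
    return any(kw in response_lower for kw in CONSENSUS_KEYWORDS)

agents = ["Researcher", "Critic"]
print([pick_agent(t, agents) for t in (1, 2, 3, 4)])
# ['Researcher', 'Critic', 'Researcher', 'Critic']
print(check_consensus("Consensus reached: ship it.", 2))  # False (before min_turns)
print(check_consensus("Consensus reached: ship it.", 3))  # True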