kite_agent-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. kite/__init__.py +46 -0
  2. kite/ab_testing.py +384 -0
  3. kite/agent.py +556 -0
  4. kite/agents/__init__.py +3 -0
  5. kite/agents/plan_execute.py +191 -0
  6. kite/agents/react_agent.py +509 -0
  7. kite/agents/reflective_agent.py +90 -0
  8. kite/agents/rewoo.py +119 -0
  9. kite/agents/tot.py +151 -0
  10. kite/conversation.py +125 -0
  11. kite/core.py +974 -0
  12. kite/data_loaders.py +111 -0
  13. kite/embedding_providers.py +372 -0
  14. kite/llm_providers.py +1278 -0
  15. kite/memory/__init__.py +6 -0
  16. kite/memory/advanced_rag.py +333 -0
  17. kite/memory/graph_rag.py +719 -0
  18. kite/memory/session_memory.py +423 -0
  19. kite/memory/vector_memory.py +579 -0
  20. kite/monitoring.py +611 -0
  21. kite/observers.py +107 -0
  22. kite/optimization/__init__.py +9 -0
  23. kite/optimization/resource_router.py +80 -0
  24. kite/persistence.py +42 -0
  25. kite/pipeline/__init__.py +5 -0
  26. kite/pipeline/deterministic_pipeline.py +323 -0
  27. kite/pipeline/reactive_pipeline.py +171 -0
  28. kite/pipeline_manager.py +15 -0
  29. kite/routing/__init__.py +6 -0
  30. kite/routing/aggregator_router.py +325 -0
  31. kite/routing/llm_router.py +149 -0
  32. kite/routing/semantic_router.py +228 -0
  33. kite/safety/__init__.py +6 -0
  34. kite/safety/circuit_breaker.py +360 -0
  35. kite/safety/guardrails.py +82 -0
  36. kite/safety/idempotency_manager.py +304 -0
  37. kite/safety/kill_switch.py +75 -0
  38. kite/tool.py +183 -0
  39. kite/tool_registry.py +87 -0
  40. kite/tools/__init__.py +21 -0
  41. kite/tools/code_execution.py +53 -0
  42. kite/tools/contrib/__init__.py +19 -0
  43. kite/tools/contrib/calculator.py +26 -0
  44. kite/tools/contrib/datetime_utils.py +20 -0
  45. kite/tools/contrib/linkedin.py +428 -0
  46. kite/tools/contrib/web_search.py +30 -0
  47. kite/tools/mcp/__init__.py +31 -0
  48. kite/tools/mcp/database_mcp.py +267 -0
  49. kite/tools/mcp/gdrive_mcp_server.py +503 -0
  50. kite/tools/mcp/gmail_mcp_server.py +601 -0
  51. kite/tools/mcp/postgres_mcp_server.py +490 -0
  52. kite/tools/mcp/slack_mcp_server.py +538 -0
  53. kite/tools/mcp/stripe_mcp_server.py +219 -0
  54. kite/tools/search.py +90 -0
  55. kite/tools/system_tools.py +54 -0
  56. kite/tools_manager.py +27 -0
  57. kite_agent-0.1.0.dist-info/METADATA +621 -0
  58. kite_agent-0.1.0.dist-info/RECORD +61 -0
  59. kite_agent-0.1.0.dist-info/WHEEL +5 -0
  60. kite_agent-0.1.0.dist-info/licenses/LICENSE +21 -0
  61. kite_agent-0.1.0.dist-info/top_level.txt +1 -0
kite/agents/react_agent.py
@@ -0,0 +1,509 @@
+ """
+ ReActAgent - Autonomous agent using the Think-Act-Observe pattern.
+ """
+
+ import time
+ import json
+ import asyncio
+ from typing import List, Dict, Optional, Any, Tuple
+ from ..agent import Agent
+ from ..safety.kill_switch import KillSwitch
+
+
+ class ReActAgent(Agent):
+     """
+     Autonomous agent that implements the ReAct (Reason + Act) loop.
+     It thinks, acts using tools, and observes the results until the goal is
+     achieved or a safety limit is triggered.
+     """
+
+     def __init__(self,
+                  name: str,
+                  system_prompt: str,
+                  tools: List,
+                  framework,
+                  llm=None,
+                  max_iterations: int = 15,
+                  kill_switch: Optional[KillSwitch] = None,
+                  knowledge_sources: Optional[List[str]] = None,
+                  verbose: bool = False):
+         super().__init__(name, system_prompt, tools, framework, llm=llm,
+                          max_iterations=max_iterations,
+                          knowledge_sources=knowledge_sources,
+                          verbose=verbose, agent_type="react")
+         self.kill_switch = kill_switch or KillSwitch(max_time=600, max_iterations=15)
+
+     async def run(self, user_input: str, context: Optional[Dict] = None) -> Dict:
+         """Override the base run to use the autonomous loop."""
+         return await self.run_autonomous(user_input, context)
+
+     async def run_autonomous(self, goal: str, context: Optional[Dict] = None) -> Dict[str, Any]:
+         """
+         Run the agent autonomously to achieve a goal.
+         """
+         state = {
+             'goal': goal,
+             'steps': 0,
+             'history': [],  # list of {thought, action, observation}
+             'confirmed_facts': [],
+             'missing_info': [goal],
+             'total_cost': 0.0,
+             'start_time': time.time(),
+             'completed': False,
+             'final_answer': None,
+             'context': context or {},
+             'data': {}  # map of tool_name -> result
+         }
+
+         if self.verbose:
+             print(f"\n[{self.name}] Starting advanced autonomous loop for: {goal}")
+
+         while True:
+             # 1. Check the kill switch
+             should_stop, reason = self.kill_switch.check(state)
+             if should_stop:
+                 if self.verbose:
+                     print(f"[{self.name}] Loop terminated: {reason}")
+                 break
+
+             state['steps'] += 1
+             if state['steps'] > self.max_iterations:
+                 self.logger.warning(f"[{self.name}] Max iterations reached.")
+                 self.framework.event_bus.emit("agent:error", {"agent": self.name, "error": "Max iterations reached"})
+                 break
+
+             # 1b. Check for repeated failures of the same action
+             if len(state['history']) >= 3:
+                 last_three = state['history'][-3:]
+                 actions = [h['action'].get('tool') for h in last_three]
+                 statuses = [h['action'].get('status') for h in last_three]
+                 if len(set(actions)) == 1 and all(s != 'success' for s in statuses):
+                     if self.verbose:
+                         print(f" [{self.name}] Detected loop on failed action '{actions[0]}'. Terminating.")
+                     state['completed'] = False
+                     state['final_answer'] = f"Loop detected on failed action: {actions[0]}"
+                     break
+
+             self.framework.event_bus.emit("agent:step", {
+                 "agent": self.name,
+                 "step": state['steps'],
+                 "goal": state['goal'],
+                 "summary": state['history'][-1]['reasoning'] if state['history'] else "Starting task..."
+             })
+
+             # 2. THINK & PLAN - try native tool calling first
+             if self.verbose:
+                 print(f"[{self.name}] Reflecting and deciding next action...")
+
+             # Try native tool calling
+             success, structured_output = await self._try_native_tool_calling(state)
+
+             # Fall back to JSON prompting if native tool calling is not supported
+             if not success:
+                 system_msg, user_msg = self._build_structured_prompt(state)
+                 messages = [
+                     {"role": "system", "content": system_msg},
+                     {"role": "user", "content": user_msg}
+                 ]
+                 response = await self._get_llm_response(messages)
+                 structured_output = self._parse_structured_output(response)
+
+             try:
+                 # Support common aliases for keys
+                 reasoning = structured_output.get('reasoning') or structured_output.get('thought') or structured_output.get('reflection', 'No reasoning.')
+                 action_name = structured_output.get('tool') or structured_output.get('action') or structured_output.get('name')
+                 action_args = structured_output.get('arguments') or structured_output.get('args') or structured_output.get('parameters', {})
+                 is_final = structured_output.get('is_final') or structured_output.get('done', False)
+                 confidence = structured_output.get('confidence', 1.0)
+
+                 self.framework.event_bus.emit("agent:thought", {
+                     "agent": self.name,
+                     "reasoning": reasoning,
+                     "confidence": confidence
+                 })
+                 if self.verbose:
+                     print(f" [{self.name}] Thinking: {reasoning}")
+
+                 if is_final or state['steps'] >= self.max_iterations:
+                     state['completed'] = True
+                     state['final_answer'] = reasoning
+                     reason = "is_final signal" if is_final else "max_iterations reached"
+                     if self.verbose:
+                         print(f" [{self.name}] Mission complete ({reason}). Steps: {state['steps']}")
+                     self.framework.event_bus.emit("agent:complete", {
+                         "agent": self.name,
+                         "answer": state['final_answer'],
+                         "steps": state['steps'],
+                         "reason": reason
+                     })
+                     break
+
+                 # 3. ACT
+                 result = None  # ensure defined even when no tool is executed
+                 if not action_name or action_name not in self.tools:
+                     if is_final:
+                         observation = "Goal marked as complete."
+                         action_record = {'tool': 'None', 'args': {}, 'status': 'success'}
+                     else:
+                         observation = f"Error: Tool '{action_name}' not found. Please choose from: {list(self.tools.keys())} or signal is_final: true."
+                         action_record = {'tool': action_name, 'args': action_args, 'status': 'failed'}
+                 else:
+                     self.framework.event_bus.emit("agent:action", {
+                         "agent": self.name,
+                         "tool": action_name,
+                         "args": action_args
+                     })
+                     if self.verbose:
+                         print(f" [{self.name}] Action: {action_name}({action_args})")
+                     try:
+                         # Async-safe tool execution
+                         result = await self.tools[action_name].execute(**action_args, framework=self.framework)
+
+                         observation = self._format_observation(result)
+                         action_record = {'tool': action_name, 'args': action_args, 'status': 'success'}
+                     except Exception as e:
+                         observation = f"Error executing {action_name}: {str(e)}"
+                         action_record = {'tool': action_name, 'args': action_args, 'status': 'error'}
+
+                 if not observation or observation == "[]":
+                     observation = "[] (WARNING: No results found for this query. Do NOT imagine data. Try a broader search or a different tool.)"
+
+                 # Handle drifting / missing tool names
+                 if not action_name and not is_final:
+                     available_tools = list(self.tools.keys())
+                     observation = f"Error: You provided reasoning but NO tool name. Choose from: {available_tools}, or signal is_final: true."
+                     action_record = {'tool': 'None', 'args': {}, 'status': 'failed'}
+
+                 # Console visibility for observations
+                 obs_preview = (observation[:200].replace('\n', ' ') + "...") if len(observation) > 200 else observation
+                 if self.verbose:
+                     print(f" [{self.name}] Observation: {obs_preview}")
+
+                 # 4. RECORD
+                 state['history'].append({
+                     'step': state['steps'],
+                     'reasoning': reasoning,
+                     'action': action_record,
+                     'observation': observation,
+                     'confidence': confidence
+                 })
+                 # Emit the full observation for the dashboard
+                 self.framework.event_bus.emit("agent:observation", {
+                     "agent": self.name,
+                     "observation": observation,
+                     "full_data": result if action_record['status'] == 'success' else None
+                 })
+
+                 # Dynamic fact extraction
+                 if action_record['status'] == 'success':
+                     state['confirmed_facts'].append(f"Result of {action_name}: {observation}")
+                     state['data'][action_name] = result
+
+             except Exception as e:
+                 import traceback
+                 error_type = type(e).__name__
+                 error_msg = str(e) or repr(e)
+                 full_error = f"{error_type}: {error_msg}"
+
+                 if self.verbose:
+                     print(f"[{self.name}] Error in loop: {full_error}")
+                 # Log the actual error to the EventBus
+                 self.framework.event_bus.emit("agent:error", {
+                     "agent": self.name,
+                     "error": full_error,
+                     "traceback": traceback.format_exc()
+                 })
+
+                 observation = f"Internal Error: {full_error}"
+                 state['history'].append({
+                     'step': state['steps'],
+                     'reasoning': f"Self-correction: An internal error occurred ({full_error}).",
+                     'action': {'tool': 'recovery', 'args': {}, 'status': 'error'},
+                     'observation': observation
+                 })
+                 if state['steps'] >= self.max_iterations:
+                     break
+
+         return {
+             "success": state['completed'],
+             "response": state.get('final_answer') or "No final answer reached. This usually happens if tool results were empty or the mission was impossible.",
+             "goal": goal,
+             "steps": state['steps'],
+             "history": state['history'],
+             "agent": self.name,
+             "data": state['data']
+         }
+
+
+     async def _try_native_tool_calling(self, state: Dict) -> Tuple[bool, Dict]:
+         """
+         Try native tool calling. Returns (success: bool, result: Dict).
+         If success is False, fall back to JSON prompting.
+         """
+         try:
+             # Build the tool schemas in OpenAI/Groq function-calling format
+             tools_schemas = [tool.to_schema() for tool in self.tools.values()]
+
+             # Build simple messages
+             system_msg = f"{self.system_prompt}\n\nHelp with: {state['goal']}"
+
+             history_text = ""
+             if state['history']:
+                 recent = state['history'][-5:]
+                 for h in recent:
+                     obs_summary = str(h['observation'])[:100]
+                     history_text += f"\nStep {h['step']}: {h['action'].get('tool')} -> {obs_summary}"
+
+             user_msg = f"Goal: {state['goal']}\nSteps: {state['steps']}\n{history_text or 'Starting...'}\n\nNext action?"
+
+             messages = [
+                 {"role": "system", "content": system_msg},
+                 {"role": "user", "content": user_msg}
+             ]
+
+             # Try calling with the tools parameter
+             response = await self.llm.chat_async(messages, tools=tools_schemas, temperature=0.1)
+
+             # Handle a native tool-call response
+             if isinstance(response, dict) and response.get('tool_calls'):
+                 tool_call = response['tool_calls'][0]
+                 function = tool_call.get('function', {})
+                 args_str = function.get('arguments', '{}')
+                 # Parse the arguments if they arrive as a JSON string
+                 if isinstance(args_str, str):
+                     args = json.loads(args_str)
+                 else:
+                     args = args_str
+
+                 return True, {
+                     "reasoning": response.get('content') or "Using tool",
+                     "tool": function.get('name'),
+                     "arguments": args,
+                     "is_final": False
+                 }
+
+             # No tool calls means the model produced a final answer
+             content = response if isinstance(response, str) else response.get('content', '')
+             return True, {
+                 "reasoning": content,
+                 "is_final": True
+             }
+
+         except (TypeError, AttributeError, KeyError):
+             # Native tool calling is not supported by this provider
+             if self.verbose and state['steps'] == 1:
+                 print(f" [{self.name}] Native tool calling not supported, using JSON prompting")
+             return False, {}
+
+     def _build_structured_prompt(self, state: Dict) -> Tuple[str, str]:
+         tool_desc = ""
+         for n, t in self.tools.items():
+             tool_desc += f"- {n}: {t.description}\n"
+
+         # Build a concise reflection context from recent history
+         history_text = ""
+         if state['history']:
+             history_text = "\n### Execution History (Recap)\n"
+             # Limit history to the last 15 steps to prevent token bloat
+             recent_history = state['history'][-15:]
+             for h in recent_history:
+                 # Truncate each observation for prompt efficiency
+                 obs = h['observation']
+
+                 # Smart summary: if it is a list (common for search results), show the count
+                 if obs.strip().startswith('[') and obs.strip().endswith(']'):
+                     try:
+                         data = json.loads(obs)
+                         if isinstance(data, list):
+                             obs_summary = f"[Found {len(data)} items]"
+                         else:
+                             obs_summary = obs[:200].replace('\n', ' ') + "..."
+                     except Exception:
+                         obs_summary = obs[:200].replace('\n', ' ') + "..."
+                 else:
+                     obs_summary = obs[:200].replace('\n', ' ') + "..."
+
+                 history_text += f"\nStep {h['step']}: {h['action'].get('tool')} -> {obs_summary}\n"
+
+         system_msg = f"""
+ {self.system_prompt}
+
+ ## 🏗 OUTPUT FORMAT REQUIREMENTS
+ You MUST respond using ONLY the following JSON structure.
+ DO NOT simply describe your actions in text—you MUST call a tool via the JSON "tool" field or set "is_final": true.
+
+ {{
+     "reasoning": "Specifically analyze what was found and what is missing. BE CONCISE.",
+     "tool": "tool_name",
+     "arguments": {{"param_name": "value"}},
+     "is_final": false,
+     "answer": "Only provide this if is_final is true."
+ }}
+
+ ## 🛠 Available Tools:
+ {tool_desc}
+ """
+
+         user_msg = f"""
+ ## 🎯 CURRENT MISSION
+ Goal: {state['goal']}
+ Total steps taken so far: {state['steps']}
+ Total tool calls made: {len([h for h in state['history'] if h['action'].get('tool') and h['action']['tool'] != 'None'])}
+
+ {history_text}
+
+ {self._get_native_knowledge(state['goal'])}
+
+ ## 🚀 YOUR NEXT ACTION:
+ Choose the next tool to call or provide the final answer. Remember: if you do not provide the JSON 'tool' field, your action will NOT be executed.
+ """
+         return system_msg, user_msg
+
+     async def _get_llm_response(self, messages: List[Dict]) -> str:
+         stop_tokens = ["\nObservation:", "\nThought:", "\nAction:", "\nStep"]
+         # Use chat_async if available, as it is better for instruct models
+         if hasattr(self.llm, 'chat_async'):
+             # Note: format='json' is not supported by all providers (e.g., Groq).
+             # Rely on the system prompt instructions for JSON formatting instead.
+             return await self.llm.chat_async(messages, temperature=0.1, stop=stop_tokens)
+
+         # Fall back to a single string if the provider does not support chat
+         full_text = "\n\n".join([f"### {m['role'].upper()}\n{m['content']}" for m in messages])
+         if hasattr(self.llm, 'complete_async'):
+             return await self.llm.complete_async(full_text, temperature=0.1, stop=stop_tokens)
+         return await asyncio.to_thread(self.llm.complete, full_text, temperature=0.1, stop=stop_tokens)
+
+     def _clean_json(self, s: str) -> str:
+         """Handle common LLM JSON formatting errors."""
+         import re
+         # Remove markdown code blocks
+         s = re.sub(r'```json\s*(.*?)\s*```', r'\1', s, flags=re.DOTALL)
+         s = re.sub(r'```\s*(.*?)\s*```', r'\1', s, flags=re.DOTALL)
+
+         # Remove DeepSeek-style thought blocks
+         s = re.sub(r'<think>.*?</think>', '', s, flags=re.DOTALL)
+
+         # Balance braces if the JSON was truncated
+         open_braces = s.count('{')
+         close_braces = s.count('}')
+         if open_braces > close_braces:
+             s += '}' * (open_braces - close_braces)
+
+         # Fix single quotes to double quotes for keys/values.
+         # This is risky but helpful for some models:
+         # s = re.sub(r"'(.*?)'", r'"\1"', s)
+
+         # Remove trailing commas before closing braces/brackets
+         s = re.sub(r',\s*\}', '}', s)
+         s = re.sub(r',\s*\]', ']', s)
+
+         return s.strip()
+
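+     # Illustrative annotation (editor's note, not in the published source):
+     # given a fenced, truncated reply such as
+     #   '```json\n{"tool": "x", "arguments": {"q": "y"\n```'
+     # _clean_json strips the fence and appends the two missing closing braces,
+     # and a trailing comma as in '{"tool": "x",}' becomes '{"tool": "x"}'.
+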
+     def _parse_structured_output(self, response: str) -> Dict:
+         try:
+             clean_res = self._clean_json(response)
+             # Find the FIRST '{' to locate a potential JSON block
+             start_idx = clean_res.find('{')
+             if start_idx == -1:
+                 return {"reasoning": f"No JSON found. Raw: {response[:50]}", "is_final": False}
+
+             # Find the matching closing brace for the FIRST object to handle "Extra data"
+             depth = 0
+             end_idx = -1
+             for i, char in enumerate(clean_res[start_idx:], start=start_idx):
+                 if char == '{':
+                     depth += 1
+                 elif char == '}':
+                     depth -= 1
+                     if depth == 0:
+                         end_idx = i
+                         break
+
+             if end_idx == -1:
+                 # Fall back to rfind if depth counting fails
+                 end_idx = clean_res.rfind('}')
+
+             json_str = clean_res[start_idx:end_idx + 1]
+             data = json.loads(json_str)
+             if 'reasoning' not in data and 'thought' in data:
+                 data['reasoning'] = data['thought']
+             return data
+
+         except Exception as e:
+             # Fallback A: try a regex match on the first {...} block
+             try:
+                 import re
+                 # Pre-clean the response to handle unescaped newlines within JSON
+                 # strings, a common issue with smaller LLMs
+                 fixed_response = re.sub(r'(?<=: ")(.*?)(?=",)', lambda m: m.group(1).replace('\n', ' '), response, flags=re.DOTALL)
+
+                 match = re.search(r'\{.*\}', fixed_response, re.DOTALL)
+                 if match:
+                     json_str = match.group()
+                     # Fix truncated JSON if possible
+                     if json_str.count('{') > json_str.count('}'):
+                         json_str += '}' * (json_str.count('{') - json_str.count('}'))
+                     return json.loads(json_str)
+             except Exception:
+                 pass
+
+             print(f"[{self.name}] JSON Parse Error: {e}")
+             return {"reasoning": f"Parse Error: {str(e)}. Try to strictly follow the JSON output format.", "is_final": False}
+
+     def _get_native_knowledge(self, goal: str) -> str:
+         """Fetch expert templates from all authorized knowledge sources."""
+         if not hasattr(self.framework, 'knowledge'):
+             return "No knowledge store available."
+
+         if not self.knowledge_sources:
+             return "No authorized knowledge sources for this agent."
+
+         knowledge_text = ""
+         for source_name in self.knowledge_sources:
+             data = self.framework.knowledge.get(source_name)
+             if not data:
+                 continue
+
+             matched = []
+             categories = []
+             for key, val in data.items():
+                 categories.append(key)
+
+                 # Robust matching: handle underscores, hyphens, and semantic overlaps
+                 norm_key = key.lower().replace("_", " ")
+                 norm_goal = goal.lower().replace("-", " ")
+
+                 # Check for direct inclusion or word overlap
+                 is_match = norm_key in norm_goal or norm_goal in norm_key
+                 if not is_match:
+                     # Check whether any major word from the key (3+ chars) appears in the goal
+                     key_words = [w for w in norm_key.split() if len(w) >= 3]
+                     if any(w in norm_goal for w in key_words):
+                         is_match = True
+
+                 if is_match:
+                     if isinstance(val, list):
+                         val_str = "\n ".join([f'- {v}' for v in val])
+                     else:
+                         val_str = str(val)
+                     matched.append(f"### {key.replace('_', ' ').upper()}:\n {val_str}")
+
+             if matched:
+                 # Cap at 10 matches to allow breadth for complex missions
+                 if len(matched) > 10:
+                     matched = matched[:10]
+                 knowledge_text += f"\n## 🧠 KNOWLEDGE BASE ({source_name}):\n"
+                 knowledge_text += "\n\n".join(matched) + "\n"
+
+             # Always list all categories to encourage the agent to explore
+             knowledge_text += f"\n## 📂 ALL EXPERTISE CATEGORIES ({source_name}):\n"
+             knowledge_text += f"Available: {', '.join(categories)}. "
+             knowledge_text += "You are encouraged to use queries from any of these if the current ones fail.\n"
+
+         return knowledge_text if knowledge_text else "No expert templates found."
+
+     def _format_observation(self, result: Any) -> str:
+         if isinstance(result, (dict, list)):
+             try:
+                 return json.dumps(result, indent=2)
+             except (TypeError, ValueError):
+                 return str(result)
+         return str(result)
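Usage sketch for the ReActAgent above (editor's illustration, not part of the package diff). It assumes that Kite() from kite/core.py can be constructed with no arguments and wires up an event bus and a default LLM, and that the base Agent accepts a list of tool objects and indexes them by their name attribute; neither assumption is confirmed by this diff. The EchoTool stand-in is hypothetical and implements only the interface ReActAgent actually calls: description, to_schema(), and an async execute().

import asyncio

from kite.core import Kite                      # assumed zero-arg constructor
from kite.agents.react_agent import ReActAgent
from kite.safety.kill_switch import KillSwitch


class EchoTool:
    """Hypothetical stand-in tool implementing only what ReActAgent uses."""
    name = "echo"
    description = "Echo the input text back."

    def to_schema(self):
        # OpenAI-style function schema, as _try_native_tool_calling expects
        return {"type": "function", "function": {
            "name": self.name,
            "description": self.description,
            "parameters": {"type": "object",
                           "properties": {"text": {"type": "string"}},
                           "required": ["text"]}}}

    async def execute(self, text: str, framework=None):
        # ReActAgent always passes framework= when executing a tool
        return {"echo": text}


async def main():
    framework = Kite()
    agent = ReActAgent(
        name="demo",
        system_prompt="You are a careful assistant.",
        tools=[EchoTool()],
        framework=framework,
        max_iterations=5,
        kill_switch=KillSwitch(max_time=120, max_iterations=5),
        verbose=True,
    )
    result = await agent.run("Echo the word 'kite' back to me.")
    print(result["success"], result["steps"], result["response"])


asyncio.run(main())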
kite/agents/reflective_agent.py
@@ -0,0 +1,90 @@
+ import logging
+ from typing import Any, Dict, List, Optional
+ from ..agent import Agent
+ from ..core import Kite
+
+ class ReflectiveAgent(Agent):
+     """
+     An agent that implements the Reflection pattern.
+     It generates an initial response, critiques it, and then refines it.
+     """
+     def __init__(self,
+                  name: str,
+                  system_prompt: str,
+                  tools: Optional[List] = None,
+                  framework: Optional[Kite] = None,
+                  llm: Any = None,
+                  critic_prompt: Optional[str] = None,
+                  max_reflections: int = 1,
+                  verbose: bool = False):
+         super().__init__(name, system_prompt, tools, framework, llm=llm, verbose=verbose)
+         self.logger = logging.getLogger(name)
+         self.critic_prompt = critic_prompt or (
+             "You are a critical reviewer. Analyze the previous response for accuracy, "
+             "completeness, and adherence to instructions. "
+             "Identify specific flaws or missing information. "
+             "If the response is perfect, simply say 'PERFECT'. "
+             "Otherwise, provide a concise critique."
+         )
+         self.max_reflections = max_reflections
+
+     async def run(self, input_text: str, context: Optional[Dict] = None) -> Dict:
+         """
+         Execute the agentic reflection loop:
+         1. Generate an initial response
+         2. Critique it (reflection)
+         3. Refine it based on the critique (self-correction)
+         """
+         # 1. Initial generation
+         self.logger.info(f"[{self.name}] Generating initial response...")
+         initial_result = await super().run(input_text, context)
+
+         if not initial_result.get("success"):
+             return initial_result
+
+         current_response = initial_result.get("response")
+
+         # 2. Reflection loop
+         for i in range(self.max_reflections):
+             self.logger.info(f"[{self.name}] Reflection cycle {i+1}/{self.max_reflections}")
+
+             # A. Critique
+             critique_input = [
+                 {"role": "system", "content": self.critic_prompt},
+                 {"role": "user", "content": f"Original Request: {input_text}\n\nProposed Response: {current_response}"}
+             ]
+
+             # We use the same LLM for the critique (self-reflection).
+             # In advanced usage, this could be a stronger model.
+             critique_response = self.llm.chat(critique_input)
+
+             if "PERFECT" in critique_response.upper():
+                 self.logger.info(f"[{self.name}] Critique passed: response is good.")
+                 break
+
+             self.logger.info(f"[{self.name}] Critique: {critique_response}")
+
+             # Emit a reflection event
+             if self.framework and self.framework.event_bus:
+                 self.framework.event_bus.emit(f"agent:{self.name}:reflection", {
+                     "cycle": i + 1,
+                     "critique": critique_response
+                 })
+
+             # B. Refine
+             refinement_input = [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "model", "content": current_response},
+                 {"role": "user", "content": f"Critique: {critique_response}\n\nPlease regenerate the response, addressing the critique above."}
+             ]
+
+             refined_response = self.llm.chat(refinement_input)
+             current_response = refined_response
+
+             self.logger.info(f"[{self.name}] Refined response generated.")
+
+         return {
+             "success": True,
+             "response": current_response,
+             "history": self.history  # returns the full thought process
+         }
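A similar sketch for the ReflectiveAgent above (editor's illustration, not part of the package diff). Beyond the assumed zero-arg Kite() constructor, it assumes the base Agent resolves a default llm exposing the synchronous chat(messages) call used by the critique and refine steps, and that Agent.run performs the initial generation.

import asyncio

from kite.core import Kite                              # assumed zero-arg constructor
from kite.agents.reflective_agent import ReflectiveAgent


async def main():
    framework = Kite()
    agent = ReflectiveAgent(
        name="writer",
        system_prompt="Write concise, accurate summaries.",
        framework=framework,
        max_reflections=2,  # up to two critique/refine cycles
        verbose=True,
    )
    result = await agent.run("Summarize the ReAct pattern in two sentences.")
    print(result["response"])


asyncio.run(main())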