ambivo-agents 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,91 @@
1
# ambivo_agents/__init__.py
"""
Ambivo Agents Framework
A minimalistic agent framework for building AI applications.
"""

# NOTE(review): the released wheel is ambivo-agents 1.0.1 but this constant
# said "1.0.0" — bumped so __version__ matches the package metadata; consider
# single-sourcing the version (e.g. importlib.metadata) to prevent drift.
__version__ = "1.0.1"

# Core imports
from .core.base import (
    AgentRole,
    MessageType,
    AgentMessage,
    AgentTool,
    ExecutionContext,
    BaseAgent,
    ProviderConfig,
    ProviderTracker,
    AgentSession
)

from .core.memory import (
    MemoryManagerInterface,
    RedisMemoryManager,
    create_redis_memory_manager
)

from .core.llm import (
    LLMServiceInterface,
    MultiProviderLLMService,
    create_multi_provider_llm_service
)

# Service imports
from .services.factory import AgentFactory
from .services.agent_service import AgentService, create_agent_service

# Agent imports
from .agents.assistant import AssistantAgent
from .agents.code_executor import CodeExecutorAgent
from .agents.knowledge_base import KnowledgeBaseAgent
from .agents.web_search import WebSearchAgent
from .agents.web_scraper import WebScraperAgent
from .agents.media_editor import MediaEditorAgent
from .agents.youtube_download import YouTubeDownloadAgent
from .agents.moderator import ModeratorAgent

# Configuration
from .config.loader import load_config, ConfigurationError

# Explicit public API of the package.
__all__ = [
    # Core
    "AgentRole",
    "MessageType",
    "AgentMessage",
    "AgentTool",
    "ExecutionContext",
    "BaseAgent",
    "ProviderConfig",
    "ProviderTracker",
    "AgentSession",

    # Memory
    "MemoryManagerInterface",
    "RedisMemoryManager",
    "create_redis_memory_manager",

    # LLM
    "LLMServiceInterface",
    "MultiProviderLLMService",
    "create_multi_provider_llm_service",

    # Services
    "AgentFactory",
    "AgentService",
    "create_agent_service",

    # Agents
    "AssistantAgent",
    "CodeExecutorAgent",
    "KnowledgeBaseAgent",
    "WebSearchAgent",
    "WebScraperAgent",
    "MediaEditorAgent",
    "YouTubeDownloadAgent",
    "ModeratorAgent",

    # Configuration
    "load_config",
    "ConfigurationError"
]
@@ -0,0 +1,21 @@
1
# ambivo_agents/agents/__init__.py
"""Convenience re-exports of all built-in agent implementations.

Lets callers write ``from ambivo_agents.agents import AssistantAgent``
instead of importing each concrete agent from its own submodule.
"""
from .assistant import AssistantAgent
from .code_executor import CodeExecutorAgent
from .knowledge_base import KnowledgeBaseAgent
from .web_search import WebSearchAgent
from .web_scraper import WebScraperAgent
from .media_editor import MediaEditorAgent
from .youtube_download import YouTubeDownloadAgent
from .moderator import ModeratorAgent

# Public API of this subpackage.
__all__ = [
    "AssistantAgent",
    "CodeExecutorAgent",
    "KnowledgeBaseAgent",
    "WebSearchAgent",
    "WebScraperAgent",
    "MediaEditorAgent",
    "YouTubeDownloadAgent",
    "ModeratorAgent"
]
21
+
@@ -0,0 +1,203 @@
1
+ # AssistantAgent with BaseAgentHistoryMixin
2
+ import json
3
+ import uuid
4
+ from typing import Dict, Any
5
+
6
+ from ambivo_agents import BaseAgent, AgentRole, ExecutionContext, AgentMessage, MessageType
7
+ from ambivo_agents.core.history import BaseAgentHistoryMixin
8
+
9
+
10
class AssistantAgent(BaseAgent, BaseAgentHistoryMixin):
    """General purpose assistant agent with conversation history.

    Intent is classified with the LLM service when one is configured;
    otherwise a keyword heuristic is used so the agent still responds
    without an LLM.
    """

    def __init__(self, agent_id: str = None, memory_manager=None, llm_service=None, **kwargs):
        # Auto-assign a short unique id when none is supplied.
        if agent_id is None:
            agent_id = f"assistant_{str(uuid.uuid4())[:8]}"

        super().__init__(
            agent_id=agent_id,
            role=AgentRole.ASSISTANT,
            memory_manager=memory_manager,
            llm_service=llm_service,
            name="Assistant Agent",
            description="General purpose assistant for user interactions",
            **kwargs
        )

        # Enable conversation-history helpers from BaseAgentHistoryMixin.
        self.setup_history_mixin()

    async def _analyze_intent(self, user_message: str, conversation_context: str = "") -> Dict[str, Any]:
        """Classify the user's intent, preferring the LLM when available.

        Returns a dict with keys: primary_intent, requires_context,
        context_reference, topic, confidence. Falls back to
        _keyword_based_analysis on any LLM or parsing failure.
        """
        if not self.llm_service:
            return self._keyword_based_analysis(user_message)

        prompt = f"""
        Analyze this user message in the context of general assistance:

        Conversation Context:
        {conversation_context}

        Current User Message: {user_message}

        Respond in JSON format:
        {{
            "primary_intent": "question|request|clarification|continuation|greeting|farewell",
            "requires_context": true/false,
            "context_reference": "what user is referring to",
            "topic": "main subject area",
            "confidence": 0.0-1.0
        }}
        """

        try:
            import re  # local import: re is only needed for this JSON grab
            response = await self.llm_service.generate_response(prompt)
            # The LLM may wrap the JSON in prose; extract the first {...} span.
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                return json.loads(json_match.group())
            # Fallback if LLM doesn't return JSON
            return self._keyword_based_analysis(user_message)
        except Exception:
            # Any provider or JSON error degrades to the keyword heuristic.
            return self._keyword_based_analysis(user_message)

    def _keyword_based_analysis(self, user_message: str) -> Dict[str, Any]:
        """Fallback keyword-based analysis (no LLM required)."""
        content_lower = user_message.lower()

        # First matching category wins; default to a question.
        if any(word in content_lower for word in ['hello', 'hi', 'hey']):
            intent = 'greeting'
        elif any(word in content_lower for word in ['bye', 'goodbye', 'thanks']):
            intent = 'farewell'
        elif any(word in content_lower for word in ['what', 'how', 'why', 'when', 'where']):
            intent = 'question'
        elif any(word in content_lower for word in ['can you', 'please', 'help me']):
            intent = 'request'
        elif any(word in content_lower for word in ['that', 'this', 'it', 'previous']):
            intent = 'continuation'
        else:
            intent = 'question'

        return {
            "primary_intent": intent,
            # Demonstrative pronouns suggest the user refers to earlier turns.
            "requires_context": any(word in content_lower for word in ['that', 'this', 'it', 'previous']),
            "context_reference": None,
            "topic": "general",
            "confidence": 0.7
        }

    def _get_conversation_context_summary(self) -> str:
        """Summarize the last few user/agent turns for prompt context."""
        try:
            recent_history = self.get_conversation_history_with_context(limit=3)
            context_summary = []

            for msg in recent_history:
                if msg.get('message_type') == 'user_input':
                    content = msg.get('content', '')
                    context_summary.append(f"User said: {content[:50]}...")
                elif msg.get('message_type') == 'agent_response':
                    content = msg.get('content', '')
                    context_summary.append(f"I responded: {content[:50]}...")

            return "\n".join(context_summary) if context_summary else "No previous conversation"
        except Exception:  # was a bare except; history access is best-effort
            return "No previous conversation"

    async def _route_request(self, intent_analysis: Dict[str, Any], user_message: str,
                             context: ExecutionContext) -> str:
        """Produce a reply string based on the analyzed intent."""
        primary_intent = intent_analysis.get("primary_intent", "question")
        requires_context = intent_analysis.get("requires_context", False)

        # Route based on primary intent
        if primary_intent == "greeting":
            return "Hello! How can I assist you today?"

        elif primary_intent == "farewell":
            return "Thank you for using the assistant. Have a great day!"

        elif primary_intent == "continuation":
            # Handle references to earlier turns by feeding the summary to the LLM.
            conversation_context = self._get_conversation_context_summary()
            if self.llm_service:
                prompt = f"""The user is referring to our previous conversation. Provide a helpful response based on the context.

        Previous conversation:
        {conversation_context}

        Current user message: {user_message}

        Please provide a helpful, contextual response."""
                return await self.llm_service.generate_response(prompt)
            else:
                # was an f-string with no placeholders
                return "I understand you're referring to our previous conversation. Could you provide more specific details about what you'd like help with?"

        elif primary_intent in ["question", "request"]:
            # Build context-aware prompt for LLM
            if requires_context:
                conversation_context = self._get_conversation_context_summary()
                context_prompt = f"\n\nConversation context:\n{conversation_context}\n\n"
            else:
                context_prompt = ""

            if self.llm_service:
                prompt = f"""You are a helpful assistant. Respond to this user message appropriately.{context_prompt}User message: {user_message}

        Please provide a helpful, accurate, and contextual response."""
                return await self.llm_service.generate_response(prompt)
            else:
                if requires_context:
                    return f"I understand you're asking about something related to our previous conversation. How can I help you with '{user_message}'?"
                else:
                    return f"I understand you said: '{user_message}'. How can I help you with that?"

        else:
            # Default handling for unrecognized intents.
            if self.llm_service:
                prompt = f"""You are a helpful assistant. Respond to this user message: {user_message}"""
                return await self.llm_service.generate_response(prompt)
            else:
                return f"I understand you said: '{user_message}'. How can I help you with that?"

    async def process_message(self, message: AgentMessage, context: ExecutionContext = None) -> AgentMessage:
        """Process a user request with conversation history.

        Stores the inbound message, analyzes intent, routes to a handler,
        stores and returns the response. Errors become an ERROR message.
        """
        self.memory.store_message(message)

        try:
            user_message = message.content

            # Update conversation state
            self.update_conversation_state(user_message)

            # Get conversation context for LLM analysis
            conversation_context = self._get_conversation_context_summary()

            # Use LLM to analyze intent
            intent_analysis = await self._analyze_intent(user_message, conversation_context)

            # Route request based on LLM analysis
            response_content = await self._route_request(intent_analysis, user_message, context)

            response = self.create_response(
                content=response_content,
                recipient_id=message.sender_id,
                session_id=message.session_id,
                conversation_id=message.conversation_id
            )

            self.memory.store_message(response)
            return response

        except Exception as e:
            # Surface the failure to the caller as an ERROR-typed message.
            error_response = self.create_response(
                content=f"I encountered an error processing your request: {str(e)}",
                recipient_id=message.sender_id,
                message_type=MessageType.ERROR,
                session_id=message.session_id,
                conversation_id=message.conversation_id
            )
            return error_response
202
+
203
+
@@ -0,0 +1,133 @@
1
+ # ambivo_agents/agents/code_executor.py
2
+ """
3
+ Code Executor Agent for running code in secure Docker containers.
4
+ """
5
+
6
+ import logging
7
+ import uuid
8
+ from typing import Dict, Any
9
+
10
+ from ..core.base import BaseAgent, AgentRole, AgentMessage, MessageType, ExecutionContext, AgentTool, DockerCodeExecutor
11
+ from ..config.loader import load_config, get_config_section
12
+
13
+
14
class CodeExecutorAgent(BaseAgent):
    """Agent specialized in secure code execution via Docker containers."""

    def __init__(self, agent_id: str = None, memory_manager=None, llm_service=None, **kwargs):
        # Auto-assign a short unique id when none is supplied.
        if agent_id is None:
            agent_id = f"code_executor_{str(uuid.uuid4())[:8]}"

        super().__init__(
            agent_id=agent_id,
            role=AgentRole.CODE_EXECUTOR,
            memory_manager=memory_manager,
            llm_service=llm_service,
            name="Code Executor Agent",
            description="Agent for secure code execution using Docker containers",
            **kwargs
        )

        # Load Docker configuration from YAML; fall back to executor defaults
        # when the config file is unavailable.
        try:
            config = load_config()
            docker_config = config.get('docker', {})
        except Exception as e:
            logging.warning(f"Could not load Docker config from YAML: {e}")
            docker_config = {}

        self.docker_executor = DockerCodeExecutor(docker_config)
        self._add_code_tools()

    def _add_code_tools(self):
        """Register the Python and bash execution tools."""
        self.add_tool(AgentTool(
            name="execute_python",
            description="Execute Python code in a secure Docker container",
            function=self._execute_python_code,
            parameters_schema={
                "type": "object",
                "properties": {
                    "code": {"type": "string", "description": "Python code to execute"},
                    "files": {"type": "object", "description": "Additional files needed"}
                },
                "required": ["code"]
            }
        ))

        self.add_tool(AgentTool(
            name="execute_bash",
            description="Execute bash commands in a secure Docker container",
            function=self._execute_bash_code,
            parameters_schema={
                "type": "object",
                "properties": {
                    "code": {"type": "string", "description": "Bash commands to execute"},
                    "files": {"type": "object", "description": "Additional files needed"}
                },
                "required": ["code"]
            }
        ))

    @staticmethod
    def _extract_fenced_code(content: str, opening_fence: str) -> str:
        """Return the code between *opening_fence* and the next ``` marker.

        Bug fix: when the closing ``` is missing, str.find returns -1 and the
        previous slice content[start:-1] silently dropped the final character.
        Now the remainder of the message is used instead.
        """
        code_start = content.find(opening_fence) + len(opening_fence)
        code_end = content.find("```", code_start)
        if code_end == -1:
            code_end = len(content)
        return content[code_start:code_end].strip()

    async def _execute_python_code(self, code: str, files: Dict[str, str] = None) -> Dict[str, Any]:
        """Execute Python code safely in the Docker executor."""
        return self.docker_executor.execute_code(code, "python", files)

    async def _execute_bash_code(self, code: str, files: Dict[str, str] = None) -> Dict[str, Any]:
        """Execute bash commands safely in the Docker executor."""
        return self.docker_executor.execute_code(code, "bash", files)

    async def process_message(self, message: AgentMessage, context: ExecutionContext = None) -> AgentMessage:
        """Process a code-execution request.

        Extracts a ```python or ```bash fenced block from the message, runs
        it in Docker, and replies with the output or the failure details.
        """
        self.memory.store_message(message)

        try:
            content = message.content

            if "```python" in content:
                code = self._extract_fenced_code(content, "```python")
                result = await self._execute_python_code(code)

                if result['success']:
                    response_content = f"Code executed successfully:\n\n```\n{result['output']}\n```\n\nExecution time: {result['execution_time']:.2f}s"
                else:
                    response_content = f"Code execution failed:\n\n```\n{result['error']}\n```"

            elif "```bash" in content:
                code = self._extract_fenced_code(content, "```bash")
                result = await self._execute_bash_code(code)

                if result['success']:
                    response_content = f"Commands executed successfully:\n\n```\n{result['output']}\n```\n\nExecution time: {result['execution_time']:.2f}s"
                else:
                    response_content = f"Command execution failed:\n\n```\n{result['error']}\n```"

            else:
                response_content = "Please provide code wrapped in ```python or ```bash code blocks for execution."

            response = self.create_response(
                content=response_content,
                recipient_id=message.sender_id,
                session_id=message.session_id,
                conversation_id=message.conversation_id
            )

            self.memory.store_message(response)
            return response

        except Exception as e:
            logging.error(f"Code executor error: {e}")
            error_response = self.create_response(
                content=f"Error in code execution: {str(e)}",
                recipient_id=message.sender_id,
                message_type=MessageType.ERROR,
                session_id=message.session_id,
                conversation_id=message.conversation_id
            )
            return error_response
@@ -0,0 +1,222 @@
1
+ # CodeExecutorAgent with BaseAgentHistoryMixin
2
+ import re
3
+ import uuid
4
+
5
+ import json
6
+ import uuid
7
+ from typing import Dict, Any
8
+
9
+ from ambivo_agents import BaseAgent, AgentRole, ExecutionContext, AgentMessage, MessageType, load_config
10
+ from ambivo_agents.core.history import BaseAgentHistoryMixin, ContextType
11
+ from ambivo_agents.executors import DockerCodeExecutor
12
+
13
+
14
class CodeExecutorAgent(BaseAgent, BaseAgentHistoryMixin):
    """Agent specialized in code execution with execution history.

    Uses BaseAgentHistoryMixin to remember previously executed code so that
    follow-up requests ("run that again", "modify it") can be contextualized.
    """

    def __init__(self, agent_id: str = None, memory_manager=None, llm_service=None, **kwargs):
        # Auto-assign a short unique id when none is supplied.
        if agent_id is None:
            agent_id = f"code_executor_{str(uuid.uuid4())[:8]}"

        super().__init__(
            agent_id=agent_id,
            role=AgentRole.CODE_EXECUTOR,
            memory_manager=memory_manager,
            llm_service=llm_service,
            name="Code Executor Agent",
            description="Agent for secure code execution using Docker containers",
            **kwargs
        )

        # Enable conversation-history helpers from BaseAgentHistoryMixin.
        self.setup_history_mixin()

        # Load Docker configuration from YAML; fall back to executor defaults.
        try:
            config = load_config()
            docker_config = config.get('docker', {})
        except Exception:
            docker_config = {}

        self.docker_executor = DockerCodeExecutor(docker_config)
        self._add_code_tools()

        # Remember fenced code blocks from past messages as context.
        self.register_context_extractor(
            ContextType.CODE_REFERENCE,
            lambda text: re.findall(r'```(?:python|bash|javascript)?\n?(.*?)\n?```', text, re.DOTALL)
        )

    # NOTE(review): _add_code_tools, _execute_python_code and _execute_bash_code
    # were invoked but never defined in this module, causing AttributeError at
    # construction / execution time. The definitions below mirror
    # ambivo_agents.agents.code_executor.CodeExecutorAgent — confirm they are
    # not intended to be inherited from elsewhere.
    def _add_code_tools(self):
        """Register the Python and bash execution tools."""
        # Local import: AgentTool is part of the package's public API but is
        # not imported at this module's top level.
        from ambivo_agents import AgentTool
        self.add_tool(AgentTool(
            name="execute_python",
            description="Execute Python code in a secure Docker container",
            function=self._execute_python_code,
            parameters_schema={
                "type": "object",
                "properties": {
                    "code": {"type": "string", "description": "Python code to execute"},
                    "files": {"type": "object", "description": "Additional files needed"}
                },
                "required": ["code"]
            }
        ))
        self.add_tool(AgentTool(
            name="execute_bash",
            description="Execute bash commands in a secure Docker container",
            function=self._execute_bash_code,
            parameters_schema={
                "type": "object",
                "properties": {
                    "code": {"type": "string", "description": "Bash commands to execute"},
                    "files": {"type": "object", "description": "Additional files needed"}
                },
                "required": ["code"]
            }
        ))

    async def _execute_python_code(self, code: str, files: Dict[str, str] = None) -> Dict[str, Any]:
        """Execute Python code safely in the Docker executor."""
        return self.docker_executor.execute_code(code, "python", files)

    async def _execute_bash_code(self, code: str, files: Dict[str, str] = None) -> Dict[str, Any]:
        """Execute bash commands safely in the Docker executor."""
        return self.docker_executor.execute_code(code, "bash", files)

    async def _analyze_intent(self, user_message: str, conversation_context: str = "") -> Dict[str, Any]:
        """Analyze code-execution intent, preferring the LLM when available.

        Returns a dict with keys: primary_intent, language,
        references_previous, code_blocks, execution_type, confidence.
        """
        if not self.llm_service:
            return self._keyword_based_analysis(user_message)

        prompt = f"""
        Analyze this user message in the context of code execution:

        Previous Execution Context:
        {conversation_context}

        Current User Message: {user_message}

        Respond in JSON format:
        {{
            "primary_intent": "execute_code|modify_code|debug_code|explain_code|continue_execution",
            "language": "python|bash|javascript",
            "references_previous": true/false,
            "code_blocks": ["extracted code"],
            "execution_type": "new|modification|continuation",
            "confidence": 0.0-1.0
        }}
        """

        try:
            response = await self.llm_service.generate_response(prompt)
            # re is imported at module level; no local import needed.
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                return json.loads(json_match.group())
            # was: self._extract_intent_from_llm_response(response, user_message),
            # a method that is not defined anywhere in this module; the resulting
            # AttributeError was swallowed by the except below and effectively
            # degraded to the keyword heuristic — call it directly instead.
            return self._keyword_based_analysis(user_message)
        except Exception:
            return self._keyword_based_analysis(user_message)

    def _keyword_based_analysis(self, user_message: str) -> Dict[str, Any]:
        """Fallback keyword-based analysis for code execution (no LLM)."""
        content_lower = user_message.lower()

        # First matching category wins; default to executing Python.
        if "```python" in user_message:
            intent = 'execute_code'
            language = 'python'
        elif "```bash" in user_message:
            intent = 'execute_code'
            language = 'bash'
        elif any(word in content_lower for word in ['run', 'execute', 'test']):
            intent = 'execute_code'
            language = 'python'
        elif any(word in content_lower for word in ['modify', 'change', 'update']):
            intent = 'modify_code'
            language = 'python'
        elif any(word in content_lower for word in ['debug', 'fix', 'error']):
            intent = 'debug_code'
            language = 'python'
        else:
            intent = 'execute_code'
            language = 'python'

        # Extract code blocks
        code_blocks = re.findall(r'```(?:python|bash)?\n?(.*?)\n?```', user_message, re.DOTALL)

        return {
            "primary_intent": intent,
            "language": language,
            "references_previous": any(word in content_lower for word in ['that', 'previous', 'last', 'again']),
            "code_blocks": code_blocks,
            "execution_type": "new",
            "confidence": 0.8
        }

    def _get_conversation_context_summary(self) -> str:
        """Summarize recent code executions (code seen + success/failure)."""
        try:
            recent_history = self.get_conversation_history_with_context(
                limit=3,
                context_types=[ContextType.CODE_REFERENCE]
            )

            context_summary = []
            for msg in recent_history:
                if msg.get('message_type') == 'user_input':
                    extracted_context = msg.get('extracted_context', {})
                    code_refs = extracted_context.get('code_reference', [])

                    if code_refs:
                        context_summary.append(f"Previous code: {code_refs[0][:100]}...")
                elif msg.get('message_type') == 'agent_response':
                    content = msg.get('content', '')
                    # Infer outcome from the phrasing of our own responses.
                    if 'executed successfully' in content.lower():
                        context_summary.append("Previous execution: successful")
                    elif 'failed' in content.lower():
                        context_summary.append("Previous execution: failed")

            return "\n".join(context_summary) if context_summary else "No previous code execution"
        except Exception:  # was a bare except; history access is best-effort
            return "No previous code execution"

    async def _route_request(self, intent_analysis: Dict[str, Any], user_message: str,
                             context: ExecutionContext) -> str:
        """Dispatch on the analyzed intent and return the reply string."""
        primary_intent = intent_analysis.get("primary_intent", "execute_code")
        language = intent_analysis.get("language", "python")
        code_blocks = intent_analysis.get("code_blocks", [])
        references_previous = intent_analysis.get("references_previous", False)

        if primary_intent == "execute_code":
            if code_blocks:
                # Execute the first provided code block.
                code = code_blocks[0]
                if language == "python":
                    result = await self._execute_python_code(code)
                elif language == "bash":
                    result = await self._execute_bash_code(code)
                else:
                    return f"Unsupported language: {language}"

                if result['success']:
                    return f"Code executed successfully:\n\n```\n{result['output']}\n```\n\nExecution time: {result['execution_time']:.2f}s"
                else:
                    return f"Code execution failed:\n\n```\n{result['error']}\n```"
            else:
                return "Please provide code wrapped in ```python or ```bash code blocks for execution."

        elif primary_intent == "modify_code":
            if references_previous:
                return "I can help modify code. Please provide the specific changes you want to make or show me the modified code."
            else:
                return "Please provide the code you want to modify."

        elif primary_intent == "debug_code":
            return "I can help debug code. Please provide the code that's having issues and describe the problem."

        else:
            return "Please provide code wrapped in ```python or ```bash code blocks for execution."

    async def process_message(self, message: AgentMessage, context: ExecutionContext = None) -> AgentMessage:
        """Process a code-execution request with execution history.

        Stores the inbound message, analyzes intent against recent execution
        context, routes to a handler, stores and returns the response.
        """
        self.memory.store_message(message)

        try:
            user_message = message.content

            # Update conversation state
            self.update_conversation_state(user_message)

            # Get conversation context for analysis
            conversation_context = self._get_conversation_context_summary()

            # Use LLM to analyze intent
            intent_analysis = await self._analyze_intent(user_message, conversation_context)

            # Route request based on analysis
            response_content = await self._route_request(intent_analysis, user_message, context)

            response = self.create_response(
                content=response_content,
                recipient_id=message.sender_id,
                session_id=message.session_id,
                conversation_id=message.conversation_id
            )

            self.memory.store_message(response)
            return response

        except Exception as e:
            # Surface the failure to the caller as an ERROR-typed message.
            error_response = self.create_response(
                content=f"Error in code execution: {str(e)}",
                recipient_id=message.sender_id,
                message_type=MessageType.ERROR,
                session_id=message.session_id,
                conversation_id=message.conversation_id
            )
            return error_response