aiecs 1.3.8__py3-none-any.whl → 1.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiecs might be problematic. Click here for more details.

Files changed (37):
  1. aiecs/__init__.py +1 -1
  2. aiecs/domain/__init__.py +120 -0
  3. aiecs/domain/agent/__init__.py +184 -0
  4. aiecs/domain/agent/base_agent.py +691 -0
  5. aiecs/domain/agent/exceptions.py +99 -0
  6. aiecs/domain/agent/hybrid_agent.py +495 -0
  7. aiecs/domain/agent/integration/__init__.py +23 -0
  8. aiecs/domain/agent/integration/context_compressor.py +219 -0
  9. aiecs/domain/agent/integration/context_engine_adapter.py +258 -0
  10. aiecs/domain/agent/integration/retry_policy.py +228 -0
  11. aiecs/domain/agent/integration/role_config.py +217 -0
  12. aiecs/domain/agent/lifecycle.py +298 -0
  13. aiecs/domain/agent/llm_agent.py +309 -0
  14. aiecs/domain/agent/memory/__init__.py +13 -0
  15. aiecs/domain/agent/memory/conversation.py +216 -0
  16. aiecs/domain/agent/migration/__init__.py +15 -0
  17. aiecs/domain/agent/migration/conversion.py +171 -0
  18. aiecs/domain/agent/migration/legacy_wrapper.py +97 -0
  19. aiecs/domain/agent/models.py +263 -0
  20. aiecs/domain/agent/observability.py +443 -0
  21. aiecs/domain/agent/persistence.py +287 -0
  22. aiecs/domain/agent/prompts/__init__.py +25 -0
  23. aiecs/domain/agent/prompts/builder.py +164 -0
  24. aiecs/domain/agent/prompts/formatters.py +192 -0
  25. aiecs/domain/agent/prompts/template.py +264 -0
  26. aiecs/domain/agent/registry.py +261 -0
  27. aiecs/domain/agent/tool_agent.py +267 -0
  28. aiecs/domain/agent/tools/__init__.py +13 -0
  29. aiecs/domain/agent/tools/schema_generator.py +222 -0
  30. aiecs/main.py +2 -2
  31. aiecs/tools/search_tool/__init__.py +1 -0
  32. {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/METADATA +1 -1
  33. {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/RECORD +37 -10
  34. {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/WHEEL +0 -0
  35. {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/entry_points.txt +0 -0
  36. {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/licenses/LICENSE +0 -0
  37. {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,309 @@
1
+ """
2
+ LLM Agent
3
+
4
+ Agent implementation powered by LLM for text generation and reasoning.
5
+ """
6
+
7
+ import logging
8
+ from typing import Dict, List, Any, Optional
9
+ from datetime import datetime
10
+
11
+ from aiecs.llm import BaseLLMClient, LLMMessage, LLMResponse
12
+
13
+ from .base_agent import BaseAIAgent
14
+ from .models import AgentType, AgentConfiguration
15
+ from .exceptions import TaskExecutionError, AgentInitializationError
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class LLMAgent(BaseAIAgent):
    """
    LLM-powered agent for text generation and reasoning.

    This agent uses an LLM client to process tasks and generate responses.
    When ``config.memory_enabled`` is set, user/assistant turns are kept in
    an in-memory conversation history and replayed on subsequent calls.
    """

    def __init__(
        self,
        agent_id: str,
        name: str,
        llm_client: BaseLLMClient,
        config: AgentConfiguration,
        description: Optional[str] = None,
        version: str = "1.0.0",
    ):
        """
        Initialize LLM agent.

        Args:
            agent_id: Unique agent identifier
            name: Agent name
            llm_client: LLM client instance
            config: Agent configuration
            description: Optional description
            version: Agent version
        """
        super().__init__(
            agent_id=agent_id,
            name=name,
            agent_type=AgentType.CONVERSATIONAL,
            config=config,
            description=description or "LLM-powered conversational agent",
            version=version,
        )

        self.llm_client = llm_client
        self._system_prompt: Optional[str] = None
        self._conversation_history: List[LLMMessage] = []

        logger.info(f"LLMAgent initialized: {agent_id} with client {llm_client.provider_name}")

    async def _initialize(self) -> None:
        """Initialize LLM agent by building its system prompt from config."""
        self._system_prompt = self._build_system_prompt()
        logger.debug(f"LLMAgent {self.agent_id} initialized with system prompt")

    async def _shutdown(self) -> None:
        """Shutdown LLM agent: drop history and close the client if possible."""
        self._conversation_history.clear()

        # Close LLM client if it has a close method (not all clients do).
        if hasattr(self.llm_client, 'close'):
            await self.llm_client.close()

        logger.info(f"LLMAgent {self.agent_id} shut down")

    def _build_system_prompt(self) -> str:
        """Build system prompt from configuration.

        Concatenates the configured goal, backstory, domain knowledge and
        reasoning guidance; falls back to a generic assistant prompt when
        none are set.
        """
        parts = []

        if self._config.goal:
            parts.append(f"Goal: {self._config.goal}")

        if self._config.backstory:
            parts.append(f"Background: {self._config.backstory}")

        if self._config.domain_knowledge:
            parts.append(f"Domain Knowledge: {self._config.domain_knowledge}")

        if self._config.reasoning_guidance:
            parts.append(f"Reasoning Approach: {self._config.reasoning_guidance}")

        if not parts:
            return "You are a helpful AI assistant."

        return "\n\n".join(parts)

    async def execute_task(
        self,
        task: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute a task using the LLM.

        Args:
            task: Task specification with 'description', 'prompt', or 'task'
            context: Execution context

        Returns:
            Execution result with 'success', 'output', 'provider', 'model',
            'tokens_used', 'execution_time', 'timestamp'

        Raises:
            TaskExecutionError: If the task is malformed or execution fails
        """
        start_time = datetime.utcnow()

        # Validate BEFORE any state transition: previously this check lived
        # inside the try block, so a malformed task was double-wrapped by the
        # generic handler, recorded as a failed execution, and pushed the
        # agent into ERROR even though it never entered BUSY.
        task_description = task.get('description') or task.get('prompt') or task.get('task')
        if not task_description:
            raise TaskExecutionError(
                "Task must contain 'description', 'prompt', or 'task' field",
                agent_id=self.agent_id
            )

        try:
            # Transition to busy state.
            # NOTE(review): assumes self.state is an enum member whose class
            # exposes BUSY/ACTIVE/ERROR — confirm against the agent state
            # enum in .models; importing it directly would be clearer.
            self._transition_state(self.state.__class__.BUSY)
            self._current_task_id = task.get('task_id')

            # Build messages (system prompt + optional history + context + user turn)
            messages = self._build_messages(task_description, context)

            # Call LLM
            response = await self.llm_client.generate_text(
                messages=messages,
                model=self._config.llm_model,
                temperature=self._config.temperature,
                max_tokens=self._config.max_tokens,
            )

            # Extract result
            output = response.content

            # Store in conversation history if enabled
            if self._config.memory_enabled:
                self._conversation_history.append(LLMMessage(role="user", content=task_description))
                self._conversation_history.append(LLMMessage(role="assistant", content=output))

            # Calculate execution time
            execution_time = (datetime.utcnow() - start_time).total_seconds()

            # Update metrics; total_tokens may be absent on some responses.
            self.update_metrics(
                execution_time=execution_time,
                success=True,
                tokens_used=getattr(response, 'total_tokens', None),
            )

            # Transition back to active
            self._transition_state(self.state.__class__.ACTIVE)
            self._current_task_id = None
            self.last_active_at = datetime.utcnow()

            return {
                "success": True,
                "output": output,
                "provider": response.provider,
                "model": response.model,
                "tokens_used": getattr(response, 'total_tokens', None),
                "execution_time": execution_time,
                "timestamp": datetime.utcnow().isoformat(),
            }

        except Exception as e:
            logger.error(f"Task execution failed for {self.agent_id}: {e}")

            # Update metrics for failure
            execution_time = (datetime.utcnow() - start_time).total_seconds()
            self.update_metrics(execution_time=execution_time, success=False)

            # Transition to error state
            self._transition_state(self.state.__class__.ERROR)
            self._current_task_id = None

            # Chain the original exception so the root cause is preserved.
            raise TaskExecutionError(
                f"Task execution failed: {str(e)}",
                agent_id=self.agent_id,
                task_id=task.get('task_id')
            ) from e

    async def process_message(
        self,
        message: str,
        sender_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Process an incoming message.

        Args:
            message: Message content
            sender_id: Optional sender identifier

        Returns:
            Response dictionary with 'response', 'tokens_used', 'timestamp'
        """
        try:
            # Wrap the message as a synthetic task and reuse execute_task.
            task = {
                "description": message,
                "task_id": f"msg_{datetime.utcnow().timestamp()}",
            }

            # Execute as task
            result = await self.execute_task(task, {"sender_id": sender_id})

            return {
                "response": result.get("output"),
                "tokens_used": result.get("tokens_used"),
                "timestamp": result.get("timestamp"),
            }

        except Exception as e:
            logger.error(f"Message processing failed for {self.agent_id}: {e}")
            raise

    def _build_messages(
        self,
        user_message: str,
        context: Dict[str, Any]
    ) -> List[LLMMessage]:
        """
        Build LLM messages from task and context.

        Args:
            user_message: User message
            context: Context dictionary

        Returns:
            List of LLM messages
        """
        messages = []

        # Add system prompt
        if self._system_prompt:
            messages.append(LLMMessage(role="system", content=self._system_prompt))

        # Add conversation history if available and memory enabled
        if self._config.memory_enabled and self._conversation_history:
            # Limit history to prevent token overflow
            max_history = 10  # last 10 messages, i.e. 5 user/assistant exchanges
            messages.extend(self._conversation_history[-max_history:])

        # Add additional context if provided
        if context:
            context_str = self._format_context(context)
            if context_str:
                messages.append(LLMMessage(role="system", content=f"Additional Context:\n{context_str}"))

        # Add user message
        messages.append(LLMMessage(role="user", content=user_message))

        return messages

    def _format_context(self, context: Dict[str, Any]) -> str:
        """Format context dictionary as a newline-separated string.

        Keys starting with '_' and None values are treated as internal and
        skipped.
        """
        relevant_fields = []

        for key, value in context.items():
            if not key.startswith('_') and value is not None:
                relevant_fields.append(f"{key}: {value}")

        return "\n".join(relevant_fields) if relevant_fields else ""

    def clear_conversation_history(self) -> None:
        """Clear conversation history."""
        self._conversation_history.clear()
        logger.info(f"LLMAgent {self.agent_id} conversation history cleared")

    def get_conversation_history(self) -> List[Dict[str, str]]:
        """Get conversation history as plain role/content dictionaries."""
        return [
            {"role": msg.role, "content": msg.content}
            for msg in self._conversation_history
        ]

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "LLMAgent":
        """
        Deserialize LLMAgent from dictionary.

        Note: LLM client must be provided separately as it cannot be serialized.

        Args:
            data: Dictionary representation

        Returns:
            LLMAgent instance

        Raises:
            NotImplementedError: Always; use the constructor instead.
        """
        raise NotImplementedError(
            "LLMAgent.from_dict requires LLM client to be provided separately. "
            "Use constructor instead."
        )
@@ -0,0 +1,13 @@
1
+ """
2
+ Agent Memory Module
3
+
4
+ Conversation memory and history management.
5
+ """
6
+
7
+ from .conversation import ConversationMemory, Session
8
+
9
+ __all__ = [
10
+ "ConversationMemory",
11
+ "Session",
12
+ ]
13
+
@@ -0,0 +1,216 @@
1
+ """
2
+ Conversation Memory
3
+
4
+ Multi-turn conversation handling with session management.
5
+ """
6
+
7
+ import logging
8
+ from typing import Dict, List, Optional
9
+ from datetime import datetime
10
+ from dataclasses import dataclass, field
11
+
12
+ from aiecs.llm import LLMMessage
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
@dataclass
class Session:
    """A single conversation session owned by one agent."""
    session_id: str
    agent_id: str
    created_at: datetime = field(default_factory=datetime.utcnow)
    last_activity: datetime = field(default_factory=datetime.utcnow)
    messages: List[LLMMessage] = field(default_factory=list)
    metadata: Dict = field(default_factory=dict)

    def add_message(self, role: str, content: str) -> None:
        """Append a message and bump the activity timestamp."""
        msg = LLMMessage(role=role, content=content)
        self.messages.append(msg)
        self.last_activity = datetime.utcnow()

    def get_recent_messages(self, limit: int) -> List[LLMMessage]:
        """Return the trailing `limit` messages (all messages when limit is falsy)."""
        if not limit:
            return self.messages
        return self.messages[-limit:]

    def clear(self) -> None:
        """Drop every stored message."""
        del self.messages[:]
40
+
41
class ConversationMemory:
    """
    Manages multi-turn conversations with session isolation.

    Example:
        memory = ConversationMemory(agent_id="agent-1")
        session_id = memory.create_session()
        memory.add_message(session_id, "user", "Hello")
        memory.add_message(session_id, "assistant", "Hi there!")
        history = memory.get_history(session_id)
    """

    def __init__(self, agent_id: str, max_sessions: int = 100):
        """
        Initialize conversation memory.

        Args:
            agent_id: Agent identifier
            max_sessions: Maximum number of sessions to keep
        """
        self.agent_id = agent_id
        self.max_sessions = max_sessions
        self._sessions: Dict[str, Session] = {}
        logger.info(f"ConversationMemory initialized for agent {agent_id}")

    def create_session(self, session_id: Optional[str] = None) -> str:
        """
        Create a new conversation session.

        Args:
            session_id: Optional custom session ID

        Returns:
            Session ID
        """
        # Generate a timestamp-based ID when the caller did not supply one.
        sid = f"session_{datetime.utcnow().timestamp()}" if session_id is None else session_id

        if sid in self._sessions:
            logger.warning(f"Session {sid} already exists")
            return sid

        self._sessions[sid] = Session(session_id=sid, agent_id=self.agent_id)

        # Evict oldest sessions once the cap is exceeded.
        if len(self._sessions) > self.max_sessions:
            self._cleanup_old_sessions()

        logger.debug(f"Session {sid} created")
        return sid

    def add_message(
        self,
        session_id: str,
        role: str,
        content: str
    ) -> None:
        """
        Add message to session.

        Args:
            session_id: Session ID
            role: Message role
            content: Message content
        """
        # Lazily create the session so callers never hit a KeyError.
        if session_id not in self._sessions:
            logger.warning(f"Session {session_id} not found, creating it")
            self.create_session(session_id)

        self._sessions[session_id].add_message(role, content)

    def get_history(
        self,
        session_id: str,
        limit: Optional[int] = None
    ) -> List[LLMMessage]:
        """
        Get conversation history for session.

        Args:
            session_id: Session ID
            limit: Optional limit on number of messages

        Returns:
            List of messages
        """
        session = self._sessions.get(session_id)
        if session is None:
            return []

        if limit:
            return session.get_recent_messages(limit)
        return session.messages.copy()

    def format_history(
        self,
        session_id: str,
        limit: Optional[int] = None
    ) -> str:
        """
        Format conversation history as string.

        Args:
            session_id: Session ID
            limit: Optional limit on number of messages

        Returns:
            Formatted history string
        """
        return "\n".join(
            f"{msg.role.upper()}: {msg.content}"
            for msg in self.get_history(session_id, limit)
        )

    def clear_session(self, session_id: str) -> None:
        """
        Clear session messages.

        Args:
            session_id: Session ID
        """
        session = self._sessions.get(session_id)
        if session is not None:
            session.clear()
            logger.debug(f"Session {session_id} cleared")

    def delete_session(self, session_id: str) -> None:
        """
        Delete session.

        Args:
            session_id: Session ID
        """
        if self._sessions.pop(session_id, None) is not None:
            logger.debug(f"Session {session_id} deleted")

    def get_session(self, session_id: str) -> Optional[Session]:
        """
        Get session object.

        Args:
            session_id: Session ID

        Returns:
            Session or None
        """
        return self._sessions.get(session_id)

    def list_sessions(self) -> List[str]:
        """List all session IDs."""
        return list(self._sessions)

    def _cleanup_old_sessions(self) -> None:
        """Remove oldest sessions to maintain limit."""
        overflow = len(self._sessions) - self.max_sessions
        # Least recently active sessions first.
        stale = sorted(self._sessions.values(), key=lambda s: s.last_activity)

        for victim in stale[:overflow]:
            del self._sessions[victim.session_id]
            logger.debug(f"Removed old session {victim.session_id}")

    def get_stats(self) -> Dict:
        """Get memory statistics."""
        message_total = sum(len(s.messages) for s in self._sessions.values())
        return {
            "agent_id": self.agent_id,
            "total_sessions": len(self._sessions),
            "total_messages": message_total,
        }
216
+
@@ -0,0 +1,15 @@
1
+ """
2
+ Migration Utilities
3
+
4
+ Tools for migrating from legacy agents and LangChain to BaseAIAgent.
5
+ """
6
+
7
+ from .legacy_wrapper import LegacyAgentWrapper
8
+ from .conversion import convert_langchain_prompt, convert_legacy_config
9
+
10
+ __all__ = [
11
+ "LegacyAgentWrapper",
12
+ "convert_langchain_prompt",
13
+ "convert_legacy_config",
14
+ ]
15
+