aiecs 1.3.8__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- aiecs/__init__.py +1 -1
- aiecs/domain/__init__.py +120 -0
- aiecs/domain/agent/__init__.py +184 -0
- aiecs/domain/agent/base_agent.py +691 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/hybrid_agent.py +495 -0
- aiecs/domain/agent/integration/__init__.py +23 -0
- aiecs/domain/agent/integration/context_compressor.py +219 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +258 -0
- aiecs/domain/agent/integration/retry_policy.py +228 -0
- aiecs/domain/agent/integration/role_config.py +217 -0
- aiecs/domain/agent/lifecycle.py +298 -0
- aiecs/domain/agent/llm_agent.py +309 -0
- aiecs/domain/agent/memory/__init__.py +13 -0
- aiecs/domain/agent/memory/conversation.py +216 -0
- aiecs/domain/agent/migration/__init__.py +15 -0
- aiecs/domain/agent/migration/conversion.py +171 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +97 -0
- aiecs/domain/agent/models.py +263 -0
- aiecs/domain/agent/observability.py +443 -0
- aiecs/domain/agent/persistence.py +287 -0
- aiecs/domain/agent/prompts/__init__.py +25 -0
- aiecs/domain/agent/prompts/builder.py +164 -0
- aiecs/domain/agent/prompts/formatters.py +192 -0
- aiecs/domain/agent/prompts/template.py +264 -0
- aiecs/domain/agent/registry.py +261 -0
- aiecs/domain/agent/tool_agent.py +267 -0
- aiecs/domain/agent/tools/__init__.py +13 -0
- aiecs/domain/agent/tools/schema_generator.py +222 -0
- aiecs/main.py +2 -2
- {aiecs-1.3.8.dist-info → aiecs-1.4.0.dist-info}/METADATA +1 -1
- {aiecs-1.3.8.dist-info → aiecs-1.4.0.dist-info}/RECORD +36 -9
- {aiecs-1.3.8.dist-info → aiecs-1.4.0.dist-info}/WHEEL +0 -0
- {aiecs-1.3.8.dist-info → aiecs-1.4.0.dist-info}/entry_points.txt +0 -0
- {aiecs-1.3.8.dist-info → aiecs-1.4.0.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.3.8.dist-info → aiecs-1.4.0.dist-info}/top_level.txt +0 -0
aiecs/domain/agent/persistence.py
@@ -0,0 +1,287 @@
+"""
+Agent Persistence
+
+Interfaces and implementations for saving/loading agent state.
+"""
+
+import logging
+import json
+from typing import Dict, Any, Optional, Protocol
+from abc import ABC, abstractmethod
+from datetime import datetime
+
+from .base_agent import BaseAIAgent
+from .exceptions import SerializationError
+
+logger = logging.getLogger(__name__)
+
+
+class AgentPersistence(Protocol):
+    """Protocol for agent persistence implementations."""
+
+    async def save(self, agent: BaseAIAgent) -> None:
+        """
+        Save agent state.
+
+        Args:
+            agent: Agent to save
+        """
+        ...
+
+    async def load(self, agent_id: str) -> Dict[str, Any]:
+        """
+        Load agent state.
+
+        Args:
+            agent_id: Agent identifier
+
+        Returns:
+            Agent state dictionary
+        """
+        ...
+
+    async def exists(self, agent_id: str) -> bool:
+        """
+        Check if agent state exists.
+
+        Args:
+            agent_id: Agent identifier
+
+        Returns:
+            True if exists
+        """
+        ...
+
+    async def delete(self, agent_id: str) -> None:
+        """
+        Delete agent state.
+
+        Args:
+            agent_id: Agent identifier
+        """
+        ...
+
+
+class InMemoryPersistence:
+    """In-memory agent persistence (for testing/development)."""
+
+    def __init__(self):
+        """Initialize in-memory storage."""
+        self._storage: Dict[str, Dict[str, Any]] = {}
+        logger.info("InMemoryPersistence initialized")
+
+    async def save(self, agent: BaseAIAgent) -> None:
+        """Save agent state to memory."""
+        try:
+            state = agent.to_dict()
+            # Convert any remaining datetime objects to ISO strings
+            state = self._serialize_datetimes(state)
+            self._storage[agent.agent_id] = {
+                "state": state,
+                "saved_at": datetime.utcnow().isoformat(),
+            }
+            logger.debug(f"Agent {agent.agent_id} saved to memory")
+        except Exception as e:
+            logger.error(f"Failed to save agent {agent.agent_id}: {e}")
+            raise SerializationError(f"Failed to save agent: {str(e)}")
+
+    def _serialize_datetimes(self, obj: Any) -> Any:
+        """Recursively serialize datetime objects to ISO strings."""
+        import json
+        if isinstance(obj, dict):
+            return {k: self._serialize_datetimes(v) for k, v in obj.items()}
+        elif isinstance(obj, list):
+            return [self._serialize_datetimes(item) for item in obj]
+        elif isinstance(obj, datetime):
+            return obj.isoformat()
+        else:
+            return obj
+
+    async def load(self, agent_id: str) -> Dict[str, Any]:
+        """Load agent state from memory."""
+        if agent_id not in self._storage:
+            raise KeyError(f"Agent {agent_id} not found in storage")
+
+        data = self._storage[agent_id]
+        logger.debug(f"Agent {agent_id} loaded from memory")
+        return data["state"]
+
+    async def exists(self, agent_id: str) -> bool:
+        """Check if agent exists in memory."""
+        return agent_id in self._storage
+
+    async def delete(self, agent_id: str) -> None:
+        """Delete agent from memory."""
+        if agent_id in self._storage:
+            del self._storage[agent_id]
+            logger.debug(f"Agent {agent_id} deleted from memory")
+
+    def clear(self) -> None:
+        """Clear all stored agents."""
+        self._storage.clear()
+        logger.info("InMemoryPersistence cleared")
+
+
+class FilePersistence:
+    """File-based agent persistence."""
+
+    def __init__(self, base_path: str = "./agent_states"):
+        """
+        Initialize file-based storage.
+
+        Args:
+            base_path: Base directory for agent states
+        """
+        import os
+
+        self.base_path = base_path
+        os.makedirs(base_path, exist_ok=True)
+        logger.info(f"FilePersistence initialized with base_path: {base_path}")
+
+    def _get_file_path(self, agent_id: str) -> str:
+        """Get file path for agent."""
+        import os
+        # Sanitize agent_id for filesystem
+        safe_id = agent_id.replace("/", "_").replace("\\", "_")
+        return os.path.join(self.base_path, f"{safe_id}.json")
+
+    async def save(self, agent: BaseAIAgent) -> None:
+        """Save agent state to file."""
+        try:
+            state = agent.to_dict()
+            # Convert any remaining datetime objects to ISO strings for JSON serialization
+            state = self._serialize_datetimes(state)
+            file_path = self._get_file_path(agent.agent_id)
+
+            data = {
+                "state": state,
+                "saved_at": datetime.utcnow().isoformat(),
+            }
+
+            with open(file_path, 'w') as f:
+                json.dump(data, f, indent=2, default=str)  # default=str handles any remaining non-serializable objects
+
+            logger.debug(f"Agent {agent.agent_id} saved to {file_path}")
+        except Exception as e:
+            logger.error(f"Failed to save agent {agent.agent_id}: {e}")
+            raise SerializationError(f"Failed to save agent: {str(e)}")
+
+    def _serialize_datetimes(self, obj: Any) -> Any:
+        """Recursively serialize datetime objects to ISO strings."""
+        if isinstance(obj, dict):
+            return {k: self._serialize_datetimes(v) for k, v in obj.items()}
+        elif isinstance(obj, list):
+            return [self._serialize_datetimes(item) for item in obj]
+        elif isinstance(obj, datetime):
+            return obj.isoformat()
+        else:
+            return obj
+
+    async def load(self, agent_id: str) -> Dict[str, Any]:
+        """Load agent state from file."""
+        file_path = self._get_file_path(agent_id)
+
+        try:
+            with open(file_path, 'r') as f:
+                data = json.load(f)
+
+            logger.debug(f"Agent {agent_id} loaded from {file_path}")
+            return data["state"]
+        except FileNotFoundError:
+            raise KeyError(f"Agent {agent_id} not found in storage")
+        except Exception as e:
+            logger.error(f"Failed to load agent {agent_id}: {e}")
+            raise SerializationError(f"Failed to load agent: {str(e)}")
+
+    async def exists(self, agent_id: str) -> bool:
+        """Check if agent file exists."""
+        import os
+        file_path = self._get_file_path(agent_id)
+        return os.path.exists(file_path)
+
+    async def delete(self, agent_id: str) -> None:
+        """Delete agent file."""
+        import os
+        file_path = self._get_file_path(agent_id)
+
+        try:
+            if os.path.exists(file_path):
+                os.remove(file_path)
+                logger.debug(f"Agent {agent_id} deleted from {file_path}")
+        except Exception as e:
+            logger.error(f"Failed to delete agent {agent_id}: {e}")
+            raise
+
+
+class AgentStateSerializer:
+    """
+    Helper class for serializing/deserializing agent state.
+
+    Handles complex types that need special serialization.
+    """
+
+    @staticmethod
+    def serialize(agent: BaseAIAgent) -> Dict[str, Any]:
+        """
+        Serialize agent to dictionary.
+
+        Args:
+            agent: Agent to serialize
+
+        Returns:
+            Serialized state dictionary
+        """
+        return agent.to_dict()
+
+    @staticmethod
+    def deserialize(data: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Deserialize agent state.
+
+        Args:
+            data: Serialized state
+
+        Returns:
+            Deserialized state dictionary
+
+        Note: This returns a state dictionary, not an agent instance.
+        Agent reconstruction requires the appropriate agent class.
+        """
+        # In the future, this could handle type conversion, validation, etc.
+        return data
+
+
+# Global persistence instance
+_global_persistence: Optional[AgentPersistence] = None
+
+
+def get_global_persistence() -> AgentPersistence:
+    """
+    Get or create global persistence instance.
+
+    Returns:
+        Global persistence instance (defaults to InMemoryPersistence)
+    """
+    global _global_persistence
+    if _global_persistence is None:
+        _global_persistence = InMemoryPersistence()
+    return _global_persistence
+
+
+def set_global_persistence(persistence: AgentPersistence) -> None:
+    """
+    Set global persistence instance.
+
+    Args:
+        persistence: Persistence implementation to use
+    """
+    global _global_persistence
+    _global_persistence = persistence
+    logger.info(f"Global persistence set to {type(persistence).__name__}")
+
+
+def reset_global_persistence() -> None:
+    """Reset global persistence (primarily for testing)."""
+    global _global_persistence
+    _global_persistence = None
+
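A minimal usage sketch of the persistence API above (not part of the release). The StubAgent class is a hypothetical stand-in: any BaseAIAgent subclass exposing agent_id and to_dict() would behave the same way, and importing the module assumes the 1.4.0 wheel is installed.

import asyncio
from datetime import datetime

from aiecs.domain.agent.persistence import (
    FilePersistence,
    get_global_persistence,
    set_global_persistence,
)


class StubAgent:
    """Hypothetical stand-in for a BaseAIAgent subclass."""
    agent_id = "demo-agent"

    def to_dict(self):
        # datetime values are converted to ISO strings by _serialize_datetimes()
        return {"agent_id": self.agent_id, "created_at": datetime.utcnow()}


async def main():
    # Swap the default InMemoryPersistence for file-backed storage
    set_global_persistence(FilePersistence(base_path="./agent_states"))
    persistence = get_global_persistence()

    await persistence.save(StubAgent())        # writes ./agent_states/demo-agent.json
    if await persistence.exists("demo-agent"):
        state = await persistence.load("demo-agent")
        print(state["agent_id"])
    await persistence.delete("demo-agent")


asyncio.run(main())

Note that FilePersistence sanitizes the agent_id before building the file name, so identifiers containing path separators are stored safely.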
aiecs/domain/agent/prompts/__init__.py
@@ -0,0 +1,25 @@
+"""
+Prompt Templates Module
+
+Native prompt template system replacing LangChain templates.
+"""
+
+from .template import PromptTemplate, ChatPromptTemplate, MessageTemplate, TemplateMissingVariableError
+from .builder import MessageBuilder
+from .formatters import (
+    format_conversation_history,
+    format_tool_result,
+    truncate_context,
+)
+
+__all__ = [
+    "PromptTemplate",
+    "ChatPromptTemplate",
+    "MessageTemplate",
+    "TemplateMissingVariableError",
+    "MessageBuilder",
+    "format_conversation_history",
+    "format_tool_result",
+    "truncate_context",
+]
+
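For reference, a sketch of the import surface this __init__ exposes at the package path aiecs.domain.agent.prompts (not part of the diff; it imports only the names re-exported above):

from aiecs.domain.agent.prompts import (
    PromptTemplate,
    ChatPromptTemplate,
    MessageTemplate,
    TemplateMissingVariableError,
    MessageBuilder,
    format_conversation_history,
    format_tool_result,
    truncate_context,
)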
aiecs/domain/agent/prompts/builder.py
@@ -0,0 +1,164 @@
+"""
+Message Builder
+
+Helper for constructing LLMMessage lists.
+"""
+
+from typing import List, Dict, Any, Optional
+from aiecs.llm import LLMMessage
+
+
+class MessageBuilder:
+    """
+    Builder for constructing LLM message sequences.
+
+    Example:
+        builder = MessageBuilder()
+        builder.add_system("You are a helpful assistant")
+        builder.add_user("What is AI?")
+        messages = builder.build()
+    """
+
+    def __init__(self):
+        """Initialize message builder."""
+        self.messages: List[LLMMessage] = []
+
+    def add_system(self, content: str) -> "MessageBuilder":
+        """
+        Add system message.
+
+        Args:
+            content: Message content
+
+        Returns:
+            Self for chaining
+        """
+        self.messages.append(LLMMessage(role="system", content=content))
+        return self
+
+    def add_user(self, content: str) -> "MessageBuilder":
+        """
+        Add user message.
+
+        Args:
+            content: Message content
+
+        Returns:
+            Self for chaining
+        """
+        self.messages.append(LLMMessage(role="user", content=content))
+        return self
+
+    def add_assistant(self, content: str) -> "MessageBuilder":
+        """
+        Add assistant message.
+
+        Args:
+            content: Message content
+
+        Returns:
+            Self for chaining
+        """
+        self.messages.append(LLMMessage(role="assistant", content=content))
+        return self
+
+    def add_message(self, role: str, content: str) -> "MessageBuilder":
+        """
+        Add message with custom role.
+
+        Args:
+            role: Message role
+            content: Message content
+
+        Returns:
+            Self for chaining
+        """
+        self.messages.append(LLMMessage(role=role, content=content))
+        return self
+
+    def add_messages(self, messages: List[LLMMessage]) -> "MessageBuilder":
+        """
+        Add multiple messages.
+
+        Args:
+            messages: List of messages to add
+
+        Returns:
+            Self for chaining
+        """
+        self.messages.extend(messages)
+        return self
+
+    def add_context(self, context: Dict[str, Any], prefix: str = "Context:") -> "MessageBuilder":
+        """
+        Add context as a system message.
+
+        Args:
+            context: Context dictionary
+            prefix: Prefix for context message
+
+        Returns:
+            Self for chaining
+        """
+        context_str = self._format_context(context)
+        if context_str:
+            self.add_system(f"{prefix}\n{context_str}")
+        return self
+
+    def add_conversation_history(
+        self,
+        history: List[Dict[str, str]],
+        max_messages: Optional[int] = None
+    ) -> "MessageBuilder":
+        """
+        Add conversation history.
+
+        Args:
+            history: List of {role, content} dicts
+            max_messages: Optional limit on number of messages
+
+        Returns:
+            Self for chaining
+        """
+        if max_messages:
+            history = history[-max_messages:]
+
+        for msg in history:
+            self.add_message(msg.get("role", "user"), msg.get("content", ""))
+
+        return self
+
+    def clear(self) -> "MessageBuilder":
+        """
+        Clear all messages.
+
+        Returns:
+            Self for chaining
+        """
+        self.messages.clear()
+        return self
+
+    def build(self) -> List[LLMMessage]:
+        """
+        Build and return message list.
+
+        Returns:
+            List of LLMMessage instances
+        """
+        return self.messages.copy()
+
+    def _format_context(self, context: Dict[str, Any]) -> str:
+        """Format context dictionary as string."""
+        lines = []
+        for key, value in context.items():
+            if not key.startswith('_') and value is not None:
+                lines.append(f"{key}: {value}")
+        return "\n".join(lines) if lines else ""
+
+    def __len__(self) -> int:
+        """Get number of messages."""
+        return len(self.messages)
+
+    def __repr__(self) -> str:
+        return f"MessageBuilder(messages={len(self.messages)})"
+
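A short sketch of the fluent builder API above (not part of the release). The role strings and the context/history shapes follow the docstrings in builder.py, and the import path assumes the re-exports from prompts/__init__.py.

from aiecs.domain.agent.prompts import MessageBuilder

builder = (
    MessageBuilder()
    .add_system("You are a helpful assistant")
    .add_context({"project": "aiecs", "_internal": "hidden"})  # keys starting with "_" are dropped
    .add_conversation_history(
        [
            {"role": "user", "content": "Hi"},
            {"role": "assistant", "content": "Hello! How can I help?"},
        ],
        max_messages=2,
    )
    .add_user("What is AI?")
)

messages = builder.build()               # returns a copy, so the builder can be reused
print(len(builder), messages[0].role)    # 5 system

Because every add_* method returns self, a prompt can be assembled in one chained expression and rebuilt after clear().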
aiecs/domain/agent/prompts/formatters.py
@@ -0,0 +1,192 @@
+"""
+Prompt Formatters
+
+Utilities for formatting prompts with context.
+"""
+
+from typing import List, Dict, Any, Optional
+from aiecs.llm import LLMMessage
+
+
+def format_conversation_history(
+    history: List[LLMMessage],
+    max_messages: Optional[int] = None,
+    format_style: str = "compact"
+) -> str:
+    """
+    Format conversation history as string.
+
+    Args:
+        history: List of LLMMessage instances
+        max_messages: Optional limit on number of messages
+        format_style: "compact" or "detailed"
+
+    Returns:
+        Formatted conversation string
+    """
+    if max_messages:
+        history = history[-max_messages:]
+
+    if format_style == "compact":
+        lines = []
+        for msg in history:
+            lines.append(f"{msg.role.upper()}: {msg.content}")
+        return "\n".join(lines)
+
+    elif format_style == "detailed":
+        lines = []
+        for i, msg in enumerate(history):
+            lines.append(f"[{i+1}] {msg.role.upper()}")
+            lines.append(msg.content)
+            lines.append("")  # Empty line between messages
+        return "\n".join(lines)
+
+    else:
+        raise ValueError(f"Unknown format_style: {format_style}")
+
+
+def format_tool_result(
+    tool_name: str,
+    result: Any,
+    success: bool = True,
+    error: Optional[str] = None
+) -> str:
+    """
+    Format tool execution result.
+
+    Args:
+        tool_name: Tool name
+        result: Tool result (if successful)
+        success: Whether execution succeeded
+        error: Error message (if failed)
+
+    Returns:
+        Formatted tool result string
+    """
+    if success:
+        return f"Tool '{tool_name}' returned:\n{result}"
+    else:
+        return f"Tool '{tool_name}' failed: {error}"
+
+
+def truncate_context(
+    text: str,
+    max_length: int,
+    strategy: str = "middle",
+    placeholder: str = "..."
+) -> str:
+    """
+    Truncate text to fit within max_length.
+
+    Args:
+        text: Text to truncate
+        max_length: Maximum length
+        strategy: "start", "middle", or "end"
+        placeholder: Placeholder for truncated content
+
+    Returns:
+        Truncated text
+    """
+    if len(text) <= max_length:
+        return text
+
+    if strategy == "end":
+        # Keep start, truncate end
+        return text[:max_length - len(placeholder)] + placeholder
+
+    elif strategy == "start":
+        # Truncate start, keep end
+        return placeholder + text[-(max_length - len(placeholder)):]
+
+    elif strategy == "middle":
+        # Keep start and end, truncate middle
+        half = (max_length - len(placeholder)) // 2
+        return text[:half] + placeholder + text[-half:]
+
+    else:
+        raise ValueError(f"Unknown strategy: {strategy}")
+
+
+def format_list_items(items: List[str], style: str = "bullets") -> str:
+    """
+    Format list items.
+
+    Args:
+        items: List of items
+        style: "bullets", "numbered", or "compact"
+
+    Returns:
+        Formatted list string
+    """
+    if style == "bullets":
+        return "\n".join(f"• {item}" for item in items)
+
+    elif style == "numbered":
+        return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
+
+    elif style == "compact":
+        return ", ".join(items)
+
+    else:
+        raise ValueError(f"Unknown style: {style}")
+
+
+def format_key_value_pairs(
+    data: Dict[str, Any],
+    separator: str = ": ",
+    exclude_keys: Optional[List[str]] = None
+) -> str:
+    """
+    Format dictionary as key-value pairs.
+
+    Args:
+        data: Dictionary to format
+        separator: Separator between key and value
+        exclude_keys: Keys to exclude
+
+    Returns:
+        Formatted string
+    """
+    exclude_keys = exclude_keys or []
+    lines = []
+
+    for key, value in data.items():
+        if key in exclude_keys or key.startswith('_'):
+            continue
+        lines.append(f"{key}{separator}{value}")
+
+    return "\n".join(lines)
+
+
+def inject_context_in_prompt(
+    prompt: str,
+    context: Dict[str, Any],
+    context_marker: str = "{context}"
+) -> str:
+    """
+    Inject context into prompt at marker position.
+
+    Args:
+        prompt: Prompt template with context marker
+        context: Context dictionary
+        context_marker: Marker to replace with context
+
+    Returns:
+        Prompt with context injected
+    """
+    context_str = format_key_value_pairs(context)
+    return prompt.replace(context_marker, context_str)
+
+
+def estimate_token_count(text: str) -> int:
+    """
+    Rough estimation of token count.
+
+    Args:
+        text: Text to estimate
+
+    Returns:
+        Estimated token count (4 chars ≈ 1 token)
+    """
+    return len(text) // 4
+
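A brief sketch exercising the formatter helpers above (not part of the release), assuming LLMMessage accepts role/content keyword arguments as it is constructed in builder.py; format_list_items and estimate_token_count are imported from the formatters module directly because they are not re-exported by the package __init__.

from aiecs.llm import LLMMessage
from aiecs.domain.agent.prompts import (
    format_conversation_history,
    format_tool_result,
    truncate_context,
)
from aiecs.domain.agent.prompts.formatters import format_list_items, estimate_token_count

history = [
    LLMMessage(role="user", content="Summarise the 1.4.0 changes"),
    LLMMessage(role="assistant", content="Adds a native agent domain layer with prompts and persistence."),
]

print(format_conversation_history(history, max_messages=2, format_style="compact"))
print(format_tool_result("search_tool", result={"hits": 3}))
print(format_tool_result("search_tool", result=None, success=False, error="timeout"))

# "middle" keeps the head and tail of an over-long context and elides the rest
long_context = "x" * 500
print(truncate_context(long_context, max_length=80, strategy="middle"))

print(format_list_items(["persistence", "prompts", "tools"], style="numbered"))
print(estimate_token_count(long_context))  # ~125 with the 4-chars-per-token heuristic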