webagents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webagents/__init__.py +18 -0
- webagents/__main__.py +55 -0
- webagents/agents/__init__.py +13 -0
- webagents/agents/core/__init__.py +19 -0
- webagents/agents/core/base_agent.py +1834 -0
- webagents/agents/core/handoffs.py +293 -0
- webagents/agents/handoffs/__init__.py +0 -0
- webagents/agents/interfaces/__init__.py +0 -0
- webagents/agents/lifecycle/__init__.py +0 -0
- webagents/agents/skills/__init__.py +109 -0
- webagents/agents/skills/base.py +136 -0
- webagents/agents/skills/core/__init__.py +8 -0
- webagents/agents/skills/core/guardrails/__init__.py +0 -0
- webagents/agents/skills/core/llm/__init__.py +0 -0
- webagents/agents/skills/core/llm/anthropic/__init__.py +1 -0
- webagents/agents/skills/core/llm/litellm/__init__.py +10 -0
- webagents/agents/skills/core/llm/litellm/skill.py +538 -0
- webagents/agents/skills/core/llm/openai/__init__.py +1 -0
- webagents/agents/skills/core/llm/xai/__init__.py +1 -0
- webagents/agents/skills/core/mcp/README.md +375 -0
- webagents/agents/skills/core/mcp/__init__.py +15 -0
- webagents/agents/skills/core/mcp/skill.py +731 -0
- webagents/agents/skills/core/memory/__init__.py +11 -0
- webagents/agents/skills/core/memory/long_term_memory/__init__.py +10 -0
- webagents/agents/skills/core/memory/long_term_memory/memory_skill.py +639 -0
- webagents/agents/skills/core/memory/short_term_memory/__init__.py +9 -0
- webagents/agents/skills/core/memory/short_term_memory/skill.py +341 -0
- webagents/agents/skills/core/memory/vector_memory/skill.py +447 -0
- webagents/agents/skills/core/planning/__init__.py +9 -0
- webagents/agents/skills/core/planning/planner.py +343 -0
- webagents/agents/skills/ecosystem/__init__.py +0 -0
- webagents/agents/skills/ecosystem/crewai/__init__.py +1 -0
- webagents/agents/skills/ecosystem/database/__init__.py +1 -0
- webagents/agents/skills/ecosystem/filesystem/__init__.py +0 -0
- webagents/agents/skills/ecosystem/google/__init__.py +0 -0
- webagents/agents/skills/ecosystem/google/calendar/__init__.py +6 -0
- webagents/agents/skills/ecosystem/google/calendar/skill.py +306 -0
- webagents/agents/skills/ecosystem/n8n/__init__.py +0 -0
- webagents/agents/skills/ecosystem/openai_agents/__init__.py +0 -0
- webagents/agents/skills/ecosystem/web/__init__.py +0 -0
- webagents/agents/skills/ecosystem/zapier/__init__.py +0 -0
- webagents/agents/skills/robutler/__init__.py +11 -0
- webagents/agents/skills/robutler/auth/README.md +63 -0
- webagents/agents/skills/robutler/auth/__init__.py +17 -0
- webagents/agents/skills/robutler/auth/skill.py +354 -0
- webagents/agents/skills/robutler/crm/__init__.py +18 -0
- webagents/agents/skills/robutler/crm/skill.py +368 -0
- webagents/agents/skills/robutler/discovery/README.md +281 -0
- webagents/agents/skills/robutler/discovery/__init__.py +16 -0
- webagents/agents/skills/robutler/discovery/skill.py +230 -0
- webagents/agents/skills/robutler/kv/__init__.py +6 -0
- webagents/agents/skills/robutler/kv/skill.py +80 -0
- webagents/agents/skills/robutler/message_history/__init__.py +9 -0
- webagents/agents/skills/robutler/message_history/skill.py +270 -0
- webagents/agents/skills/robutler/messages/__init__.py +0 -0
- webagents/agents/skills/robutler/nli/__init__.py +13 -0
- webagents/agents/skills/robutler/nli/skill.py +687 -0
- webagents/agents/skills/robutler/notifications/__init__.py +5 -0
- webagents/agents/skills/robutler/notifications/skill.py +141 -0
- webagents/agents/skills/robutler/payments/__init__.py +41 -0
- webagents/agents/skills/robutler/payments/exceptions.py +255 -0
- webagents/agents/skills/robutler/payments/skill.py +610 -0
- webagents/agents/skills/robutler/storage/__init__.py +10 -0
- webagents/agents/skills/robutler/storage/files/__init__.py +9 -0
- webagents/agents/skills/robutler/storage/files/skill.py +445 -0
- webagents/agents/skills/robutler/storage/json/__init__.py +9 -0
- webagents/agents/skills/robutler/storage/json/skill.py +336 -0
- webagents/agents/skills/robutler/storage/kv/skill.py +88 -0
- webagents/agents/skills/robutler/storage.py +389 -0
- webagents/agents/tools/__init__.py +0 -0
- webagents/agents/tools/decorators.py +426 -0
- webagents/agents/tracing/__init__.py +0 -0
- webagents/agents/workflows/__init__.py +0 -0
- webagents/scripts/__init__.py +0 -0
- webagents/server/__init__.py +28 -0
- webagents/server/context/__init__.py +0 -0
- webagents/server/context/context_vars.py +121 -0
- webagents/server/core/__init__.py +0 -0
- webagents/server/core/app.py +843 -0
- webagents/server/core/middleware.py +69 -0
- webagents/server/core/models.py +98 -0
- webagents/server/core/monitoring.py +59 -0
- webagents/server/endpoints/__init__.py +0 -0
- webagents/server/interfaces/__init__.py +0 -0
- webagents/server/middleware.py +330 -0
- webagents/server/models.py +92 -0
- webagents/server/monitoring.py +659 -0
- webagents/utils/__init__.py +0 -0
- webagents/utils/logging.py +359 -0
- webagents-0.1.0.dist-info/METADATA +230 -0
- webagents-0.1.0.dist-info/RECORD +94 -0
- webagents-0.1.0.dist-info/WHEEL +4 -0
- webagents-0.1.0.dist-info/entry_points.txt +2 -0
- webagents-0.1.0.dist-info/licenses/LICENSE +20 -0
webagents/agents/skills/core/memory/short_term_memory/skill.py
@@ -0,0 +1,341 @@
+"""
+Short-term Memory Skill - WebAgents V2.0
+
+Manages conversation context, message filtering, and immediate memory operations.
+Handles recent message history, context windowing, and conversation summarization.
+"""
+
+import time
+from typing import Dict, Any, List, Optional
+from collections import deque
+from dataclasses import dataclass
+
+from ....base import Skill
+from .....tools.decorators import tool, hook
+from webagents.utils.logging import get_logger, log_skill_event, timer
+
+
+@dataclass
+class MessageContext:
+    """Represents a message with context metadata"""
+    role: str
+    content: str
+    timestamp: float
+    token_count: Optional[int] = None
+    importance: float = 1.0  # 0.0 to 1.0, higher = more important
+    metadata: Dict[str, Any] = None
+
+    def __post_init__(self):
+        if self.metadata is None:
+            self.metadata = {}
+
+
+class ShortTermMemorySkill(Skill):
+    """
+    Short-term memory skill for conversation context and message filtering
+
+    Features:
+    - Message history management with configurable window size
+    - Intelligent message filtering and prioritization
+    - Context summarization for long conversations
+    - Token count tracking and optimization
+    - Conversation state management
+    """
+
+    def __init__(self, config: Dict[str, Any] = None):
+        super().__init__(config, scope="all")
+
+        # Configuration
+        self.max_messages = config.get('max_messages', 50) if config else 50
+        self.max_tokens = config.get('max_tokens', 4000) if config else 4000
+        self.importance_threshold = config.get('importance_threshold', 0.3) if config else 0.3
+
+        # Message storage
+        self.message_history = deque(maxlen=self.max_messages)
+        self.conversation_summary = ""
+        self.total_tokens = 0
+
+        # State tracking
+        self.conversation_id = None
+        self.last_activity = time.time()
+
+    async def initialize(self, agent: 'BaseAgent') -> None:
+        """Initialize short-term memory skill"""
+        from webagents.utils.logging import get_logger, log_skill_event
+
+        self.agent = agent
+        self.logger = get_logger('skill.memory.short_term', agent.name)
+
+        log_skill_event(agent.name, 'short_term_memory', 'initialized', {
+            'max_messages': self.max_messages,
+            'max_tokens': self.max_tokens,
+            'importance_threshold': self.importance_threshold
+        })
+
+    @hook("on_connection", priority=15)
+    async def setup_memory_context(self, context) -> Any:
+        """Setup memory context for new connections"""
+        self.logger.debug("Setting up memory context for new connection")
+
+        # Initialize conversation tracking
+        self.conversation_id = context.get("request_id")
+        self.last_activity = time.time()
+
+        # Store memory state in context for other skills to access
+        context.set("memory_state", {
+            "conversation_id": self.conversation_id,
+            "message_count": len(self.message_history),
+            "total_tokens": self.total_tokens
+        })
+
+        return context
+
+    @hook("on_message", priority=20)
+    async def process_message_memory(self, context) -> Any:
+        """Process and store new messages in memory"""
+        messages = context.get("messages", [])
+
+        if not messages:
+            return context
+
+        # Process the latest message
+        latest_message = messages[-1]
+
+        with timer("message_processing", self.agent.name):
+            await self._add_message_to_memory(
+                role=latest_message.get("role", "user"),
+                content=latest_message.get("content", ""),
+                metadata={"source": "conversation"}
+            )
+
+        # Update context with memory information
+        context.set("memory_stats", {
+            "messages_stored": len(self.message_history),
+            "total_tokens": self.total_tokens,
+            "last_activity": self.last_activity
+        })
+
+        return context
+
+    @tool(description="Add a message to short-term memory with importance weighting")
+    async def add_message(self, role: str, content: str, importance: float = 1.0,
+                          metadata: Optional[Dict[str, Any]] = None, context=None) -> str:
+        """Add a message to short-term memory"""
+
+        await self._add_message_to_memory(role, content, importance, metadata or {})
+
+        if context:
+            context.track_usage(1, "short_term_memory_storage")
+
+        return f"Message stored in short-term memory (importance: {importance})"
+
+    @tool(description="Retrieve recent conversation history")
+    async def get_recent_messages(self, count: int = 10, min_importance: float = 0.0,
+                                  context=None) -> List[Dict[str, Any]]:
+        """Retrieve recent messages from short-term memory"""
+
+        # Filter messages by importance and recency
+        filtered_messages = [
+            {
+                "role": msg.role,
+                "content": msg.content,
+                "timestamp": msg.timestamp,
+                "importance": msg.importance,
+                "metadata": msg.metadata
+            }
+            for msg in list(self.message_history)[-count:]
+            if msg.importance >= min_importance
+        ]
+
+        self.logger.info(f"Retrieved {len(filtered_messages)} recent messages")
+
+        if context:
+            context.track_usage(0.5, "short_term_memory_retrieval")
+
+        return filtered_messages
+
+    @tool(description="Get conversation summary for context compression")
+    async def get_conversation_summary(self, context=None) -> str:
+        """Get a summary of the current conversation"""
+
+        if not self.conversation_summary and len(self.message_history) > 5:
+            # Generate summary from recent messages
+            with timer("conversation_summarization", self.agent.name):
+                await self._generate_summary()
+
+        if context:
+            context.track_usage(2, "conversation_summarization")
+
+        return self.conversation_summary or "No conversation summary available"
+
+    @tool(description="Clear short-term memory and start fresh")
+    async def clear_memory(self, keep_summary: bool = True, context=None) -> str:
+        """Clear short-term memory"""
+
+        messages_cleared = len(self.message_history)
+
+        if not keep_summary:
+            self.conversation_summary = ""
+
+        self.message_history.clear()
+        self.total_tokens = 0
+        self.last_activity = time.time()
+
+        self.logger.info(f"Cleared {messages_cleared} messages from short-term memory")
+
+        if context:
+            context.track_usage(1, "memory_clearing")
+
+        return f"Cleared {messages_cleared} messages from short-term memory"
+
+    @tool(description="Get memory statistics and health info")
+    async def get_memory_stats(self, context=None) -> Dict[str, Any]:
+        """Get current memory statistics"""
+
+        stats = {
+            "message_count": len(self.message_history),
+            "total_tokens": self.total_tokens,
+            "max_messages": self.max_messages,
+            "max_tokens": self.max_tokens,
+            "memory_utilization": len(self.message_history) / self.max_messages,
+            "token_utilization": self.total_tokens / self.max_tokens,
+            "conversation_id": self.conversation_id,
+            "last_activity": self.last_activity,
+            "has_summary": bool(self.conversation_summary)
+        }
+
+        if context:
+            context.track_usage(0.1, "memory_stats_retrieval")
+
+        return stats
+
+    # Private helper methods
+
+    async def _add_message_to_memory(self, role: str, content: str,
+                                     importance: float = 1.0, metadata: Dict[str, Any] = None):
+        """Internal method to add message to memory"""
+
+        # Estimate token count (rough approximation)
+        token_count = len(content.split()) * 1.3  # Rough tokens per word
+
+        message = MessageContext(
+            role=role,
+            content=content,
+            timestamp=time.time(),
+            token_count=int(token_count),
+            importance=importance,
+            metadata=metadata or {}
+        )
+
+        # Add to memory
+        self.message_history.append(message)
+        self.total_tokens += message.token_count
+        self.last_activity = time.time()
+
+        # Check if we need to compress memory
+        if self._needs_compression():
+            await self._compress_memory()
+
+        self.logger.debug(f"Added message to memory: {len(content)} chars, {token_count} tokens")
+
+    def _needs_compression(self) -> bool:
+        """Check if memory needs compression"""
+        return (
+            len(self.message_history) >= self.max_messages * 0.9 or
+            self.total_tokens >= self.max_tokens * 0.9
+        )
+
+    async def _compress_memory(self):
+        """Compress memory by removing less important messages"""
+
+        self.logger.info("Compressing short-term memory")
+
+        # Convert to list for easier manipulation
+        messages = list(self.message_history)
+
+        # Sort by importance (keep most important)
+        messages.sort(key=lambda m: m.importance, reverse=True)
+
+        # Keep top 70% by importance, but always keep recent messages
+        keep_count = int(self.max_messages * 0.7)
+        recent_count = min(10, len(messages) // 4)
+
+        # Always keep recent messages regardless of importance
+        recent_messages = list(self.message_history)[-recent_count:]
+        important_messages = messages[:keep_count - recent_count]
+
+        # Combine and remove duplicates
+        kept_messages = []
+        seen_content = set()
+
+        for msg in important_messages + recent_messages:
+            if msg.content not in seen_content:
+                kept_messages.append(msg)
+                seen_content.add(msg.content)
+
+        # Update memory
+        self.message_history.clear()
+        self.message_history.extend(kept_messages)
+
+        # Recalculate token count
+        self.total_tokens = sum(msg.token_count for msg in self.message_history if msg.token_count)
+
+        self.logger.info(f"Memory compressed: kept {len(kept_messages)} messages, {self.total_tokens} tokens")
+
+    async def _generate_summary(self):
+        """Generate a summary of the conversation"""
+
+        if len(self.message_history) < 3:
+            return
+
+        # Simple extractive summarization (in production, could use LLM)
+        messages = list(self.message_history)
+
+        # Get key messages based on importance and recency
+        key_messages = [
+            msg for msg in messages
+            if msg.importance > 0.7 or msg in messages[-5:]
+        ]
+
+        if key_messages:
+            summary_parts = []
+            current_topic = ""
+
+            for msg in key_messages[-10:]:  # Last 10 key messages
+                if len(msg.content) > 20:  # Ignore very short messages
+                    summary_parts.append(f"{msg.role}: {msg.content[:100]}...")
+
+            self.conversation_summary = "\n".join(summary_parts)
+            self.logger.debug(f"Generated conversation summary: {len(self.conversation_summary)} chars")
+
+    # Context integration methods
+
+    def get_context_messages(self, max_tokens: int = None) -> List[Dict[str, Any]]:
+        """Get messages formatted for LLM context"""
+
+        target_tokens = max_tokens or self.max_tokens
+        messages = []
+        current_tokens = 0
+
+        # Add messages from newest to oldest until we hit token limit
+        for message in reversed(self.message_history):
+            if current_tokens + message.token_count <= target_tokens:
+                messages.append({
+                    "role": message.role,
+                    "content": message.content
+                })
+                current_tokens += message.token_count
+            else:
+                break
+
+        # Reverse to get chronological order
+        messages.reverse()
+
+        # If we have a summary and not many messages fit, prepend summary
+        if len(messages) < 3 and self.conversation_summary:
+            messages.insert(0, {
+                "role": "system",
+                "content": f"Previous conversation summary: {self.conversation_summary}"
+            })
+
+        return messages
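For reference, a minimal usage sketch of the skill added in this release. It assumes the @tool-decorated methods remain directly awaitable and that initialize() only needs an object exposing a name attribute (which is all the code above reads); the stand-in agent and the config values are illustrative, not part of the package.

import asyncio
from types import SimpleNamespace

from webagents.agents.skills.core.memory.short_term_memory.skill import ShortTermMemorySkill


async def main():
    # Illustrative config; package defaults are max_messages=50, max_tokens=4000
    skill = ShortTermMemorySkill({"max_messages": 20, "max_tokens": 2000})

    # initialize() only uses agent.name, so a simple stand-in object suffices here
    await skill.initialize(SimpleNamespace(name="demo-agent"))

    # Store a couple of messages with explicit importance weights
    await skill.add_message("user", "Summarize our shipping options.", importance=0.9)
    await skill.add_message("assistant", "We offer standard and express shipping.", importance=0.5)

    # Inspect memory health and pull messages formatted for an LLM context window
    print(await skill.get_memory_stats())
    print(skill.get_context_messages(max_tokens=500))


asyncio.run(main())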