swarms 7.8.4__py3-none-any.whl → 7.8.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swarms/agents/ape_agent.py +5 -22
- swarms/agents/consistency_agent.py +1 -1
- swarms/agents/i_agent.py +1 -1
- swarms/agents/reasoning_agents.py +99 -3
- swarms/agents/reasoning_duo.py +1 -1
- swarms/cli/main.py +1 -1
- swarms/communication/__init__.py +1 -0
- swarms/communication/duckdb_wrap.py +32 -2
- swarms/communication/pulsar_struct.py +45 -19
- swarms/communication/redis_wrap.py +56 -11
- swarms/communication/supabase_wrap.py +1659 -0
- swarms/prompts/prompt.py +0 -3
- swarms/schemas/agent_completion_response.py +71 -0
- swarms/schemas/agent_rag_schema.py +7 -0
- swarms/schemas/conversation_schema.py +9 -0
- swarms/schemas/llm_agent_schema.py +99 -81
- swarms/schemas/swarms_api_schemas.py +164 -0
- swarms/structs/__init__.py +14 -11
- swarms/structs/agent.py +219 -199
- swarms/structs/agent_rag_handler.py +685 -0
- swarms/structs/base_swarm.py +2 -1
- swarms/structs/conversation.py +608 -87
- swarms/structs/csv_to_agent.py +153 -100
- swarms/structs/deep_research_swarm.py +197 -193
- swarms/structs/dynamic_conversational_swarm.py +18 -7
- swarms/structs/hiearchical_swarm.py +1 -1
- swarms/structs/hybrid_hiearchical_peer_swarm.py +2 -18
- swarms/structs/image_batch_processor.py +261 -0
- swarms/structs/interactive_groupchat.py +356 -0
- swarms/structs/ma_blocks.py +75 -0
- swarms/structs/majority_voting.py +1 -1
- swarms/structs/mixture_of_agents.py +1 -1
- swarms/structs/multi_agent_router.py +3 -2
- swarms/structs/rearrange.py +3 -3
- swarms/structs/sequential_workflow.py +3 -3
- swarms/structs/swarm_matcher.py +500 -411
- swarms/structs/swarm_router.py +15 -97
- swarms/structs/swarming_architectures.py +1 -1
- swarms/tools/mcp_client_call.py +3 -0
- swarms/utils/__init__.py +10 -2
- swarms/utils/check_all_model_max_tokens.py +43 -0
- swarms/utils/generate_keys.py +0 -27
- swarms/utils/history_output_formatter.py +5 -20
- swarms/utils/litellm_wrapper.py +208 -60
- swarms/utils/output_types.py +24 -0
- swarms/utils/vllm_wrapper.py +5 -6
- swarms/utils/xml_utils.py +37 -2
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/METADATA +31 -55
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/RECORD +53 -48
- swarms/structs/multi_agent_collab.py +0 -242
- swarms/structs/output_types.py +0 -6
- swarms/utils/markdown_message.py +0 -21
- swarms/utils/visualizer.py +0 -510
- swarms/utils/wrapper_clusterop.py +0 -127
- /swarms/{tools → schemas}/tool_schema_base_model.py +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/LICENSE +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/WHEEL +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,685 @@
|
|
1
|
+
import time
|
2
|
+
from typing import Any, Dict, List, Optional
|
3
|
+
|
4
|
+
from loguru import logger
|
5
|
+
from swarms.utils.litellm_tokenizer import count_tokens
|
6
|
+
from pydantic import BaseModel, Field, field_validator
|
7
|
+
|
8
|
+
|
9
|
+
class RAGConfig(BaseModel):
    """Configuration class for RAG operations"""

    similarity_threshold: float = Field(
        default=0.7,
        ge=0.0,
        le=1.0,
        description="Similarity threshold for memory retrieval",
    )
    max_results: int = Field(
        default=5,
        gt=0,
        description="Maximum number of results to return from memory",
    )
    context_window_tokens: int = Field(
        default=2000,
        gt=0,
        description="Maximum number of tokens in the context window",
    )
    auto_save_to_memory: bool = Field(
        default=True,
        description="Whether to automatically save responses to memory",
    )
    save_every_n_loops: int = Field(
        default=5, gt=0, description="Save to memory every N loops"
    )
    min_content_length: int = Field(
        default=50,
        gt=0,
        description="Minimum content length to save to memory",
    )
    query_every_loop: bool = Field(
        default=False,
        description="Whether to query memory every loop",
    )
    enable_conversation_summaries: bool = Field(
        default=True,
        description="Whether to enable conversation summaries",
    )
    relevance_keywords: Optional[List[str]] = Field(
        default=None, description="Keywords to check for relevance"
    )

    @field_validator("relevance_keywords", mode="before")
    def set_default_keywords(cls, v):
        # Substitute the stock keyword list when the caller supplies None.
        fallback = [
            "important",
            "key",
            "critical",
            "summary",
            "conclusion",
        ]
        return fallback if v is None else v

    class Config:
        arbitrary_types_allowed = True
        validate_assignment = True
        json_schema_extra = {
            "example": {
                "similarity_threshold": 0.7,
                "max_results": 5,
                "context_window_tokens": 2000,
                "auto_save_to_memory": True,
                "save_every_n_loops": 5,
                "min_content_length": 50,
                "query_every_loop": False,
                "enable_conversation_summaries": True,
                "relevance_keywords": [
                    "important",
                    "key",
                    "critical",
                    "summary",
                    "conclusion",
                ],
            }
        }
|
86
|
+
|
87
|
+
|
88
|
+
class AgentRAGHandler:
    """
    Coordinates RAG (Retrieval-Augmented Generation) operations for an agent:
    querying the long-term store, saving noteworthy content, and shaping
    retrieved context for the agent's window.
    """

    def __init__(
        self,
        long_term_memory: Optional[Any] = None,
        config: Optional["RAGConfig"] = None,
        agent_name: str = "Unknown",
        max_context_length: int = 158_000,
        verbose: bool = False,
    ):
        """
        Initialize the RAG handler.

        Args:
            long_term_memory: The long-term memory store (must implement add() and query() methods)
            config: RAG configuration settings; defaults to a fresh RAGConfig
            agent_name: Name of the agent using this handler (used in log output)
            max_context_length: Upper bound on the agent's context length
            verbose: Enable verbose logging
        """
        self.long_term_memory = long_term_memory
        self.config = config or RAGConfig()
        self.agent_name = agent_name
        self.verbose = verbose
        self.max_context_length = max_context_length

        # Per-session state; reset via clear_session_data().
        self._loop_counter = 0
        self._conversation_history = []
        self._important_memories = []

        # Warn (but do not fail) when the store lacks the expected interface.
        if self.long_term_memory and not self._validate_memory_interface():
            logger.warning(
                "Long-term memory doesn't implement required interface"
            )
|
129
|
+
|
130
|
+
def _validate_memory_interface(self) -> bool:
|
131
|
+
"""Validate that the memory object has required methods"""
|
132
|
+
required_methods = ["add", "query"]
|
133
|
+
for method in required_methods:
|
134
|
+
if not hasattr(self.long_term_memory, method):
|
135
|
+
logger.error(
|
136
|
+
f"Memory object missing required method: {method}"
|
137
|
+
)
|
138
|
+
return False
|
139
|
+
return True
|
140
|
+
|
141
|
+
def is_enabled(self) -> bool:
    """Report whether RAG is active, i.e. a memory store is attached."""
    has_store = self.long_term_memory is not None
    return has_store
|
144
|
+
|
145
|
+
def query_memory(
    self,
    query: str,
    context_type: str = "general",
    loop_count: Optional[int] = None,
) -> str:
    """
    Query the long-term memory and return formatted context.

    Args:
        query: The query string to search for
        context_type: Type of context being queried (for logging)
        loop_count: Current loop number (for logging)

    Returns:
        Formatted string of relevant memories, empty string if no results
    """
    if not self.is_enabled():
        return ""

    try:
        if self.verbose:
            logger.info(
                f"🔍 [{self.agent_name}] Querying RAG for {context_type}: {query[:100]}..."
            )

        # Hit the memory store with the configured retrieval parameters.
        raw_results = self.long_term_memory.query(
            query=query,
            top_k=self.config.max_results,
            similarity_threshold=self.config.similarity_threshold,
        )

        if not raw_results:
            if self.verbose:
                logger.info(
                    f"No relevant memories found for query: {context_type}"
                )
            return ""

        context = self._format_memory_results(
            raw_results, context_type, loop_count
        )

        # Trim the context when it would overflow the token budget.
        if count_tokens(context) > self.config.context_window_tokens:
            context = self._truncate_context(context)

        if self.verbose:
            logger.info(
                f"✅ Retrieved {len(raw_results)} relevant memories for {context_type}"
            )

        return context

    except Exception as e:
        logger.error(f"Error querying long-term memory: {e}")
        return ""
|
209
|
+
|
210
|
+
def _format_memory_results(
|
211
|
+
self,
|
212
|
+
results: List[Any],
|
213
|
+
context_type: str,
|
214
|
+
loop_count: Optional[int] = None,
|
215
|
+
) -> str:
|
216
|
+
"""Format memory results into a structured context string"""
|
217
|
+
if not results:
|
218
|
+
return ""
|
219
|
+
|
220
|
+
loop_info = f" (Loop {loop_count})" if loop_count else ""
|
221
|
+
header = (
|
222
|
+
f"📚 Relevant Knowledge - {context_type.title()}{loop_info}:\n"
|
223
|
+
+ "=" * 50
|
224
|
+
+ "\n"
|
225
|
+
)
|
226
|
+
|
227
|
+
formatted_sections = [header]
|
228
|
+
|
229
|
+
for i, result in enumerate(results, 1):
|
230
|
+
(
|
231
|
+
content,
|
232
|
+
score,
|
233
|
+
source,
|
234
|
+
metadata,
|
235
|
+
) = self._extract_result_fields(result)
|
236
|
+
|
237
|
+
section = f"""
|
238
|
+
[Memory {i}] Relevance: {score} | Source: {source}
|
239
|
+
{'-' * 40}
|
240
|
+
{content}
|
241
|
+
{'-' * 40}
|
242
|
+
"""
|
243
|
+
formatted_sections.append(section)
|
244
|
+
|
245
|
+
formatted_sections.append(f"\n{'='*50}\n")
|
246
|
+
return "\n".join(formatted_sections)
|
247
|
+
|
248
|
+
def _extract_result_fields(self, result: Any) -> tuple:
|
249
|
+
"""Extract content, score, source, and metadata from a result object"""
|
250
|
+
if isinstance(result, dict):
|
251
|
+
content = result.get(
|
252
|
+
"content", result.get("text", str(result))
|
253
|
+
)
|
254
|
+
score = result.get(
|
255
|
+
"score", result.get("similarity", "N/A")
|
256
|
+
)
|
257
|
+
metadata = result.get("metadata", {})
|
258
|
+
source = metadata.get(
|
259
|
+
"source", result.get("source", "Unknown")
|
260
|
+
)
|
261
|
+
else:
|
262
|
+
content = str(result)
|
263
|
+
score = "N/A"
|
264
|
+
source = "Unknown"
|
265
|
+
metadata = {}
|
266
|
+
|
267
|
+
return content, score, source, metadata
|
268
|
+
|
269
|
+
def _truncate_context(self, content: str) -> str:
|
270
|
+
"""Truncate content to fit within token limits using smart truncation"""
|
271
|
+
max_chars = (
|
272
|
+
self.config.context_window_tokens * 3
|
273
|
+
) # Rough token-to-char ratio
|
274
|
+
|
275
|
+
if len(content) <= max_chars:
|
276
|
+
return content
|
277
|
+
|
278
|
+
# Try to cut at section boundaries first
|
279
|
+
sections = content.split("=" * 50)
|
280
|
+
if len(sections) > 2: # Header + sections + footer
|
281
|
+
truncated_sections = [sections[0]] # Keep header
|
282
|
+
current_length = len(sections[0])
|
283
|
+
|
284
|
+
for section in sections[1:-1]: # Skip footer
|
285
|
+
if current_length + len(section) > max_chars * 0.9:
|
286
|
+
break
|
287
|
+
truncated_sections.append(section)
|
288
|
+
current_length += len(section)
|
289
|
+
|
290
|
+
truncated_sections.append(
|
291
|
+
f"\n[... {len(sections) - len(truncated_sections)} more memories truncated for length ...]\n"
|
292
|
+
)
|
293
|
+
truncated_sections.append(sections[-1]) # Keep footer
|
294
|
+
return "=" * (50).join(truncated_sections)
|
295
|
+
|
296
|
+
# Fallback: simple truncation at sentence boundary
|
297
|
+
truncated = content[:max_chars]
|
298
|
+
last_period = truncated.rfind(".")
|
299
|
+
if last_period > max_chars * 0.8:
|
300
|
+
truncated = truncated[: last_period + 1]
|
301
|
+
|
302
|
+
return (
|
303
|
+
truncated + "\n\n[... content truncated for length ...]"
|
304
|
+
)
|
305
|
+
|
306
|
+
def should_save_response(
    self,
    response: str,
    loop_count: int,
    has_tool_usage: bool = False,
) -> bool:
    """
    Determine if a response should be saved to long-term memory.

    Args:
        response: The response text to evaluate
        loop_count: Current loop number
        has_tool_usage: Whether tools were used in this response

    Returns:
        Boolean indicating whether to save the response
    """
    # Saving is off entirely when RAG or auto-save is disabled.
    if not self.is_enabled() or not self.config.auto_save_to_memory:
        return False

    # Skip trivially short content.
    if len(response.strip()) < self.config.min_content_length:
        return False

    lowered = response.lower()
    # Save when any single heuristic fires.
    is_substantial = len(response) > 200
    has_keyword = any(
        keyword in lowered
        for keyword in self.config.relevance_keywords
    )
    is_periodic = loop_count % self.config.save_every_n_loops == 0
    is_multi_sentence = response.count(".") >= 3
    has_structure = any(
        marker in response
        for marker in ["- ", "1. ", "2. ", "* ", "```"]
    )

    return (
        is_substantial
        or has_keyword
        or is_periodic
        or has_tool_usage
        or is_multi_sentence
        or has_structure
    )
|
355
|
+
|
356
|
+
def save_to_memory(
    self,
    content: str,
    metadata: Optional[Dict] = None,
    content_type: str = "response",
) -> bool:
    """
    Save content to long-term memory with metadata.

    Args:
        content: The content to save
        metadata: Additional metadata to store
        content_type: Type of content being saved

    Returns:
        Boolean indicating success
    """
    if not self.is_enabled():
        return False
    if not content or len(content.strip()) < self.config.min_content_length:
        return False

    try:
        # Base metadata; caller-supplied keys override these.
        merged_metadata = {
            "timestamp": time.time(),
            "agent_name": self.agent_name,
            "content_type": content_type,
            "loop_count": self._loop_counter,
            "saved_at": time.strftime("%Y-%m-%d %H:%M:%S"),
        }
        if metadata:
            merged_metadata.update(metadata)

        if self.verbose:
            logger.info(
                f"💾 [{self.agent_name}] Saving to long-term memory: {content[:100]}..."
            )

        success = self.long_term_memory.add(
            content, metadata=merged_metadata
        )

        if success and self.verbose:
            logger.info(
                f"✅ Successfully saved {content_type} to long-term memory"
            )

        # Track a preview of high-value memories for session statistics.
        if content_type in [
            "final_response",
            "conversation_summary",
        ]:
            self._important_memories.append(
                {
                    "content": content[:200],
                    "timestamp": time.time(),
                    "type": content_type,
                }
            )

        return success

    except Exception as e:
        logger.error(f"Error saving to long-term memory: {e}")
        return False
|
428
|
+
|
429
|
+
def create_conversation_summary(
    self,
    task: str,
    final_response: str,
    total_loops: int,
    tools_used: List[str] = None,
) -> str:
    """Build a comprehensive, human-readable summary of the conversation."""
    if tools_used:
        tools_info = f"Tools Used: {', '.join(tools_used)}"
    else:
        tools_info = "Tools Used: None"

    return f"""
CONVERSATION SUMMARY
====================
Agent: {self.agent_name}
Timestamp: {time.strftime('%Y-%m-%d %H:%M:%S')}

ORIGINAL TASK:
{task}

FINAL RESPONSE:
{final_response}

EXECUTION DETAILS:
- Total Reasoning Loops: {total_loops}
- {tools_info}
- Memory Queries Made: {len(self._conversation_history)}

KEY INSIGHTS:
{self._extract_key_insights(final_response)}
====================
"""
|
465
|
+
|
466
|
+
def _extract_key_insights(self, response: str) -> str:
|
467
|
+
"""Extract key insights from the response for summary"""
|
468
|
+
# Simple keyword-based extraction
|
469
|
+
insights = []
|
470
|
+
sentences = response.split(".")
|
471
|
+
|
472
|
+
for sentence in sentences:
|
473
|
+
if any(
|
474
|
+
keyword in sentence.lower()
|
475
|
+
for keyword in self.config.relevance_keywords[:5]
|
476
|
+
):
|
477
|
+
insights.append(sentence.strip())
|
478
|
+
|
479
|
+
if insights:
|
480
|
+
return "\n- " + "\n- ".join(
|
481
|
+
insights[:3]
|
482
|
+
) # Top 3 insights
|
483
|
+
return "No specific insights extracted"
|
484
|
+
|
485
|
+
def handle_loop_memory_operations(
    self,
    task: str,
    response: str,
    loop_count: int,
    conversation_context: str = "",
    has_tool_usage: bool = False,
) -> str:
    """
    Run all memory operations for one loop iteration: optionally query the
    store for fresh context, then save the response when it qualifies.

    Args:
        task: Original task
        response: Current response
        loop_count: Current loop number
        conversation_context: Current conversation context
        has_tool_usage: Whether tools were used

    Returns:
        Retrieved context string (empty if no relevant memories)
    """
    self._loop_counter = loop_count

    # 1. Query memory if enabled for this loop (never on the first loop).
    retrieved_context = ""
    if self.config.query_every_loop and loop_count > 1:
        combined_query = f"Task: {task}\nCurrent Context: {conversation_context[-500:]}"
        retrieved_context = self.query_memory(
            combined_query,
            context_type=f"loop_{loop_count}",
            loop_count=loop_count,
        )

    # 2. Save the response when it meets the saving heuristics.
    if self.should_save_response(response, loop_count, has_tool_usage):
        self.save_to_memory(
            content=response,
            metadata={
                "task_preview": task[:200],
                "loop_count": loop_count,
                "has_tool_usage": has_tool_usage,
            },
            content_type="loop_response",
        )

    return retrieved_context
|
533
|
+
|
534
|
+
def handle_initial_memory_query(self, task: str) -> str:
    """Run the up-front memory lookup before any reasoning loop starts."""
    if self.is_enabled():
        return self.query_memory(task, context_type="initial_task")
    return ""
|
540
|
+
|
541
|
+
def handle_final_memory_consolidation(
    self,
    task: str,
    final_response: str,
    total_loops: int,
    tools_used: List[str] = None,
) -> bool:
    """Summarize the finished conversation and persist the summary."""
    # Only consolidate when RAG is active and summaries are enabled.
    if not (
        self.is_enabled()
        and self.config.enable_conversation_summaries
    ):
        return False

    summary = self.create_conversation_summary(
        task, final_response, total_loops, tools_used
    )

    return self.save_to_memory(
        content=summary,
        metadata={
            "task": task[:200],
            "total_loops": total_loops,
            "tools_used": tools_used or [],
        },
        content_type="conversation_summary",
    )
|
569
|
+
|
570
|
+
def search_memories(
    self,
    query: str,
    top_k: int = None,
    similarity_threshold: float = None,
) -> List[Dict]:
    """
    Search long-term memory and return raw results.

    Args:
        query: Search query
        top_k: Number of results to return (uses config default if None)
        similarity_threshold: Similarity threshold (uses config default if None)

    Returns:
        List of memory results (empty on error or when RAG is disabled)
    """
    if not self.is_enabled():
        return []

    try:
        # Falsy arguments fall back to the configured defaults.
        hits = self.long_term_memory.query(
            query=query,
            top_k=top_k or self.config.max_results,
            similarity_threshold=(
                similarity_threshold
                or self.config.similarity_threshold
            ),
        )
        return hits or []
    except Exception as e:
        logger.error(f"Error searching memories: {e}")
        return []
|
601
|
+
|
602
|
+
def get_memory_stats(self) -> Dict[str, Any]:
    """Return a snapshot of memory configuration and session counters."""
    store = self.long_term_memory
    recent_important = (
        self._important_memories[-3:]
        if self._important_memories
        else []
    )
    return {
        "is_enabled": self.is_enabled(),
        "config": self.config.__dict__,
        "loops_processed": self._loop_counter,
        "important_memories_count": len(self._important_memories),
        "last_important_memories": recent_important,
        "memory_store_type": (
            type(store).__name__ if store else None
        ),
    }
|
620
|
+
|
621
|
+
def clear_session_data(self):
    """Reset per-session state; the long-term memory store is untouched."""
    self._loop_counter = 0
    del self._conversation_history[:]
    del self._important_memories[:]

    if self.verbose:
        logger.info(f"[{self.agent_name}] Session data cleared")
|
629
|
+
|
630
|
+
def update_config(self, **kwargs):
    """Update RAG configuration parameters; unknown names are warned about."""
    for key, value in kwargs.items():
        if not hasattr(self.config, key):
            logger.warning(f"Unknown config parameter: {key}")
            continue
        setattr(self.config, key, value)
        if self.verbose:
            logger.info(
                f"Updated RAG config: {key} = {value}"
            )
|
641
|
+
|
642
|
+
|
643
|
+
# # Example memory interface that your RAG implementation should follow
|
644
|
+
# class ExampleMemoryInterface:
|
645
|
+
# """Example interface for long-term memory implementations"""
|
646
|
+
|
647
|
+
# def add(self, content: str, metadata: Dict = None) -> bool:
|
648
|
+
# """
|
649
|
+
# Add content to the memory store.
|
650
|
+
|
651
|
+
# Args:
|
652
|
+
# content: Text content to store
|
653
|
+
# metadata: Additional metadata dictionary
|
654
|
+
|
655
|
+
# Returns:
|
656
|
+
# Boolean indicating success
|
657
|
+
# """
|
658
|
+
# # Your vector database implementation here
|
659
|
+
# return True
|
660
|
+
|
661
|
+
# def query(
|
662
|
+
# self,
|
663
|
+
# query: str,
|
664
|
+
# top_k: int = 5,
|
665
|
+
# similarity_threshold: float = 0.7
|
666
|
+
# ) -> List[Dict]:
|
667
|
+
# """
|
668
|
+
# Query the memory store for relevant content.
|
669
|
+
|
670
|
+
# Args:
|
671
|
+
# query: Search query string
|
672
|
+
# top_k: Maximum number of results to return
|
673
|
+
# similarity_threshold: Minimum similarity score
|
674
|
+
|
675
|
+
# Returns:
|
676
|
+
# List of dictionaries with keys: 'content', 'score', 'metadata'
|
677
|
+
# """
|
678
|
+
# # Your vector database query implementation here
|
679
|
+
# return [
|
680
|
+
# {
|
681
|
+
# 'content': 'Example memory content',
|
682
|
+
# 'score': 0.85,
|
683
|
+
# 'metadata': {'source': 'example', 'timestamp': time.time()}
|
684
|
+
# }
|
685
|
+
# ]
|
swarms/structs/base_swarm.py
CHANGED
@@ -12,6 +12,7 @@ from typing import (
|
|
12
12
|
List,
|
13
13
|
Optional,
|
14
14
|
Sequence,
|
15
|
+
Union,
|
15
16
|
)
|
16
17
|
|
17
18
|
import yaml
|
@@ -76,7 +77,7 @@ class BaseSwarm(ABC):
|
|
76
77
|
self,
|
77
78
|
name: Optional[str] = None,
|
78
79
|
description: Optional[str] = None,
|
79
|
-
agents: Optional[List[Agent]] = None,
|
80
|
+
agents: Optional[List[Union[Agent, Callable]]] = None,
|
80
81
|
models: Optional[List[Any]] = None,
|
81
82
|
max_loops: Optional[int] = 200,
|
82
83
|
callbacks: Optional[Sequence[callable]] = None,
|