swarms 7.9.9__py3-none-any.whl → 8.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,675 @@
+import json
+import pickle
+import hashlib
+import threading
+import time
+from functools import lru_cache, wraps
+from typing import List, Dict, Any, Optional, Callable
+from pathlib import Path
+import weakref
+from concurrent.futures import ThreadPoolExecutor
+import os
+
+from loguru import logger
+
+# Import the Agent class - adjust path as needed
+try:
+    from swarms.structs.agent import Agent
+except ImportError:
+    # Fallback for development/testing
+    Agent = Any
+
+
+class AgentCache:
+    """
+    A comprehensive caching system for Agent objects with multiple strategies:
+    - Memory-based LRU cache
+    - Weak reference cache to prevent memory leaks
+    - Persistent disk cache for agent configurations
+    - Lazy loading with background preloading
+    """
+
+    def __init__(
+        self,
+        max_memory_cache_size: int = 128,
+        cache_dir: Optional[str] = None,
+        enable_persistent_cache: bool = True,
+        auto_save_interval: int = 300,  # 5 minutes
+        enable_weak_refs: bool = True,
+    ):
+        """
+        Initialize the AgentCache.
+
+        Args:
+            max_memory_cache_size: Maximum number of agents to keep in memory cache
+            cache_dir: Directory for persistent cache storage
+            enable_persistent_cache: Whether to enable disk-based caching
+            auto_save_interval: Interval in seconds for auto-saving cache
+            enable_weak_refs: Whether to use weak references to prevent memory leaks
+        """
+        self.max_memory_cache_size = max_memory_cache_size
+        self.cache_dir = Path(cache_dir or "agent_cache")
+        self.enable_persistent_cache = enable_persistent_cache
+        self.auto_save_interval = auto_save_interval
+        self.enable_weak_refs = enable_weak_refs
+
+        # Memory caches
+        self._memory_cache: Dict[str, Agent] = {}
+        self._weak_cache: weakref.WeakValueDictionary = (
+            weakref.WeakValueDictionary()
+        )
+        self._access_times: Dict[str, float] = {}
+        self._lock = threading.RLock()
+
+        # Cache statistics
+        self._hits = 0
+        self._misses = 0
+        self._load_times: Dict[str, float] = {}
+
+        # Background tasks
+        self._auto_save_thread: Optional[threading.Thread] = None
+        self._shutdown_event = threading.Event()
+
+        # Initialize cache directory
+        if self.enable_persistent_cache:
+            self.cache_dir.mkdir(parents=True, exist_ok=True)
+
+        # Start auto-save thread
+        self._start_auto_save_thread()
+
+    def _start_auto_save_thread(self):
+        """Start the auto-save background thread."""
+        if (
+            self.enable_persistent_cache
+            and self.auto_save_interval > 0
+        ):
+            self._auto_save_thread = threading.Thread(
+                target=self._auto_save_loop,
+                daemon=True,
+                name="AgentCache-AutoSave",
+            )
+            self._auto_save_thread.start()
+
+    def _auto_save_loop(self):
+        """Background loop for auto-saving cache."""
+        while not self._shutdown_event.wait(self.auto_save_interval):
+            try:
+                self.save_cache_to_disk()
+            except Exception as e:
+                logger.error(f"Error in auto-save: {e}")
+
+    def _generate_cache_key(
+        self, agent_config: Dict[str, Any]
+    ) -> str:
+        """Generate a unique cache key from agent configuration."""
+        # Create a stable hash from the configuration
+        config_str = json.dumps(
+            agent_config, sort_keys=True, default=str
+        )
+        return hashlib.md5(config_str.encode()).hexdigest()
+
+    def _evict_lru(self):
+        """Evict the least recently used item from the memory cache."""
+        if len(self._memory_cache) >= self.max_memory_cache_size:
+            # Find the least recently used item
+            lru_key = min(
+                self._access_times.items(), key=lambda x: x[1]
+            )[0]
+
+            # Save to persistent cache before evicting
+            if self.enable_persistent_cache:
+                self._save_agent_to_disk(
+                    lru_key, self._memory_cache[lru_key]
+                )
+
+            # Remove from memory
+            del self._memory_cache[lru_key]
+            del self._access_times[lru_key]
+
+            logger.debug(f"Evicted agent {lru_key} from memory cache")
+
+    def _save_agent_to_disk(self, cache_key: str, agent: Agent):
+        """Save agent to persistent cache."""
+        try:
+            cache_file = self.cache_dir / f"{cache_key}.pkl"
+            with open(cache_file, "wb") as f:
+                pickle.dump(agent.to_dict(), f)
+            logger.debug(f"Saved agent {cache_key} to disk cache")
+        except Exception as e:
+            logger.error(f"Error saving agent to disk: {e}")
+
+    def _load_agent_from_disk(
+        self, cache_key: str
+    ) -> Optional[Agent]:
+        """Load agent from persistent cache."""
+        try:
+            cache_file = self.cache_dir / f"{cache_key}.pkl"
+            if cache_file.exists():
+                with open(cache_file, "rb") as f:
+                    agent_dict = pickle.load(f)
+
+                # Reconstruct agent from dictionary
+                agent = Agent(**agent_dict)
+                logger.debug(
+                    f"Loaded agent {cache_key} from disk cache"
+                )
+                return agent
+        except Exception as e:
+            logger.error(f"Error loading agent from disk: {e}")
+        return None
+
+    def get_agent(
+        self, agent_config: Dict[str, Any]
+    ) -> Optional[Agent]:
+        """
+        Get an agent from cache, loading if necessary.
+
+        Args:
+            agent_config: Configuration dictionary for the agent
+
+        Returns:
+            Cached or newly loaded Agent instance
+        """
+        cache_key = self._generate_cache_key(agent_config)
+
+        with self._lock:
+            # Check memory cache first
+            if cache_key in self._memory_cache:
+                self._access_times[cache_key] = time.time()
+                self._hits += 1
+                logger.debug(
+                    f"Cache hit (memory) for agent {cache_key}"
+                )
+                return self._memory_cache[cache_key]
+
+            # Check weak reference cache
+            if (
+                self.enable_weak_refs
+                and cache_key in self._weak_cache
+            ):
+                agent = self._weak_cache.get(cache_key)
+                if agent is not None:
+                    # Move back to memory cache
+                    self._memory_cache[cache_key] = agent
+                    self._access_times[cache_key] = time.time()
+                    self._hits += 1
+                    logger.debug(
+                        f"Cache hit (weak ref) for agent {cache_key}"
+                    )
+                    return agent
+
+            # Check persistent cache
+            if self.enable_persistent_cache:
+                agent = self._load_agent_from_disk(cache_key)
+                if agent is not None:
+                    self._evict_lru()
+                    self._memory_cache[cache_key] = agent
+                    self._access_times[cache_key] = time.time()
+                    if self.enable_weak_refs:
+                        self._weak_cache[cache_key] = agent
+                    self._hits += 1
+                    logger.debug(
+                        f"Cache hit (disk) for agent {cache_key}"
+                    )
+                    return agent
+
+            # Cache miss - need to create new agent
+            self._misses += 1
+            logger.debug(f"Cache miss for agent {cache_key}")
+            return None
+
+    def put_agent(self, agent_config: Dict[str, Any], agent: Agent):
+        """
+        Put an agent into the cache.
+
+        Args:
+            agent_config: Configuration dictionary for the agent
+            agent: The Agent instance to cache
+        """
+        cache_key = self._generate_cache_key(agent_config)
+
+        with self._lock:
+            self._evict_lru()
+            self._memory_cache[cache_key] = agent
+            self._access_times[cache_key] = time.time()
+
+            if self.enable_weak_refs:
+                self._weak_cache[cache_key] = agent
+
+            logger.debug(f"Added agent {cache_key} to cache")
+
+    def preload_agents(self, agent_configs: List[Dict[str, Any]]):
+        """
+        Preload agents in the background for faster access.
+
+        Args:
+            agent_configs: List of agent configurations to preload
+        """
+
+        def _preload_worker(config):
+            try:
+                cache_key = self._generate_cache_key(config)
+                if cache_key not in self._memory_cache:
+                    start_time = time.time()
+                    agent = Agent(**config)
+                    load_time = time.time() - start_time
+
+                    self.put_agent(config, agent)
+                    self._load_times[cache_key] = load_time
+                    logger.debug(
+                        f"Preloaded agent {cache_key} in {load_time:.3f}s"
+                    )
+            except Exception as e:
+                logger.error(f"Error preloading agent: {e}")
+
+        # Use thread pool for concurrent preloading
+        max_workers = max(1, min(len(agent_configs), os.cpu_count() or 1))
+        with ThreadPoolExecutor(max_workers=max_workers) as executor:
+            executor.map(_preload_worker, agent_configs)
+
+    def get_cache_stats(self) -> Dict[str, Any]:
+        """Get cache performance statistics."""
+        total_requests = self._hits + self._misses
+        hit_rate = (
+            (self._hits / total_requests * 100)
+            if total_requests > 0
+            else 0
+        )
+
+        return {
+            "hits": self._hits,
+            "misses": self._misses,
+            "hit_rate_percent": round(hit_rate, 2),
+            "memory_cache_size": len(self._memory_cache),
+            "weak_cache_size": len(self._weak_cache),
+            "average_load_time": (
+                sum(self._load_times.values()) / len(self._load_times)
+                if self._load_times
+                else 0
+            ),
+            "total_agents_loaded": len(self._load_times),
+        }
+
+    def clear_cache(self):
+        """Clear all caches."""
+        with self._lock:
+            self._memory_cache.clear()
+            self._weak_cache.clear()
+            self._access_times.clear()
+            logger.info("Cleared all caches")
+
+    def save_cache_to_disk(self):
+        """Save current memory cache to disk."""
+        if not self.enable_persistent_cache:
+            return
+
+        with self._lock:
+            saved_count = 0
+            for cache_key, agent in self._memory_cache.items():
+                try:
+                    self._save_agent_to_disk(cache_key, agent)
+                    saved_count += 1
+                except Exception as e:
+                    logger.error(
+                        f"Error saving agent {cache_key}: {e}"
+                    )
+
+            logger.info(f"Saved {saved_count} agents to disk cache")
+
+    def shutdown(self):
+        """Shutdown the cache system gracefully."""
+        self._shutdown_event.set()
+        if self._auto_save_thread:
+            self._auto_save_thread.join(timeout=5)
+
+        # Final save
+        if self.enable_persistent_cache:
+            self.save_cache_to_disk()
+
+        logger.info("AgentCache shutdown complete")
+
+
+# Global cache instance
+_global_cache: Optional[AgentCache] = None
+
+
+def get_global_cache() -> AgentCache:
+    """Get or create the global agent cache instance."""
+    global _global_cache
+    if _global_cache is None:
+        _global_cache = AgentCache()
+    return _global_cache
+
+
+def cached_agent_loader(
+    agents: List[Agent],
+    cache_instance: Optional[AgentCache] = None,
+    preload: bool = True,
+    parallel_loading: bool = True,
+) -> List[Agent]:
+    """
+    Load a list of agents with caching for super fast performance.
+
+    Args:
+        agents: List of Agent instances to cache/load
+        cache_instance: Optional cache instance (uses global cache if None)
+        preload: Whether to preload agents in background
+        parallel_loading: Whether to load agents in parallel
+
+    Returns:
+        List of Agent instances (cached versions if available)
+
+    Examples:
+        # Basic usage
+        agents = [Agent(agent_name="Agent1", model_name="gpt-4"), ...]
+        cached_agents = cached_agent_loader(agents)
+
+        # With custom cache
+        cache = AgentCache(max_memory_cache_size=256)
+        cached_agents = cached_agent_loader(agents, cache_instance=cache)
+
+        # Preload for even faster subsequent access
+        cached_agent_loader(agents, preload=True)
+        cached_agents = cached_agent_loader(agents)  # Super fast!
+    """
+    cache = cache_instance or get_global_cache()
+
+    start_time = time.time()
+
+    # Extract configurations from agents for caching
+    agent_configs = []
+    for agent in agents:
+        config = _extract_agent_config(agent)
+        agent_configs.append(config)
+
+    if preload:
+        # Preload agents in background
+        cache.preload_agents(agent_configs)
+
+    def _load_single_agent(agent: Agent) -> Agent:
+        """Load a single agent with caching."""
+        config = _extract_agent_config(agent)
+
+        # Try to get from cache first
+        cached_agent = cache.get_agent(config)
+
+        if cached_agent is None:
+            # Cache miss - use the provided agent and cache it
+            load_start = time.time()
+
+            # Add to cache for future use
+            cache.put_agent(config, agent)
+            load_time = time.time() - load_start
+
+            logger.debug(
+                f"Cached new agent {agent.agent_name} in {load_time:.3f}s"
+            )
+            return agent
+        else:
+            logger.debug(
+                f"Retrieved cached agent {cached_agent.agent_name}"
+            )
+            return cached_agent
+
+    # Load agents (parallel or sequential)
+    if parallel_loading and len(agents) > 1:
+        max_workers = min(len(agents), os.cpu_count() or 1)
+        with ThreadPoolExecutor(max_workers=max_workers) as executor:
+            cached_agents = list(
+                executor.map(_load_single_agent, agents)
+            )
+    else:
+        cached_agents = [
+            _load_single_agent(agent) for agent in agents
+        ]
+
+    total_time = time.time() - start_time
+
+    # Log performance stats
+    stats = cache.get_cache_stats()
+    logger.info(
+        f"Processed {len(cached_agents)} agents in {total_time:.3f}s "
+        f"(Hit rate: {stats['hit_rate_percent']}%)"
+    )
+
+    return cached_agents
+
+
+def _extract_agent_config(agent: Agent) -> Dict[str, Any]:
+    """
+    Extract a configuration dictionary from an Agent instance for caching.
+
+    Args:
+        agent: Agent instance to extract config from
+
+    Returns:
+        Configuration dictionary suitable for cache key generation
+    """
+    # Extract key attributes that define an agent's identity
+    config = {
+        "agent_name": getattr(agent, "agent_name", None),
+        "model_name": getattr(agent, "model_name", None),
+        "system_prompt": getattr(agent, "system_prompt", None),
+        "max_loops": getattr(agent, "max_loops", None),
+        "temperature": getattr(agent, "temperature", None),
+        "max_tokens": getattr(agent, "max_tokens", None),
+        "agent_description": getattr(
+            agent, "agent_description", None
+        ),
+        # Add other key identifying attributes
+        "tools": str(
+            getattr(agent, "tools", [])
+        ),  # Convert to string for hashing, default to empty list
+        "context_length": getattr(agent, "context_length", None),
+    }
+
+    # Remove None values to create a clean config
+    config = {k: v for k, v in config.items() if v is not None}
+
+    return config
+
+
+def cached_agent_loader_from_configs(
+    agent_configs: List[Dict[str, Any]],
+    cache_instance: Optional[AgentCache] = None,
+    preload: bool = True,
+    parallel_loading: bool = True,
+) -> List[Agent]:
+    """
+    Load a list of agents from configuration dictionaries with caching.
+
+    Args:
+        agent_configs: List of agent configuration dictionaries
+        cache_instance: Optional cache instance (uses global cache if None)
+        preload: Whether to preload agents in background
+        parallel_loading: Whether to load agents in parallel
+
+    Returns:
+        List of Agent instances
+
+    Examples:
+        # Basic usage
+        configs = [{"agent_name": "Agent1", "model_name": "gpt-4"}, ...]
+        agents = cached_agent_loader_from_configs(configs)
+
+        # With custom cache
+        cache = AgentCache(max_memory_cache_size=256)
+        agents = cached_agent_loader_from_configs(configs, cache_instance=cache)
+    """
+    cache = cache_instance or get_global_cache()
+
+    start_time = time.time()
+
+    if preload:
+        # Preload agents in background
+        cache.preload_agents(agent_configs)
+
+    def _load_single_agent(config: Dict[str, Any]) -> Agent:
+        """Load a single agent with caching."""
+        # Try to get from cache first
+        agent = cache.get_agent(config)
+
+        if agent is None:
+            # Cache miss - create new agent
+            load_start = time.time()
+            agent = Agent(**config)
+            load_time = time.time() - load_start
+
+            # Add to cache for future use
+            cache.put_agent(config, agent)
+
+            logger.debug(
+                f"Created new agent {agent.agent_name} in {load_time:.3f}s"
+            )
+
+        return agent
+
+    # Load agents (parallel or sequential)
+    if parallel_loading and len(agent_configs) > 1:
+        max_workers = min(len(agent_configs), os.cpu_count() or 1)
+        with ThreadPoolExecutor(max_workers=max_workers) as executor:
+            agents = list(
+                executor.map(_load_single_agent, agent_configs)
+            )
+    else:
+        agents = [
+            _load_single_agent(config) for config in agent_configs
+        ]
+
+    total_time = time.time() - start_time
+
+    # Log performance stats
+    stats = cache.get_cache_stats()
+    logger.info(
+        f"Loaded {len(agents)} agents in {total_time:.3f}s "
+        f"(Hit rate: {stats['hit_rate_percent']}%)"
+    )
+
+    return agents
+
+
+# Decorator for caching individual agent creation
+def cache_agent_creation(cache_instance: Optional[AgentCache] = None):
+    """
+    Decorator to cache agent creation based on initialization parameters.
+
+    Args:
+        cache_instance: Optional cache instance (uses global cache if None)
+
+    Returns:
+        Decorator function
+
+    Example:
+        @cache_agent_creation()
+        def create_trading_agent(symbol: str, model: str):
+            return Agent(
+                agent_name=f"Trading-{symbol}",
+                model_name=model,
+                system_prompt=f"You are a trading agent for {symbol}"
+            )
+
+        agent1 = create_trading_agent("AAPL", "gpt-4")  # Creates new agent
+        agent2 = create_trading_agent("AAPL", "gpt-4")  # Returns cached agent
+    """
+
+    def decorator(func: Callable[..., Agent]) -> Callable[..., Agent]:
+        cache = cache_instance or get_global_cache()
+
+        @wraps(func)
+        def wrapper(*args, **kwargs) -> Agent:
+            # Create a config dict from function arguments
+            import inspect
+
+            sig = inspect.signature(func)
+            bound_args = sig.bind(*args, **kwargs)
+            bound_args.apply_defaults()
+
+            config = dict(bound_args.arguments)
+
+            # Try to get from cache
+            agent = cache.get_agent(config)
+
+            if agent is None:
+                # Cache miss - call original function
+                agent = func(*args, **kwargs)
+                cache.put_agent(config, agent)
+
+            return agent
+
+        return wrapper
+
+    return decorator
+
+
+# LRU Cache-based simple approach
+@lru_cache(maxsize=128)
+def _cached_agent_by_hash(
+    config_hash: str, config_json: str
+) -> Agent:
+    """Internal LRU cached agent creation by config hash."""
+    config = json.loads(config_json)
+    return Agent(**config)
+
+
+def simple_lru_agent_loader(
+    agents: List[Agent],
+) -> List[Agent]:
+    """
+    Simple LRU cache-based agent loader using functools.lru_cache.
+
+    Args:
+        agents: List of Agent instances
+
+    Returns:
+        List of Agent instances (cached versions if available)
+
+    Note:
+        This is a simpler approach but less flexible than the full AgentCache.
+    """
+    cached_agents = []
+
+    for agent in agents:
+        # Extract config from agent
+        config = _extract_agent_config(agent)
+
+        # Create stable hash and JSON string
+        config_json = json.dumps(config, sort_keys=True, default=str)
+        config_hash = hashlib.md5(config_json.encode()).hexdigest()
+
+        # Use LRU cached function
+        cached_agent = _cached_agent_by_hash_from_agent(
+            config_hash, agent
+        )
+        cached_agents.append(cached_agent)
+
+    return cached_agents
+
+
+@lru_cache(maxsize=128)
+def _cached_agent_by_hash_from_agent(
+    config_hash: str, agent: Agent
+) -> Agent:
+    """Internal LRU cached agent storage by config hash."""
+    # Return the same agent instance (this creates the caching effect)
+    return agent
+
+
+# Utility functions for cache management
+def clear_agent_cache():
+    """Clear the global agent cache."""
+    cache = get_global_cache()
+    cache.clear_cache()
+
+
+def get_agent_cache_stats() -> Dict[str, Any]:
+    """Get statistics from the global agent cache."""
+    cache = get_global_cache()
+    return cache.get_cache_stats()
+
+
+def shutdown_agent_cache():
+    """Shutdown the global agent cache gracefully."""
+    global _global_cache
+    if _global_cache:
+        _global_cache.shutdown()
+        _global_cache = None
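
For orientation, the sketch below shows how the caching helpers added in this file could be exercised, based only on the functions defined in the diff above (`cached_agent_loader`, `get_agent_cache_stats`, `shutdown_agent_cache`). The import path is an assumption, since the diff does not show where the new module lives inside the package; adjust it to the actual location.

```python
from swarms.structs.agent import Agent

# Hypothetical module path - the diff does not name the added file.
from swarms.utils.agent_cache import (
    cached_agent_loader,
    get_agent_cache_stats,
    shutdown_agent_cache,
)

# Build agents as usual, then route them through the loader so that
# identical configurations are served from the cache on later calls.
agents = [
    Agent(agent_name="Researcher", model_name="gpt-4"),
    Agent(agent_name="Writer", model_name="gpt-4"),
]

cached_agents = cached_agent_loader(agents)  # first call populates the cache
cached_agents = cached_agent_loader(agents)  # repeated call is served from cache

print(get_agent_cache_stats())  # hits, misses, hit rate, cache sizes

shutdown_agent_cache()  # flush the persistent cache and stop the auto-save thread
```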