kailash 0.1.4__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +740 -0
  3. kailash/api/__main__.py +6 -0
  4. kailash/api/auth.py +668 -0
  5. kailash/api/custom_nodes.py +285 -0
  6. kailash/api/custom_nodes_secure.py +377 -0
  7. kailash/api/database.py +620 -0
  8. kailash/api/studio.py +915 -0
  9. kailash/api/studio_secure.py +893 -0
  10. kailash/mcp/__init__.py +53 -0
  11. kailash/mcp/__main__.py +13 -0
  12. kailash/mcp/ai_registry_server.py +712 -0
  13. kailash/mcp/client.py +447 -0
  14. kailash/mcp/client_new.py +334 -0
  15. kailash/mcp/server.py +293 -0
  16. kailash/mcp/server_new.py +336 -0
  17. kailash/mcp/servers/__init__.py +12 -0
  18. kailash/mcp/servers/ai_registry.py +289 -0
  19. kailash/nodes/__init__.py +4 -2
  20. kailash/nodes/ai/__init__.py +38 -0
  21. kailash/nodes/ai/a2a.py +1790 -0
  22. kailash/nodes/ai/agents.py +116 -2
  23. kailash/nodes/ai/ai_providers.py +206 -8
  24. kailash/nodes/ai/intelligent_agent_orchestrator.py +2108 -0
  25. kailash/nodes/ai/iterative_llm_agent.py +1280 -0
  26. kailash/nodes/ai/llm_agent.py +324 -1
  27. kailash/nodes/ai/self_organizing.py +1623 -0
  28. kailash/nodes/api/http.py +106 -25
  29. kailash/nodes/api/rest.py +116 -21
  30. kailash/nodes/base.py +15 -2
  31. kailash/nodes/base_async.py +45 -0
  32. kailash/nodes/base_cycle_aware.py +374 -0
  33. kailash/nodes/base_with_acl.py +338 -0
  34. kailash/nodes/code/python.py +135 -27
  35. kailash/nodes/data/readers.py +116 -53
  36. kailash/nodes/data/writers.py +16 -6
  37. kailash/nodes/logic/__init__.py +8 -0
  38. kailash/nodes/logic/async_operations.py +48 -9
  39. kailash/nodes/logic/convergence.py +642 -0
  40. kailash/nodes/logic/loop.py +153 -0
  41. kailash/nodes/logic/operations.py +212 -27
  42. kailash/nodes/logic/workflow.py +26 -18
  43. kailash/nodes/mixins/__init__.py +11 -0
  44. kailash/nodes/mixins/mcp.py +228 -0
  45. kailash/nodes/mixins.py +387 -0
  46. kailash/nodes/transform/__init__.py +8 -1
  47. kailash/nodes/transform/processors.py +119 -4
  48. kailash/runtime/__init__.py +2 -1
  49. kailash/runtime/access_controlled.py +458 -0
  50. kailash/runtime/local.py +106 -33
  51. kailash/runtime/parallel_cyclic.py +529 -0
  52. kailash/sdk_exceptions.py +90 -5
  53. kailash/security.py +845 -0
  54. kailash/tracking/manager.py +38 -15
  55. kailash/tracking/models.py +1 -1
  56. kailash/tracking/storage/filesystem.py +30 -2
  57. kailash/utils/__init__.py +8 -0
  58. kailash/workflow/__init__.py +18 -0
  59. kailash/workflow/convergence.py +270 -0
  60. kailash/workflow/cycle_analyzer.py +768 -0
  61. kailash/workflow/cycle_builder.py +573 -0
  62. kailash/workflow/cycle_config.py +709 -0
  63. kailash/workflow/cycle_debugger.py +760 -0
  64. kailash/workflow/cycle_exceptions.py +601 -0
  65. kailash/workflow/cycle_profiler.py +671 -0
  66. kailash/workflow/cycle_state.py +338 -0
  67. kailash/workflow/cyclic_runner.py +985 -0
  68. kailash/workflow/graph.py +500 -39
  69. kailash/workflow/migration.py +768 -0
  70. kailash/workflow/safety.py +365 -0
  71. kailash/workflow/templates.py +744 -0
  72. kailash/workflow/validation.py +693 -0
  73. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/METADATA +446 -13
  74. kailash-0.2.0.dist-info/RECORD +125 -0
  75. kailash/nodes/mcp/__init__.py +0 -11
  76. kailash/nodes/mcp/client.py +0 -554
  77. kailash/nodes/mcp/resource.py +0 -682
  78. kailash/nodes/mcp/server.py +0 -577
  79. kailash-0.1.4.dist-info/RECORD +0 -85
  80. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/WHEEL +0 -0
  81. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/entry_points.txt +0 -0
  82. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/licenses/LICENSE +0 -0
  83. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/a2a.py
@@ -0,0 +1,1790 @@
+ """Agent-to-Agent (A2A) communication nodes with shared memory pools.
+
+ This module implements multi-agent communication with selective attention mechanisms,
+ enabling efficient collaboration between AI agents while preventing information overload.
+
+ Design Philosophy:
+ The A2A system enables decentralized multi-agent collaboration through shared
+ memory pools and attention mechanisms. Agents can share insights, coordinate
+ tasks, and build collective intelligence without centralized control.
+ """
11
+
12
+ import json
13
+ import time
14
+ import uuid
15
+ from collections import defaultdict, deque
16
+ from datetime import datetime
17
+ from typing import Any, Dict, List, Optional, Set
18
+
19
+ from kailash.nodes.ai.llm_agent import LLMAgentNode
20
+ from kailash.nodes.base import Node, NodeParameter, register_node
21
+ from kailash.nodes.base_cycle_aware import CycleAwareNode
22
+
23
+
24
+ @register_node()
25
+ class SharedMemoryPoolNode(Node):
26
+ """
27
+ Central memory pool that multiple agents can read from and write to.
28
+
29
+ This node implements a sophisticated shared memory system with selective attention
30
+ mechanisms, enabling efficient multi-agent collaboration while preventing information
31
+ overload through intelligent filtering and segmentation.
32
+
33
+ Design Philosophy:
34
+ The SharedMemoryPoolNode acts as a cognitive workspace where agents can share
35
+ discoveries, insights, and intermediate results. It implements attention-based
36
+ filtering inspired by human selective attention, allowing agents to focus on
37
+ relevant information without being overwhelmed by the full memory pool.
38
+
39
+ Upstream Dependencies:
40
+ - A2AAgentNode: Primary writer of memories with insights and discoveries
41
+ - A2ACoordinatorNode: Writes coordination messages and task assignments
42
+ - Any custom agent nodes that need to share information
43
+
44
+ Downstream Consumers:
45
+ - A2AAgentNode: Reads relevant memories to enhance context
46
+ - A2ACoordinatorNode: Monitors agent progress through memory queries
47
+ - SolutionEvaluatorNode: Aggregates insights for evaluation
48
+ - Any analysis or visualization nodes needing shared context
49
+
50
+ Configuration:
51
+ This node is typically configured at workflow initialization and doesn't
52
+ require runtime configuration. Memory segmentation and size limits can
53
+ be adjusted through class attributes.
54
+
55
+ Implementation Details:
56
+ - Uses segmented memory pools for different types of information
57
+ - Implements tag-based indexing for fast retrieval
58
+ - Supports importance-weighted attention filtering
59
+ - Maintains agent subscription patterns for targeted delivery
60
+ - Automatically manages memory size through FIFO eviction
61
+
62
+ Error Handling:
63
+ - Returns empty results for invalid queries rather than failing
64
+ - Handles missing segments gracefully
65
+ - Validates importance scores to [0, 1] range
66
+
67
+ Side Effects:
68
+ - Maintains internal memory state across workflow execution
69
+ - Memory persists for the lifetime of the node instance
70
+ - Does not persist to disk or external storage
71
+
72
+ Examples:
73
+ >>> # Create a shared memory pool
74
+ >>> memory_pool = SharedMemoryPoolNode()
75
+ >>>
76
+ >>> # Write memory from an agent
77
+ >>> result = memory_pool.run(
78
+ ... action="write",
79
+ ... agent_id="researcher_001",
80
+ ... content="Found correlation between X and Y",
81
+ ... tags=["research", "correlation", "data"],
82
+ ... importance=0.8,
83
+ ... segment="findings"
84
+ ... )
85
+ >>> assert result["success"] == True
86
+ >>> assert result["memory_id"] is not None
87
+ >>>
88
+ >>> # Read with attention filter
89
+ >>> memories = memory_pool.run(
90
+ ... action="read",
91
+ ... agent_id="analyst_001",
92
+ ... attention_filter={
93
+ ... "tags": ["correlation"],
94
+ ... "importance_threshold": 0.7,
95
+ ... "window_size": 5
96
+ ... }
97
+ ... )
98
+ >>> assert len(memories["memories"]) > 0
99
+ >>>
100
+ >>> # Subscribe to specific segments
101
+ >>> memory_pool.run(
102
+ ... action="subscribe",
103
+ ... agent_id="monitor_001",
104
+ ... segments=["findings", "alerts"]
105
+ ... )
106
+ >>>
107
+ >>> # Semantic query across all memories
108
+ >>> results = memory_pool.run(
109
+ ... action="query",
110
+ ... query="correlation analysis",
111
+ ... top_k=3
112
+ ... )
113
+ """
114
+
115
+ def __init__(self):
116
+ super().__init__()
117
+ self.memory_segments = defaultdict(deque)
118
+ self.agent_subscriptions = defaultdict(set)
119
+ self.attention_indices = defaultdict(lambda: defaultdict(list))
120
+ self.memory_id_counter = 0
121
+ self.max_segment_size = 1000
122
+
123
+ def get_parameters(self) -> Dict[str, NodeParameter]:
124
+ return {
125
+ "action": NodeParameter(
126
+ name="action",
127
+ type=str,
128
+ required=False,
129
+ default="read",
130
+ description="Action to perform: 'write', 'read', 'subscribe', 'query'",
131
+ ),
132
+ "agent_id": NodeParameter(
133
+ name="agent_id",
134
+ type=str,
135
+ required=False,
136
+ default="system",
137
+ description="ID of the agent performing the action",
138
+ ),
139
+ "content": NodeParameter(
140
+ name="content",
141
+ type=Any,
142
+ required=False,
143
+ description="Content to write to memory (for write action)",
144
+ ),
145
+ "tags": NodeParameter(
146
+ name="tags",
147
+ type=list,
148
+ required=False,
149
+ default=[],
150
+ description="Tags to categorize the memory",
151
+ ),
152
+ "importance": NodeParameter(
153
+ name="importance",
154
+ type=float,
155
+ required=False,
156
+ default=0.5,
157
+ description="Importance score (0.0 to 1.0)",
158
+ ),
159
+ "segment": NodeParameter(
160
+ name="segment",
161
+ type=str,
162
+ required=False,
163
+ default="general",
164
+ description="Memory segment to write to",
165
+ ),
166
+ "attention_filter": NodeParameter(
167
+ name="attention_filter",
168
+ type=dict,
169
+ required=False,
170
+ default={},
171
+ description="Filter criteria for reading memories",
172
+ ),
173
+ "context": NodeParameter(
174
+ name="context",
175
+ type=dict,
176
+ required=False,
177
+ default={},
178
+ description="Additional context for the memory",
179
+ ),
180
+ "query": NodeParameter(
181
+ name="query",
182
+ type=str,
183
+ required=False,
184
+ description="Search query for semantic memory search",
185
+ ),
186
+ }
187
+
188
+ def run(self, **kwargs) -> Dict[str, Any]:
189
+ """
190
+ Execute memory pool operations.
191
+
192
+ This method routes requests to appropriate handlers based on the action
193
+ parameter, supporting write, read, subscribe, query, and metrics operations.
194
+
195
+ Args:
196
+ **kwargs: Operation parameters including:
197
+ action (str): Operation type ('write', 'read', 'subscribe', 'query', 'metrics')
198
+ Additional parameters specific to each action
199
+
200
+ Returns:
201
+ Dict[str, Any]: Operation results with 'success' status and action-specific data
202
+
203
+ Raises:
204
+ No exceptions raised - errors returned in response dict
205
+
206
+ Side Effects:
207
+ Modifies internal memory state for write operations
208
+ Updates subscription lists for subscribe operations
209
+ """
210
+ action = kwargs.get("action")
211
+
212
+ if action == "write":
213
+ return self._write_memory(kwargs)
214
+ elif action == "read":
215
+ return self._read_with_attention(kwargs)
216
+ elif action == "subscribe":
217
+ return self._subscribe_agent(kwargs)
218
+ elif action == "query":
219
+ return self._semantic_query(kwargs)
220
+ elif action == "metrics":
221
+ return self._get_metrics()
222
+ else:
223
+ return {"success": False, "error": f"Unknown action: {action}"}
224
+
225
+ def _write_memory(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
226
+ """Write information to shared pool with metadata."""
227
+ self.memory_id_counter += 1
228
+ memory_item = {
229
+ "id": f"mem_{self.memory_id_counter}",
230
+ "content": kwargs["content"],
231
+ "agent_id": kwargs["agent_id"],
232
+ "timestamp": time.time(),
233
+ "datetime": datetime.now().isoformat(),
234
+ "tags": kwargs.get("tags", []),
235
+ "importance": kwargs.get("importance", 0.5),
236
+ "context": kwargs.get("context", {}),
237
+ "access_count": 0,
238
+ }
239
+
240
+ # Store in appropriate segment
241
+ segment = kwargs.get("segment", "general")
242
+ self.memory_segments[segment].append(memory_item)
243
+
244
+ # Maintain segment size limit
245
+ if len(self.memory_segments[segment]) > self.max_segment_size:
246
+ self.memory_segments[segment].popleft()
247
+
248
+ # Update attention indices
249
+ self._update_attention_indices(memory_item, segment)
250
+
251
+ # Get relevant agents
252
+ relevant_agents = self._get_relevant_agents(memory_item, segment)
253
+
254
+ return {
255
+ "success": True,
256
+ "memory_id": memory_item["id"],
257
+ "segment": segment,
258
+ "notified_agents": list(relevant_agents),
259
+ "timestamp": memory_item["timestamp"],
260
+ }
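Each segment is a plain deque trimmed back to max_segment_size after every write, so the oldest memories fall off first. A standalone sketch of the same FIFO eviction with illustrative IDs; collections.deque(maxlen=...) would give identical behaviour without the manual check.

from collections import deque

segment = deque()
MAX_SEGMENT_SIZE = 3  # stands in for self.max_segment_size (1000 in the node)
for i in range(5):
    segment.append({"id": f"mem_{i + 1}"})
    if len(segment) > MAX_SEGMENT_SIZE:
        segment.popleft()  # evict the oldest memory first
print([m["id"] for m in segment])  # ['mem_3', 'mem_4', 'mem_5']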
261
+
262
+ def _read_with_attention(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
263
+ """Read relevant memories based on attention filter."""
264
+ agent_id = kwargs["agent_id"]
265
+ attention_filter = kwargs.get("attention_filter", {})
266
+
267
+ relevant_memories = []
268
+
269
+ # Apply attention mechanism
270
+ for segment, memories in self.memory_segments.items():
271
+ if self._matches_attention_filter(segment, attention_filter):
272
+ for memory in memories:
273
+ relevance_score = self._calculate_relevance(
274
+ memory, attention_filter, agent_id
275
+ )
276
+ if relevance_score > attention_filter.get("threshold", 0.3):
277
+ memory["access_count"] += 1
278
+ relevant_memories.append(
279
+ {
280
+ **memory,
281
+ "relevance_score": relevance_score,
282
+ "segment": segment,
283
+ }
284
+ )
285
+
286
+ # Sort by relevance and recency
287
+ relevant_memories.sort(
288
+ key=lambda x: (x["relevance_score"], x["timestamp"]), reverse=True
289
+ )
290
+
291
+ # Limit to attention window
292
+ window_size = attention_filter.get("window_size", 10)
293
+ selected_memories = relevant_memories[:window_size]
294
+
295
+ return {
296
+ "success": True,
297
+ "memories": selected_memories,
298
+ "total_available": len(relevant_memories),
299
+ "segments_scanned": list(self.memory_segments.keys()),
300
+ "agent_id": agent_id,
301
+ }
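The attention filter controls both which segments are scanned and how each memory is scored by the relevance helper defined further down. A minimal write-then-read sketch, assuming the classes are importable from kailash.nodes.ai.a2a (the module added in this release); the agent IDs, tags, and numbers are illustrative only.

from kailash.nodes.ai.a2a import SharedMemoryPoolNode

pool = SharedMemoryPoolNode()
pool.run(
    action="write",
    agent_id="researcher_001",
    content="Caching reduced p95 latency by 40%",
    tags=["performance", "caching"],
    importance=0.9,
    segment="findings",
)
result = pool.run(
    action="read",
    agent_id="analyst_001",
    attention_filter={
        "segments": ["findings"],             # only these segments are scanned
        "tags": ["performance"],              # tag overlap boosts the score
        "importance_threshold": 0.7,          # gate for the importance term
        "recency_window": 1800,               # seconds; fresher memories score higher
        "preferred_agents": ["researcher_001"],
        "threshold": 0.3,                     # minimum relevance to be returned
        "window_size": 5,                     # attention window cap
    },
)
print(result["total_available"], result["memories"][0]["relevance_score"])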
302
+
303
+ def _subscribe_agent(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
304
+ """Subscribe an agent to specific memory segments or tags."""
305
+ agent_id = kwargs["agent_id"]
306
+ segments = kwargs.get("segments", ["general"])
307
+ tags = kwargs.get("tags", [])
308
+
309
+ for segment in segments:
310
+ self.agent_subscriptions[segment].add(agent_id)
311
+
312
+ # Store subscription preferences
313
+ if not hasattr(self, "agent_preferences"):
314
+ self.agent_preferences = {}
315
+
316
+ self.agent_preferences[agent_id] = {
317
+ "segments": segments,
318
+ "tags": tags,
319
+ "attention_filter": kwargs.get("attention_filter", {}),
320
+ }
321
+
322
+ return {
323
+ "success": True,
324
+ "agent_id": agent_id,
325
+ "subscribed_segments": segments,
326
+ "subscribed_tags": tags,
327
+ }
328
+
329
+ def _semantic_query(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
330
+ """Perform semantic search across memories."""
331
+ query = kwargs.get("query", "")
333
+
334
+ # Simple keyword matching for now (can be enhanced with embeddings)
335
+ matching_memories = []
336
+ query_lower = query.lower()
337
+
338
+ for segment, memories in self.memory_segments.items():
339
+ for memory in memories:
340
+ content_str = str(memory.get("content", "")).lower()
341
+ if query_lower in content_str:
342
+ score = content_str.count(query_lower) / max(len(content_str.split()), 1)
343
+ matching_memories.append(
344
+ {**memory, "match_score": score, "segment": segment}
345
+ )
346
+
347
+ # Sort by match score
348
+ matching_memories.sort(key=lambda x: x["match_score"], reverse=True)
349
+
350
+ return {
351
+ "success": True,
352
+ "query": query,
353
+ "results": matching_memories[:10],
354
+ "total_matches": len(matching_memories),
355
+ }
356
+
357
+ def _update_attention_indices(self, memory_item: Dict[str, Any], segment: str):
358
+ """Update indices for efficient attention-based retrieval."""
359
+ # Index by tags
360
+ for tag in memory_item.get("tags", []):
361
+ self.attention_indices["tags"][tag].append(memory_item["id"])
362
+
363
+ # Index by agent
364
+ agent_id = memory_item["agent_id"]
365
+ self.attention_indices["agents"][agent_id].append(memory_item["id"])
366
+
367
+ # Index by importance level
368
+ importance = memory_item["importance"]
369
+ if importance >= 0.8:
370
+ self.attention_indices["importance"]["high"].append(memory_item["id"])
371
+ elif importance >= 0.5:
372
+ self.attention_indices["importance"]["medium"].append(memory_item["id"])
373
+ else:
374
+ self.attention_indices["importance"]["low"].append(memory_item["id"])
375
+
376
+ def _matches_attention_filter(
377
+ self, segment: str, attention_filter: Dict[str, Any]
378
+ ) -> bool:
379
+ """Check if a segment matches the attention filter."""
380
+ # Check segment filter
381
+ if "segments" in attention_filter:
382
+ if segment not in attention_filter["segments"]:
383
+ return False
384
+
385
+ return True
386
+
387
+ def _calculate_relevance(
388
+ self, memory: Dict[str, Any], attention_filter: Dict[str, Any], agent_id: str
389
+ ) -> float:
390
+ """Calculate relevance score for a memory item."""
391
+ score = 0.0
392
+ weights = attention_filter.get(
393
+ "weights", {"tags": 0.3, "importance": 0.3, "recency": 0.2, "agent": 0.2}
394
+ )
395
+
396
+ # Tag matching
397
+ if "tags" in attention_filter:
398
+ filter_tags = set(attention_filter["tags"])
399
+ memory_tags = set(memory.get("tags", []))
400
+ if filter_tags & memory_tags:
401
+ score += (
402
+ weights.get("tags", 0.3)
403
+ * len(filter_tags & memory_tags)
404
+ / len(filter_tags)
405
+ )
406
+
407
+ # Importance threshold
408
+ importance_threshold = attention_filter.get("importance_threshold", 0.0)
409
+ if memory.get("importance", 0) >= importance_threshold:
410
+ score += weights.get("importance", 0.3) * memory["importance"]
411
+
412
+ # Recency
413
+ current_time = time.time()
414
+ age_seconds = current_time - memory["timestamp"]
415
+ recency_window = attention_filter.get("recency_window", 3600) # 1 hour default
416
+ if age_seconds < recency_window:
417
+ recency_score = 1.0 - (age_seconds / recency_window)
418
+ score += weights.get("recency", 0.2) * recency_score
419
+
420
+ # Agent affinity
421
+ if "preferred_agents" in attention_filter:
422
+ if memory["agent_id"] in attention_filter["preferred_agents"]:
423
+ score += weights.get("agent", 0.2)
424
+
425
+ return min(score, 1.0)
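With the default weights (tags 0.3, importance 0.3, recency 0.2, agent 0.2) the score is a capped sum of four terms. A standalone sketch of the same arithmetic for a single fifteen-minute-old memory; the values are illustrative.

import time

weights = {"tags": 0.3, "importance": 0.3, "recency": 0.2, "agent": 0.2}
memory = {
    "tags": ["performance", "caching"],
    "importance": 0.9,
    "timestamp": time.time() - 900,  # written 15 minutes ago
    "agent_id": "researcher_001",
}
flt = {
    "tags": ["performance"],
    "importance_threshold": 0.7,
    "recency_window": 3600,
    "preferred_agents": ["researcher_001"],
}

score = 0.0
overlap = set(flt["tags"]) & set(memory["tags"])
if overlap:
    score += weights["tags"] * len(overlap) / len(flt["tags"])         # +0.30
if memory["importance"] >= flt["importance_threshold"]:
    score += weights["importance"] * memory["importance"]              # +0.27
age = time.time() - memory["timestamp"]
if age < flt["recency_window"]:
    score += weights["recency"] * (1.0 - age / flt["recency_window"])  # +~0.15
if memory["agent_id"] in flt["preferred_agents"]:
    score += weights["agent"]                                          # +0.20
print(round(min(score, 1.0), 2))  # ~0.92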
426
+
427
+ def _get_relevant_agents(
428
+ self, memory_item: Dict[str, Any], segment: str
429
+ ) -> Set[str]:
430
+ """Get agents that should be notified about this memory."""
431
+ relevant_agents = set()
432
+
433
+ # Agents subscribed to this segment
434
+ relevant_agents.update(self.agent_subscriptions.get(segment, set()))
435
+
436
+ # Agents with matching tag subscriptions
437
+ if hasattr(self, "agent_preferences"):
438
+ for agent_id, prefs in self.agent_preferences.items():
439
+ if any(
440
+ tag in memory_item.get("tags", []) for tag in prefs.get("tags", [])
441
+ ):
442
+ relevant_agents.add(agent_id)
443
+
444
+ # Remove the writing agent
445
+ relevant_agents.discard(memory_item["agent_id"])
446
+
447
+ return relevant_agents
448
+
449
+ def _get_metrics(self) -> Dict[str, Any]:
450
+ """Get memory pool metrics."""
451
+ total_memories = sum(
452
+ len(memories) for memories in self.memory_segments.values()
453
+ )
454
+
455
+ return {
456
+ "success": True,
457
+ "total_memories": total_memories,
458
+ "segments": list(self.memory_segments.keys()),
459
+ "segment_sizes": {
460
+ segment: len(memories)
461
+ for segment, memories in self.memory_segments.items()
462
+ },
463
+ "total_agents": len(self.agent_subscriptions),
464
+ "memory_id_counter": self.memory_id_counter,
465
+ }
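The 'metrics' action is dispatched by run() alongside the four shown in the class docstring examples; a small sketch, again assuming the import path kailash.nodes.ai.a2a.

from kailash.nodes.ai.a2a import SharedMemoryPoolNode

pool = SharedMemoryPoolNode()
pool.run(action="write", agent_id="a1", content="first finding", segment="findings")
stats = pool.run(action="metrics")
print(stats["total_memories"], stats["segment_sizes"])  # 1 {'findings': 1}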
466
+
467
+
468
+ @register_node()
469
+ class A2AAgentNode(LLMAgentNode):
470
+ """
471
+ Enhanced LLM agent with agent-to-agent communication capabilities.
472
+
473
+ This node extends the standard LLMAgentNode with sophisticated A2A communication
474
+ features, enabling agents to share insights through a shared memory pool, enhance
475
+ their context with relevant information from other agents, and collaborate
476
+ effectively on complex tasks.
477
+
478
+ Design Philosophy:
479
+ A2AAgentNode represents an intelligent agent that can both contribute to and
480
+ benefit from collective intelligence. It automatically extracts insights from
481
+ its responses and shares them with other agents while selectively attending
482
+ to relevant information from the shared memory pool. This creates an emergent
483
+ collaborative intelligence system.
484
+
485
+ Upstream Dependencies:
486
+ - QueryAnalysisNode: Provides analyzed queries and context
487
+ - TeamFormationNode: Assigns roles and capabilities to agents
488
+ - A2ACoordinatorNode: Delegates tasks and coordinates activities
489
+ - SharedMemoryPoolNode: Provides access to shared memories
490
+
491
+ Downstream Consumers:
492
+ - SharedMemoryPoolNode: Receives insights and discoveries
493
+ - A2ACoordinatorNode: Reports progress and results
494
+ - SolutionEvaluatorNode: Provides solutions for evaluation
495
+ - Other A2AAgentNodes: Indirect consumers through shared memory
496
+
497
+ Configuration:
498
+ Inherits all configuration from LLMAgentNode plus A2A-specific parameters
499
+ for memory pool integration, attention filtering, and collaboration modes.
500
+
501
+ Implementation Details:
502
+ - Automatically extracts insights from LLM responses
503
+ - Enhances prompts with relevant context from shared memory
504
+ - Supports multiple collaboration modes (cooperative, competitive, hierarchical)
505
+ - Tracks conversation context and shares key discoveries
506
+ - Implements attention filtering to prevent information overload
507
+
508
+ Error Handling:
509
+ - Gracefully handles missing memory pool connections
510
+ - Falls back to standard LLM behavior if A2A features fail
511
+ - Validates insight extraction to prevent malformed memories
512
+
513
+ Side Effects:
514
+ - Writes insights to SharedMemoryPoolNode after each interaction
515
+ - Maintains conversation history for context
516
+ - May influence other agents through shared memories
517
+
518
+ Examples:
519
+ >>> # Create an A2A agent with specific expertise
520
+ >>> agent = A2AAgentNode()
521
+ >>>
522
+ >>> # Execute with A2A features
523
+ >>> result = agent.run(
524
+ ... agent_id="researcher_001",
525
+ ... agent_role="research_specialist",
526
+ ... provider="openai",
527
+ ... model="gpt-4",
528
+ ... messages=[{
529
+ ... "role": "user",
530
+ ... "content": "Analyze the impact of AI on productivity"
531
+ ... }],
532
+ ... memory_pool=memory_pool_instance,
533
+ ... attention_filter={
534
+ ... "tags": ["productivity", "AI", "research"],
535
+ ... "importance_threshold": 0.7
536
+ ... },
537
+ ... collaboration_mode="cooperative"
538
+ ... )
539
+ >>> assert result["success"] == True
540
+ >>> assert "insights_generated" in result["a2a_metadata"]
541
+ >>>
542
+ >>> # Agent automatically shares insights
543
+ >>> insights = result["a2a_metadata"]["insights_generated"]
544
+ >>> assert len(insights) > 0
545
+ >>> assert all("content" in i for i in insights)
546
+ """
547
+
548
+ def __init__(self):
549
+ super().__init__()
550
+ self.local_memory = deque(maxlen=100)
551
+ self.communication_log = deque(maxlen=50)
552
+
553
+ def get_parameters(self) -> Dict[str, NodeParameter]:
554
+ # Inherit all LLMAgentNode parameters
555
+ params = super().get_parameters()
556
+
557
+ # Add A2A-specific parameters
558
+ params.update(
559
+ {
560
+ "agent_id": NodeParameter(
561
+ name="agent_id",
562
+ type=str,
563
+ required=False,
564
+ default=f"agent_{uuid.uuid4().hex[:8]}",
565
+ description="Unique identifier for this agent",
566
+ ),
567
+ "agent_role": NodeParameter(
568
+ name="agent_role",
569
+ type=str,
570
+ required=False,
571
+ default="general",
572
+ description="Role of the agent (researcher, analyst, coordinator, etc.)",
573
+ ),
574
+ "memory_pool": NodeParameter(
575
+ name="memory_pool",
576
+ type=Node,
577
+ required=False,
578
+ description="Reference to SharedMemoryPoolNode",
579
+ ),
580
+ "attention_filter": NodeParameter(
581
+ name="attention_filter",
582
+ type=dict,
583
+ required=False,
584
+ default={},
585
+ description="Criteria for filtering relevant information from shared memory",
586
+ ),
587
+ "communication_config": NodeParameter(
588
+ name="communication_config",
589
+ type=dict,
590
+ required=False,
591
+ default={"mode": "direct", "protocol": "json-rpc"},
592
+ description="A2A communication settings",
593
+ ),
594
+ "collaboration_mode": NodeParameter(
595
+ name="collaboration_mode",
596
+ type=str,
597
+ required=False,
598
+ default="cooperative",
599
+ description="How agent collaborates: cooperative, competitive, hierarchical",
600
+ ),
601
+ "peer_agents": NodeParameter(
602
+ name="peer_agents",
603
+ type=list,
604
+ required=False,
605
+ default=[],
606
+ description="List of peer agent IDs for direct communication",
607
+ ),
608
+ }
609
+ )
610
+ return params
611
+
612
+ def run(self, **kwargs) -> Dict[str, Any]:
613
+ """
614
+ Execute the A2A agent with enhanced communication capabilities.
615
+
616
+ This method extends the base LLMAgentNode execution by:
617
+ 1. Reading relevant context from the shared memory pool
618
+ 2. Enhancing the prompt with shared discoveries
619
+ 3. Executing the LLM call with enriched context
620
+ 4. Extracting insights from the response
621
+ 5. Sharing valuable insights back to the memory pool
622
+
623
+ Args:
624
+ **kwargs: All LLMAgentNode parameters plus:
625
+ agent_id (str): Unique identifier for this agent
626
+ agent_role (str): Agent's role in the team
627
+ memory_pool (SharedMemoryPoolNode): Shared memory instance
628
+ attention_filter (dict): Criteria for filtering memories
629
+ collaboration_mode (str): How agent collaborates
630
+
631
+ Returns:
632
+ Dict[str, Any]: LLMAgentNode response plus:
633
+ a2a_metadata: Information about A2A interactions including
634
+ insights_generated, shared_context_used, collaboration_stats
635
+
636
+ Side Effects:
637
+ Writes insights to shared memory pool if available
638
+ Updates internal conversation history
639
+ """
640
+ # Extract A2A specific parameters
641
+ agent_id = kwargs.get("agent_id")
642
+ agent_role = kwargs.get("agent_role", "general")
643
+ memory_pool = kwargs.get("memory_pool")
644
+ attention_filter = kwargs.get("attention_filter", {})
645
+
646
+ # Read from shared memory if available
647
+ shared_context = []
648
+ if memory_pool:
649
+ memory_result = memory_pool.run(
650
+ action="read", agent_id=agent_id, attention_filter=attention_filter
651
+ )
652
+ if memory_result.get("success"):
653
+ shared_context = memory_result.get("memories", [])
654
+
655
+ # Store provider and model for use in summarization
656
+ self._current_provider = kwargs.get("provider", "mock")
657
+ self._current_model = kwargs.get("model", "mock-model")
658
+
659
+ # Enhance messages with shared context
660
+ messages = kwargs.get("messages", [])
661
+ if shared_context:
662
+ context_summary = self._summarize_shared_context(shared_context)
663
+ enhanced_system_prompt = f"""You are agent {agent_id} with role: {agent_role}.
664
+
665
+ Relevant shared context from other agents:
666
+ {context_summary}
667
+
668
+ {kwargs.get('system_prompt', '')}"""
669
+ kwargs["system_prompt"] = enhanced_system_prompt
670
+
671
+ # Execute LLM agent
672
+ result = super().run(**kwargs)
673
+
674
+ # If successful, write insights to shared memory
675
+ if result.get("success") and memory_pool:
676
+ response_content = result.get("response", {}).get("content", "")
677
+
678
+ # Use LLM to extract insights if provider supports it
679
+ use_llm_extraction = kwargs.get("use_llm_insight_extraction", True)
680
+ provider = kwargs.get("provider", "mock")
681
+
682
+ if use_llm_extraction and provider not in ["mock"]:
683
+ # Use LLM to extract and analyze insights
684
+ insights = self._extract_insights_with_llm(
685
+ response_content, agent_role, agent_id, kwargs
686
+ )
687
+ else:
688
+ # Fallback to rule-based extraction
689
+ insights = self._extract_insights(response_content, agent_role)
690
+
691
+ # Track insight statistics
692
+ insight_stats = {
693
+ "total": len(insights),
694
+ "high_importance": sum(1 for i in insights if i["importance"] >= 0.8),
695
+ "by_type": {},
696
+ "extraction_method": (
697
+ "llm"
698
+ if use_llm_extraction and provider not in ["mock"]
699
+ else "rule-based"
700
+ ),
701
+ }
702
+
703
+ for insight in insights:
704
+ # Update type statistics
705
+ insight_type = insight.get("metadata", {}).get(
706
+ "insight_type", "general"
707
+ )
708
+ insight_stats["by_type"][insight_type] = (
709
+ insight_stats["by_type"].get(insight_type, 0) + 1
710
+ )
711
+
712
+ # Write to memory pool with enhanced context
713
+ memory_pool.run(
714
+ action="write",
715
+ agent_id=agent_id,
716
+ content=insight["content"],
717
+ tags=insight.get("tags", [agent_role]),
718
+ importance=insight.get("importance", 0.6),
719
+ segment=insight.get("segment", agent_role),
720
+ context={
721
+ "source_message": messages[-1] if messages else None,
722
+ "agent_role": agent_role,
723
+ "insight_metadata": insight.get("metadata", {}),
724
+ "timestamp": kwargs.get("timestamp", time.time()),
725
+ },
726
+ )
727
+
728
+ # Store insights in local memory for agent's own reference
729
+ for insight in insights:
730
+ self.local_memory.append(
731
+ {
732
+ "type": "insight",
733
+ "content": insight["content"],
734
+ "importance": insight["importance"],
735
+ "timestamp": time.time(),
736
+ }
737
+ )
738
+
739
+ # Add A2A metadata to result
740
+ result["a2a_metadata"] = {
741
+ "agent_id": agent_id,
742
+ "agent_role": agent_role,
743
+ "shared_context_used": len(shared_context),
744
+ "insights_generated": len(insights) if "insights" in locals() else 0,
745
+ "insight_statistics": insight_stats if "insight_stats" in locals() else {},
746
+ "memory_pool_active": memory_pool is not None,
747
+ "local_memory_size": len(self.local_memory),
748
+ }
749
+
750
+ return result
751
+
752
+ def _summarize_shared_context(self, shared_context: List[Dict[str, Any]]) -> str:
753
+ """Summarize shared context for inclusion in prompt."""
754
+ if not shared_context:
755
+ return "No relevant shared context available."
756
+
757
+ # For small context, use simple formatting
758
+ if len(shared_context) <= 3:
759
+ summary_parts = []
760
+ for memory in shared_context:
761
+ agent_id = memory.get("agent_id", "unknown")
762
+ content = memory.get("content", "")
763
+ importance = memory.get("importance", 0)
764
+ tags = ", ".join(memory.get("tags", []))
765
+
766
+ summary_parts.append(
767
+ f"- Agent {agent_id} ({importance:.1f} importance, tags: {tags}): {content}"
768
+ )
769
+ return "\n".join(summary_parts)
770
+
771
+ # For larger context, use LLM to create intelligent summary
772
+ return self._summarize_with_llm(shared_context)
773
+
774
+ def _summarize_with_llm(self, shared_context: List[Dict[str, Any]]) -> str:
775
+ """Use LLM to create an intelligent summary of shared context."""
776
+
777
+ # Prepare context for summarization
778
+ context_items = []
779
+ for memory in shared_context[:10]: # Process up to 10 most relevant
780
+ context_items.append(
781
+ {
782
+ "agent": memory.get("agent_id", "unknown"),
783
+ "content": memory.get("content", ""),
784
+ "importance": memory.get("importance", 0),
785
+ "tags": memory.get("tags", []),
786
+ "type": memory.get("context", {})
787
+ .get("insight_metadata", {})
788
+ .get("insight_type", "general"),
789
+ }
790
+ )
791
+
792
+ # Create summarization prompt
793
+ summarization_prompt = f"""Summarize the following shared insights from other agents into a concise, actionable briefing.
794
+
795
+ Shared Context Items:
796
+ {json.dumps(context_items, indent=2)}
797
+
798
+ Create a summary that:
799
+ 1. Groups related insights by theme
800
+ 2. Highlights the most important findings (importance >= 0.8)
801
+ 3. Identifies consensus points where multiple agents agree
802
+ 4. Notes any contradictions or disagreements
803
+ 5. Extracts key metrics and data points
804
+ 6. Suggests areas needing further investigation
805
+
806
+ Format the summary as a brief paragraph (max 200 words) that another agent can quickly understand and act upon.
807
+ Focus on actionable intelligence rather than just listing what each agent said."""
808
+
809
+ try:
810
+ # Use the current agent's LLM configuration for summarization
811
+ provider = getattr(self, "_current_provider", "mock")
812
+ model = getattr(self, "_current_model", "mock-model")
813
+
814
+ if provider not in ["mock"]:
815
+ summary_kwargs = {
816
+ "provider": provider,
817
+ "model": model,
818
+ "temperature": 0.3,
819
+ "messages": [
820
+ {
821
+ "role": "system",
822
+ "content": "You are an expert at synthesizing information from multiple sources into clear, actionable summaries.",
823
+ },
824
+ {"role": "user", "content": summarization_prompt},
825
+ ],
826
+ "max_tokens": 300,
827
+ }
828
+
829
+ result = super().run(**summary_kwargs)
830
+
831
+ if result.get("success"):
832
+ summary = result.get("response", {}).get("content", "")
833
+ if summary:
834
+ return f"Shared Context Summary:\n{summary}"
835
+ except Exception:
836
+ pass
837
+
838
+ # Fallback to simple summary
839
+ summary_parts = []
840
+ for memory in shared_context[:5]:
841
+ agent_id = memory.get("agent_id", "unknown")
842
+ content = memory.get("content", "")[:100] + "..."
843
+ importance = memory.get("importance", 0)
844
+
845
+ summary_parts.append(f"- {agent_id} [{importance:.1f}]: {content}")
846
+
847
+ return "Recent insights:\n" + "\n".join(summary_parts)
848
+
849
+ def _extract_insights(self, response: str, agent_role: str) -> List[Dict[str, Any]]:
850
+ """Extract important insights from agent response using advanced NLP techniques."""
851
+ insights = []
852
+
853
+ # Enhanced keyword patterns for different types of insights
854
+ insight_patterns = {
855
+ "findings": {
856
+ "keywords": [
857
+ "found",
858
+ "discovered",
859
+ "identified",
860
+ "revealed",
861
+ "uncovered",
862
+ "detected",
863
+ "observed",
864
+ "noted",
865
+ "recognized",
866
+ ],
867
+ "importance": 0.8,
868
+ "tags": ["finding", "discovery"],
869
+ },
870
+ "conclusions": {
871
+ "keywords": [
872
+ "conclude",
873
+ "therefore",
874
+ "thus",
875
+ "hence",
876
+ "consequently",
877
+ "as a result",
878
+ "in summary",
879
+ "overall",
880
+ "in conclusion",
881
+ ],
882
+ "importance": 0.9,
883
+ "tags": ["conclusion", "summary"],
884
+ },
885
+ "comparisons": {
886
+ "keywords": [
887
+ "compared to",
888
+ "versus",
889
+ "vs",
890
+ "better than",
891
+ "worse than",
892
+ "improvement",
893
+ "decline",
894
+ "increase",
895
+ "decrease",
896
+ "change",
897
+ ],
898
+ "importance": 0.7,
899
+ "tags": ["comparison", "analysis"],
900
+ },
901
+ "recommendations": {
902
+ "keywords": [
903
+ "recommend",
904
+ "suggest",
905
+ "should",
906
+ "advise",
907
+ "propose",
908
+ "best practice",
909
+ "optimal",
910
+ "ideal",
911
+ ],
912
+ "importance": 0.85,
913
+ "tags": ["recommendation", "advice"],
914
+ },
915
+ "problems": {
916
+ "keywords": [
917
+ "issue",
918
+ "problem",
919
+ "challenge",
920
+ "limitation",
921
+ "constraint",
922
+ "difficulty",
923
+ "obstacle",
924
+ "concern",
925
+ "risk",
926
+ ],
927
+ "importance": 0.75,
928
+ "tags": ["problem", "challenge"],
929
+ },
930
+ "metrics": {
931
+ "keywords": [
932
+ "percent",
933
+ "%",
934
+ "score",
935
+ "rating",
936
+ "benchmark",
937
+ "metric",
938
+ "measurement",
939
+ "performance",
940
+ "efficiency",
941
+ ],
942
+ "importance": 0.65,
943
+ "tags": ["metric", "measurement"],
944
+ },
945
+ }
946
+
947
+ # Process response by sentences for better context
948
+ import re
949
+
950
+ sentences = re.split(r"[.!?]+", response)
951
+
952
+ for sentence in sentences:
953
+ sentence = sentence.strip()
954
+ if not sentence or len(sentence) < 20:
955
+ continue
956
+
957
+ # Calculate importance based on multiple factors
958
+ importance = 0.5 # Base importance
959
+ matched_tags = set([agent_role])
960
+ insight_type = None
961
+
962
+ # Check for insight patterns
963
+ sentence_lower = sentence.lower()
964
+ for pattern_type, pattern_info in insight_patterns.items():
965
+ if any(
966
+ keyword in sentence_lower for keyword in pattern_info["keywords"]
967
+ ):
968
+ importance = max(importance, pattern_info["importance"])
969
+ matched_tags.update(pattern_info["tags"])
970
+ insight_type = pattern_type
971
+ break
972
+
973
+ # Extract entities and add as tags
974
+ # Simple entity extraction - numbers, capitalized words, technical terms
975
+ numbers = re.findall(r"\b\d+(?:\.\d+)?%?\b", sentence)
976
+ if numbers:
977
+ matched_tags.add("quantitative")
978
+ importance += 0.1
979
+
980
+ # Extract technical terms (words with specific patterns)
981
+ tech_terms = re.findall(r"\b[A-Z][a-z]+(?:[A-Z][a-z]+)*\b", sentence)
982
+ if tech_terms:
983
+ matched_tags.update(
984
+ [term.lower() for term in tech_terms[:2]]
985
+ ) # Limit tags
986
+
987
+ # Boost importance for sentences with multiple capital letters (proper nouns)
988
+ capital_words = re.findall(r"\b[A-Z][A-Za-z]+\b", sentence)
989
+ if len(capital_words) > 2:
990
+ importance += 0.05
991
+
992
+ # Check for structured data (JSON, lists, etc.)
993
+ if any(char in sentence for char in ["{", "[", ":", "-"]):
994
+ matched_tags.add("structured")
995
+ importance += 0.05
996
+
997
+ # Determine segment based on insight type and role
998
+ segment = f"{agent_role}_{insight_type}" if insight_type else agent_role
999
+
1000
+ # Create insight with rich metadata
1001
+ insight = {
1002
+ "content": sentence,
1003
+ "importance": min(importance, 1.0), # Cap at 1.0
1004
+ "tags": list(matched_tags),
1005
+ "segment": segment,
1006
+ "metadata": {
1007
+ "length": len(sentence),
1008
+ "has_numbers": bool(numbers),
1009
+ "insight_type": insight_type or "general",
1010
+ "extracted_entities": tech_terms[:3] if tech_terms else [],
1011
+ },
1012
+ }
1013
+
1014
+ insights.append(insight)
1015
+
1016
+ # Sort by importance and return top insights
1017
+ insights.sort(key=lambda x: x["importance"], reverse=True)
1018
+
1019
+ # Dynamic limit based on response quality
1020
+ # If we have many high-quality insights, return more
1021
+ high_quality_count = sum(1 for i in insights if i["importance"] >= 0.7)
1022
+ limit = min(5 if high_quality_count > 3 else 3, len(insights))
1023
+
1024
+ return insights[:limit]
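Stripped to its core, the rule-based extractor maps keyword buckets to importance levels and adds small boosts for quantitative content; a condensed standalone sketch on one sentence.

import re

patterns = {
    "findings": (["found", "discovered", "identified"], 0.8),
    "recommendations": (["recommend", "suggest", "should"], 0.85),
}

sentence = "We found that caching reduced p95 latency by 40%."
importance, tags = 0.5, {"researcher"}  # base importance plus the agent role tag
lowered = sentence.lower()
for name, (keywords, weight) in patterns.items():
    if any(keyword in lowered for keyword in keywords):
        importance = max(importance, weight)
        tags.add(name)
        break
if re.findall(r"\b\d+(?:\.\d+)?%?\b", sentence):  # quantitative boost
    importance += 0.1
    tags.add("quantitative")
print(round(min(importance, 1.0), 2), sorted(tags))  # 0.9 ['findings', 'quantitative', 'researcher']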
1025
+
1026
+ def _extract_insights_with_llm(
1027
+ self,
1028
+ response: str,
1029
+ agent_role: str,
1030
+ agent_id: str,
1031
+ original_kwargs: Dict[str, Any],
1032
+ ) -> List[Dict[str, Any]]:
1033
+ """Use LLM to extract and analyze insights from the response."""
1034
+
1035
+ # Prepare a focused prompt for insight extraction
1036
+ insight_extraction_prompt = f"""You are an AI insight extraction specialist. Analyze the following response and extract the most important insights.
1037
+
1038
+ Agent Role: {agent_role}
1039
+ Original Response:
1040
+ {response}
1041
+
1042
+ Extract 3-5 key insights from this response. For each insight:
1043
+ 1. Summarize the core finding or conclusion (max 100 words)
1044
+ 2. Assign an importance score (0.0-1.0) based on:
1045
+ - Novelty and uniqueness (0.3 weight)
1046
+ - Impact on decision-making (0.4 weight)
1047
+ - Supporting evidence quality (0.3 weight)
1048
+ 3. Categorize the insight type: finding, conclusion, comparison, recommendation, problem, metric, or pattern
1049
+ 4. Extract key entities mentioned (products, technologies, metrics, etc.)
1050
+ 5. Suggest relevant tags for categorization
1051
+
1052
+ Output your analysis as a JSON array with this structure:
1053
+ [
1054
+ {{
1055
+ "content": "The core insight summarized concisely",
1056
+ "importance": 0.85,
1057
+ "type": "finding",
1058
+ "entities": ["MacBook Air M3", "M2", "battery life"],
1059
+ "tags": ["performance", "comparison", "hardware"],
1060
+ "evidence": "Brief supporting evidence from the text"
1061
+ }}
1062
+ ]
1063
+
1064
+ Focus on insights that would be valuable for other agents to know. Ensure the JSON is valid."""
1065
+
1066
+ try:
1067
+ # Create a sub-call to the LLM for insight extraction
1068
+ extraction_kwargs = {
1069
+ "provider": original_kwargs.get("provider", "ollama"),
1070
+ "model": original_kwargs.get("model", "mistral"),
1071
+ "temperature": 0.3, # Lower temperature for more focused extraction
1072
+ "messages": [
1073
+ {
1074
+ "role": "system",
1075
+ "content": "You are an expert at analyzing text and extracting structured insights. Always respond with valid JSON.",
1076
+ },
1077
+ {"role": "user", "content": insight_extraction_prompt},
1078
+ ],
1079
+ "max_tokens": original_kwargs.get("max_tokens", 1000),
1080
+ }
1081
+
1082
+ # Execute LLM call for insight extraction
1083
+ extraction_result = super().run(**extraction_kwargs)
1084
+
1085
+ if extraction_result.get("success"):
1086
+ extracted_content = extraction_result.get("response", {}).get(
1087
+ "content", ""
1088
+ )
1089
+
1090
+ # Parse the JSON response
1091
+ import json
1092
+ import re
1093
+
1094
+ # Try to extract JSON from the response
1095
+ json_match = re.search(r"\[.*\]", extracted_content, re.DOTALL)  # greedy, so nested arrays stay inside the match
1096
+ if json_match:
1097
+ try:
1098
+ extracted_insights = json.loads(json_match.group())
1099
+
1100
+ # Convert to our insight format
1101
+ insights = []
1102
+ for item in extracted_insights[:5]: # Limit to 5 insights
1103
+ insight = {
1104
+ "content": item.get("content", ""),
1105
+ "importance": min(
1106
+ max(item.get("importance", 0.5), 0.0), 1.0
1107
+ ),
1108
+ "tags": item.get("tags", []) + [agent_role],
1109
+ "segment": f"{agent_role}_{item.get('type', 'general')}",
1110
+ "metadata": {
1111
+ "insight_type": item.get("type", "general"),
1112
+ "extracted_entities": item.get("entities", []),
1113
+ "evidence": item.get("evidence", ""),
1114
+ "llm_extracted": True,
1115
+ },
1116
+ }
1117
+ insights.append(insight)
1118
+
1119
+ return insights
1120
+ except json.JSONDecodeError:
1121
+ pass
1122
+
1123
+ except Exception:
1124
+ # Log the error but don't fail - fall back to rule-based extraction
1125
+ pass
1126
+
1127
+ # If LLM extraction fails, fall back to rule-based
1128
+ return self._extract_insights(response, agent_role)
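The extraction prompt asks the model for a JSON array, so the parsing step has to tolerate prose around the payload and fall back when decoding fails. A standalone sketch of that defensive parse; the example output text is invented for illustration.

import json
import re

llm_output = """Here are the insights:
[
  {"content": "Caching cut p95 latency by 40%", "importance": 0.85,
   "type": "finding", "entities": ["cache"], "tags": ["performance"]}
]"""

match = re.search(r"\[.*\]", llm_output, re.DOTALL)  # greedy, so nested arrays stay inside
insights = []
if match:
    try:
        insights = json.loads(match.group())
    except json.JSONDecodeError:
        insights = []  # caller would fall back to rule-based extraction
print(insights[0]["importance"] if insights else "fallback")  # 0.85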
1129
+
1130
+
1131
+ @register_node()
1132
+ class A2ACoordinatorNode(CycleAwareNode):
1133
+ """
1134
+ Coordinates communication and task delegation between A2A agents.
1135
+
1136
+ This node acts as a central orchestrator for multi-agent systems, managing task
1137
+ distribution, consensus building, and workflow coordination. It implements various
1138
+ coordination strategies to optimize agent utilization and ensure effective
1139
+ collaboration across heterogeneous agent teams.
1140
+
1141
+ Design Philosophy:
1142
+ The A2ACoordinatorNode serves as a decentralized coordination mechanism that
1143
+ enables agents to self-organize without requiring a fixed hierarchy. It provides
1144
+ flexible coordination patterns (delegation, broadcast, consensus, workflow)
1145
+ that can be composed to create sophisticated multi-agent behaviors.
1146
+
1147
+ Upstream Dependencies:
1148
+ - ProblemAnalyzerNode: Provides decomposed tasks and requirements
1149
+ - TeamFormationNode: Supplies formed teams and agent assignments
1150
+ - QueryAnalysisNode: Delivers analyzed queries needing coordination
1151
+ - OrchestrationManagerNode: High-level orchestration directives
1152
+
1153
+ Downstream Consumers:
1154
+ - A2AAgentNode: Receives task assignments and coordination messages
1155
+ - SharedMemoryPoolNode: Stores coordination decisions and progress
1156
+ - SolutionEvaluatorNode: Evaluates coordinated solution components
1157
+ - ConvergenceDetectorNode: Monitors coordination effectiveness
1158
+
1159
+ Configuration:
1160
+ The coordinator adapts its behavior based on the coordination strategy
1161
+ selected and the characteristics of available agents. No static configuration
1162
+ is required, but runtime parameters control coordination behavior.
1163
+
1164
+ Implementation Details:
1165
+ - Maintains registry of active agents with capabilities and status
1166
+ - Implements multiple delegation strategies (best_match, round_robin, auction)
1167
+ - Tracks task assignments and agent performance metrics
1168
+ - Supports both synchronous and asynchronous coordination patterns
1169
+ - Manages consensus voting with configurable thresholds
1170
+
1171
+ Error Handling:
1172
+ - Handles agent failures with automatic reassignment
1173
+ - Validates task requirements before delegation
1174
+ - Falls back to broadcast when specific agents unavailable
1175
+ - Returns partial results if consensus cannot be reached
1176
+
1177
+ Side Effects:
1178
+ - Maintains internal agent registry across calls
1179
+ - Updates agent performance metrics after task completion
1180
+ - May modify task priorities based on agent availability
1181
+
1182
+ Examples:
1183
+ >>> # Create coordinator
1184
+ >>> coordinator = A2ACoordinatorNode()
1185
+ >>>
1186
+ >>> # Register agents
1187
+ >>> coordinator.run(
1188
+ ... action="register",
1189
+ ... agent_info={
1190
+ ... "id": "analyst_001",
1191
+ ... "skills": ["data_analysis", "statistics"],
1192
+ ... "role": "analyst"
1193
+ ... }
1194
+ ... )
1195
+ >>>
1196
+ >>> # Delegate task with best match strategy
1197
+ >>> result = coordinator.run(
1198
+ ... action="delegate",
1199
+ ... task={
1200
+ ... "type": "analysis",
1201
+ ... "description": "Analyze sales data",
1202
+ ... "required_skills": ["data_analysis"],
1203
+ ... "priority": "high"
1204
+ ... },
1205
+ ... available_agents=[
1206
+ ... {"id": "analyst_001", "skills": ["data_analysis"]},
1207
+ ... {"id": "researcher_001", "skills": ["research"]}
1208
+ ... ],
1209
+ ... coordination_strategy="best_match"
1210
+ ... )
1211
+ >>> assert result["success"] == True
1212
+ >>> assert result["assigned_agent"] == "analyst_001"
1213
+ >>>
1214
+ >>> # Build consensus among agents
1215
+ >>> consensus_result = coordinator.run(
1216
+ ... action="consensus",
1217
+ ... proposal="Implement new feature X",
1218
+ ... voting_agents=["agent1", "agent2", "agent3"],
1219
+ ... consensus_threshold=0.66
1220
+ ... )
1221
+ """
1222
+
1223
+ def __init__(self):
1224
+ super().__init__()
1225
+ self.registered_agents = {}
1226
+ self.task_queue = deque()
1227
+ self.consensus_sessions = {}
1228
+
1229
+ def get_parameters(self) -> Dict[str, NodeParameter]:
1230
+ return {
1231
+ "action": NodeParameter(
1232
+ name="action",
1233
+ type=str,
1234
+ required=False,
1235
+ default="coordinate",
1236
+ description="Action: 'register', 'delegate', 'broadcast', 'consensus', 'coordinate'",
1237
+ ),
1238
+ "agent_info": NodeParameter(
1239
+ name="agent_info",
1240
+ type=dict,
1241
+ required=False,
1242
+ description="Information about agent (for registration)",
1243
+ ),
1244
+ "task": NodeParameter(
1245
+ name="task",
1246
+ type=dict,
1247
+ required=False,
1248
+ description="Task to delegate or coordinate",
1249
+ ),
1250
+ "message": NodeParameter(
1251
+ name="message",
1252
+ type=dict,
1253
+ required=False,
1254
+ description="Message to broadcast",
1255
+ ),
1256
+ "consensus_proposal": NodeParameter(
1257
+ name="consensus_proposal",
1258
+ type=dict,
1259
+ required=False,
1260
+ description="Proposal for consensus",
1261
+ ),
1262
+ "available_agents": NodeParameter(
1263
+ name="available_agents",
1264
+ type=list,
1265
+ required=False,
1266
+ default=[],
1267
+ description="List of available agents",
1268
+ ),
1269
+ "coordination_strategy": NodeParameter(
1270
+ name="coordination_strategy",
1271
+ type=str,
1272
+ required=False,
1273
+ default="best_match",
1274
+ description="Strategy: 'best_match', 'round_robin', 'broadcast', 'auction'",
1275
+ ),
1276
+ }
1277
+
1278
+ def run(self, context: Dict[str, Any], **kwargs) -> Dict[str, Any]:
1279
+ """
1280
+ Execute coordination action with cycle awareness.
1281
+
1282
+ Routes coordination requests to appropriate handlers based on action
1283
+ parameter. Tracks coordination history and agent performance across
1284
+ iterations for cycle-aware optimization.
1285
+
1286
+ Args:
1287
+ context: Execution context with cycle information
1288
+ **kwargs: Action-specific parameters including:
1289
+ action (str): Type of coordination action
1290
+ agent_info (dict): Agent registration details
1291
+ task (dict): Task to delegate
1292
+ available_agents (list): Agents available for tasks
1293
+ coordination_strategy (str): Delegation strategy
1294
+
1295
+ Returns:
1296
+ Dict[str, Any]: Action results with cycle metadata including:
1297
+ success (bool): Whether action succeeded
1298
+ cycle_info (dict): Iteration and history information
1299
+ Additional action-specific fields
1300
+
1301
+ Raises:
1302
+ None - errors returned in result dictionary
1303
+
1304
+ Side Effects:
1305
+ Updates internal agent registry
1306
+ Modifies coordination history
1307
+ Updates agent performance metrics
1308
+
1309
+ Examples:
1310
+ >>> coordinator = A2ACoordinatorNode()
1311
+ >>> result = coordinator.run(context,
1312
+ ... action=\"delegate\",
1313
+ ... task={\"type\": \"analysis\", \"required_skills\": [\"data\"]},
1314
+ ... coordination_strategy=\"best_match\"
1315
+ ... )
1316
+ >>> assert result[\"success\"] == True
1317
+ """
1318
+ action = kwargs.get("action")
1319
+
1320
+ # Get cycle information using CycleAwareNode helpers
1321
+ iteration = self.get_iteration(context)
1322
+ is_first = self.is_first_iteration(context)
1323
+ prev_state = self.get_previous_state(context)
1324
+
1325
+ # Initialize cycle-aware coordination state
1326
+ if is_first:
1327
+ self.log_cycle_info(context, f"Starting coordination with action: {action}")
1328
+ coordination_history = []
1329
+ agent_performance_history = {}
1330
+ else:
1331
+ coordination_history = prev_state.get("coordination_history", [])
1332
+ agent_performance_history = prev_state.get("agent_performance", {})
1333
+
1334
+ # Execute the coordination action
1335
+ if action == "register":
1336
+ result = self._register_agent(kwargs, context)
1337
+ elif action == "delegate":
1338
+ result = self._delegate_task(
1339
+ kwargs, context, coordination_history, agent_performance_history
1340
+ )
1341
+ elif action == "broadcast":
1342
+ result = self._broadcast_message(kwargs, context)
1343
+ elif action == "consensus":
1344
+ result = self._manage_consensus(kwargs, context, coordination_history)
1345
+ elif action == "coordinate":
1346
+ result = self._coordinate_workflow(kwargs, context, iteration)
1347
+ else:
1348
+ result = {"success": False, "error": f"Unknown action: {action}"}
1349
+
1350
+ # Track coordination history for cycle learning
1351
+ coordination_event = {
1352
+ "iteration": iteration,
1353
+ "action": action,
1354
+ "success": result.get("success", False),
1355
+ "timestamp": time.time(),
1356
+ "details": {k: v for k, v in result.items() if k not in ["success"]},
1357
+ }
1358
+ coordination_history.append(coordination_event)
1359
+
1360
+ # Update agent performance tracking
1361
+ if action == "delegate" and result.get("success"):
1362
+ agent_id = result.get("delegated_to")
1363
+ if agent_id:
1364
+ if agent_id not in agent_performance_history:
1365
+ agent_performance_history[agent_id] = {
1366
+ "assignments": 0,
1367
+ "success_rate": 1.0,
1368
+ }
1369
+ agent_performance_history[agent_id]["assignments"] += 1
1370
+
1371
+ # Add cycle-aware metadata to result
1372
+ result.update(
1373
+ {
1374
+ "cycle_info": {
1375
+ "iteration": iteration,
1376
+ "coordination_history_length": len(coordination_history),
1377
+ "active_agents": len(self.registered_agents),
1378
+ "performance_tracked_agents": len(agent_performance_history),
1379
+ }
1380
+ }
1381
+ )
1382
+
1383
+ # Log progress
1384
+ if iteration % 5 == 0: # Log every 5 iterations
1385
+ self.log_cycle_info(
1386
+ context,
1387
+ f"Coordination stats: {len(coordination_history)} events, {len(self.registered_agents)} agents",
1388
+ )
1389
+
1390
+ # Persist state for next iteration
1391
+ return {
1392
+ **result,
1393
+ **self.set_cycle_state(
1394
+ {
1395
+ "coordination_history": coordination_history[
1396
+ -50:
1397
+ ], # Keep last 50 events
1398
+ "agent_performance": agent_performance_history,
1399
+ }
1400
+ ),
1401
+ }
1402
+
1403
+ def _register_agent(
1404
+ self, kwargs: Dict[str, Any], context: Dict[str, Any]
1405
+ ) -> Dict[str, Any]:
1406
+ """Register an agent with the coordinator."""
1407
+ agent_info = kwargs.get("agent_info", {})
1408
+ agent_id = agent_info.get("id")
1409
+
1410
+ if not agent_id:
1411
+ return {"success": False, "error": "Agent ID required"}
1412
+
1413
+ self.registered_agents[agent_id] = {
1414
+ "id": agent_id,
1415
+ "skills": agent_info.get("skills", []),
1416
+ "role": agent_info.get("role", "general"),
1417
+ "status": "available",
1418
+ "registered_at": time.time(),
1419
+ "task_count": 0,
1420
+ "success_rate": 1.0,
1421
+ }
1422
+
1423
+ return {
1424
+ "success": True,
1425
+ "agent_id": agent_id,
1426
+ "registered_agents": list(self.registered_agents.keys()),
1427
+ }
1428
+
1429
+ def _delegate_task(
1430
+ self,
1431
+ kwargs: Dict[str, Any],
1432
+ context: Dict[str, Any],
1433
+ coordination_history: List[Dict],
1434
+ agent_performance: Dict,
1435
+ ) -> Dict[str, Any]:
1436
+ """Delegate task to most suitable agent with cycle-aware optimization."""
1437
+ task = kwargs.get("task", {})
1438
+ available_agents = kwargs.get("available_agents", [])
1439
+ strategy = kwargs.get("coordination_strategy", "best_match")
1440
+
1441
+ if not available_agents:
1442
+ available_agents = [
1443
+ agent
1444
+ for agent in self.registered_agents.values()
1445
+ if agent["status"] == "available"
1446
+ ]
1447
+
1448
+ if not available_agents:
1449
+ return {"success": False, "error": "No available agents"}
1450
+
1451
+ # Use cycle-aware agent selection based on performance history
1452
+ iteration = self.get_iteration(context)
1453
+
1454
+ # Select agent based on strategy with cycle learning
1455
+ if strategy == "best_match":
1456
+ selected_agent = self._find_best_match_cycle_aware(
1457
+ task, available_agents, agent_performance, iteration
1458
+ )
1459
+ elif strategy == "round_robin":
1460
+ # Cycle-aware round-robin based on iteration
1461
+ agent_index = iteration % len(available_agents)
1462
+ selected_agent = available_agents[agent_index]
1463
+ elif strategy == "auction":
1464
+ selected_agent = self._run_auction_cycle_aware(
1465
+ task, available_agents, agent_performance
1466
+ )
1467
+ else:
1468
+ selected_agent = available_agents[0]
1469
+
1470
+ if not selected_agent:
1471
+ return {"success": False, "error": "No suitable agent found"}
1472
+
1473
+ # Update agent status
1474
+ agent_id = selected_agent.get("id")
1475
+ if agent_id in self.registered_agents:
1476
+ self.registered_agents[agent_id]["status"] = "busy"
1477
+ self.registered_agents[agent_id]["task_count"] += 1
1478
+
1479
+ return {
1480
+ "success": True,
1481
+ "delegated_to": agent_id,
1482
+ "task": task,
1483
+ "strategy": strategy,
1484
+ "agent_performance_score": agent_performance.get(agent_id, {}).get(
1485
+ "success_rate", 1.0
1486
+ ),
1487
+ "iteration": iteration,
1488
+ }
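The two simplest strategies reduce to an iteration counter and a skill-overlap count; the cycle-aware variants layer performance history on top of this. A standalone sketch with illustrative agents.

agents = [
    {"id": "analyst_001", "skills": ["data_analysis", "statistics"]},
    {"id": "researcher_001", "skills": ["research", "writing"]},
]
task = {"required_skills": ["data_analysis"]}

# round_robin: the current iteration picks the agent
iteration = 7
print(agents[iteration % len(agents)]["id"])  # researcher_001

# best_match: largest overlap between agent skills and required skills
def skill_overlap(agent):
    return len(set(agent["skills"]) & set(task["required_skills"]))

print(max(agents, key=skill_overlap)["id"])  # analyst_001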
1489
+
1490
+    def _broadcast_message(
+        self, kwargs: Dict[str, Any], context: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Broadcast message to relevant agents."""
+        message = kwargs.get("message", {})
+        target_roles = message.get("target_roles", [])
+        target_skills = message.get("target_skills", [])
+
+        recipients = []
+        for agent in self.registered_agents.values():
+            # Check role match
+            if target_roles and agent["role"] not in target_roles:
+                continue
+
+            # Check skills match
+            if target_skills:
+                if not any(skill in agent["skills"] for skill in target_skills):
+                    continue
+
+            recipients.append(agent["id"])
+
+        return {
+            "success": True,
+            "recipients": recipients,
+            "message": message,
+            "broadcast_time": time.time(),
+        }
+
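The filtering above matches roles exactly but requires only one overlapping skill. A standalone sketch of the same logic, using made-up agent data:

    # Minimal, runnable sketch of the broadcast filtering (illustrative data):
    agents = [
        {"id": "a1", "role": "analyst", "skills": ["sql", "viz"]},
        {"id": "a2", "role": "worker", "skills": ["etl"]},
    ]
    target_roles, target_skills = ["analyst"], ["sql"]
    recipients = [
        a["id"]
        for a in agents
        if (not target_roles or a["role"] in target_roles)
        and (not target_skills or any(s in a["skills"] for s in target_skills))
    ]
    assert recipients == ["a1"]  # a2 fails the role check before skills are tested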
+    def _manage_consensus(
+        self,
+        kwargs: Dict[str, Any],
+        context: Dict[str, Any],
+        coordination_history: List[Dict],
+    ) -> Dict[str, Any]:
+        """Manage consensus building among agents."""
+        proposal = kwargs.get("consensus_proposal", {})
+        session_id = proposal.get("session_id", str(uuid.uuid4()))
+
+        if session_id not in self.consensus_sessions:
+            self.consensus_sessions[session_id] = {
+                "proposal": proposal,
+                "votes": {},
+                "started_at": time.time(),
+                "status": "open",
+            }
+
+        session = self.consensus_sessions[session_id]
+
+        # Handle vote
+        if "vote" in kwargs:
+            agent_id = kwargs.get("agent_id")
+            vote = kwargs.get("vote")
+            session["votes"][agent_id] = vote
+
+        # Check if consensus reached
+        total_agents = len(self.registered_agents)
+        votes_cast = len(session["votes"])
+
+        if votes_cast >= total_agents * 0.5:  # Simple majority
+            yes_votes = sum(1 for v in session["votes"].values() if v)
+            consensus_reached = yes_votes > votes_cast / 2
+
+            session["status"] = "completed"
+            session["result"] = "approved" if consensus_reached else "rejected"
+
+            return {
+                "success": True,
+                "session_id": session_id,
+                "consensus_reached": consensus_reached,
+                "result": session["result"],
+                "votes": session["votes"],
+            }
+
+        return {
+            "success": True,
+            "session_id": session_id,
+            "status": session["status"],
+            "votes_cast": votes_cast,
+            "votes_needed": int(total_agents * 0.5),
+        }
+
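A worked tally for the rule above: with 4 registered agents, the tally closes once 2 votes are cast (votes_cast >= 4 * 0.5), and approval needs strictly more than half of the cast votes. Standalone sketch with illustrative votes:

    # Standalone illustration of the consensus tally (not the node's API):
    total_agents = 4
    votes = {"a1": True, "a2": False}                # 2 of 4 agents have voted
    votes_cast = len(votes)                          # 2 >= 4 * 0.5 -> tally closes
    yes_votes = sum(1 for v in votes.values() if v)
    consensus_reached = yes_votes > votes_cast / 2   # 1 > 1.0 -> False
    assert consensus_reached is False                # session result: "rejected"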
+    def _coordinate_workflow(
+        self, kwargs: Dict[str, Any], context: Dict[str, Any], iteration: int
+    ) -> Dict[str, Any]:
+        """Coordinate a multi-agent workflow."""
+        workflow_spec = kwargs.get("task", {})
+        steps = workflow_spec.get("steps", [])
+
+        coordination_plan = []
+        for step in steps:
+            required_skills = step.get("required_skills", [])
+            available_agents = [
+                agent
+                for agent in self.registered_agents.values()
+                if any(skill in agent["skills"] for skill in required_skills)
+            ]
+
+            if available_agents:
+                selected_agent = self._find_best_match(step, available_agents)
+                coordination_plan.append(
+                    {
+                        "step": step["name"],
+                        "assigned_to": selected_agent["id"],
+                        "skills_matched": [
+                            s for s in required_skills if s in selected_agent["skills"]
+                        ],
+                    }
+                )
+            else:
+                coordination_plan.append(
+                    {
+                        "step": step["name"],
+                        "assigned_to": None,
+                        "error": "No agent with required skills",
+                    }
+                )
+
+        return {
+            "success": True,
+            "workflow": workflow_spec.get("name", "unnamed"),
+            "coordination_plan": coordination_plan,
+            "total_steps": len(steps),
+            "assigned_steps": sum(1 for p in coordination_plan if p.get("assigned_to")),
+        }
+
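The `task` payload consumed above doubles as the workflow spec; the fields read by the code are `name`, `steps`, and each step's `name` and `required_skills`. An illustrative spec (values are made up, not from the package):

    # Illustrative workflow spec passed via kwargs["task"]:
    workflow_spec = {
        "name": "daily_report",
        "steps": [
            {"name": "extract", "required_skills": ["sql"]},
            {"name": "summarize", "required_skills": ["llm", "writing"]},
        ],
    }
    # Each step is assigned to the best-matching registered agent, or recorded
    # with "assigned_to": None and an error if no agent has a matching skill.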
+    def _find_best_match(
+        self, task: Dict[str, Any], agents: List[Dict[str, Any]]
+    ) -> Optional[Dict[str, Any]]:
+        """Find best matching agent for task."""
+        required_skills = task.get("required_skills", [])
+        if not required_skills:
+            return agents[0] if agents else None
+
+        best_agent = None
+        best_score = 0
+
+        for agent in agents:
+            agent_skills = set(agent.get("skills", []))
+            required_set = set(required_skills)
+
+            # Calculate match score
+            matches = agent_skills & required_set
+            score = len(matches) / len(required_set) if required_set else 0
+
+            # Consider success rate
+            success_rate = agent.get("success_rate", 1.0)
+            score *= success_rate
+
+            if score > best_score:
+                best_score = score
+                best_agent = agent
+
+        return best_agent
+
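A worked score for the matching loop above: covering 2 of 3 required skills with a 0.9 success rate yields (2/3) * 0.9 = 0.6. Standalone sketch with illustrative values:

    # Standalone illustration of the scoring rule used above:
    required_set = {"sql", "viz", "etl"}
    agent_skills = {"sql", "viz"}
    success_rate = 0.9
    score = (len(agent_skills & required_set) / len(required_set)) * success_rate
    assert abs(score - 0.6) < 1e-9   # 2/3 coverage * 0.9 success rate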
+    def _run_auction(
+        self, task: Dict[str, Any], agents: List[Dict[str, Any]]
+    ) -> Optional[Dict[str, Any]]:
+        """Run auction-based task assignment."""
+        # Simplified auction - agents bid based on their capability
+        bids = []
+
+        for agent in agents:
+            # Calculate bid based on skill match and availability
+            required_skills = set(task.get("required_skills", []))
+            agent_skills = set(agent.get("skills", []))
+
+            skill_match = (
+                len(required_skills & agent_skills) / len(required_skills)
+                if required_skills
+                else 1.0
+            )
+            workload = 1.0 - (agent.get("task_count", 0) / 10.0)  # Lower bid if busy
+
+            bid_value = skill_match * workload * agent.get("success_rate", 1.0)
+
+            bids.append({"agent": agent, "bid": bid_value})
+
+        # Select highest bidder
+        if bids:
+            bids.sort(key=lambda x: x["bid"], reverse=True)
+            return bids[0]["agent"]
+
+        return None
+
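A worked bid for the auction above: full skill coverage, 3 queued tasks, and a 0.8 success rate give 1.0 * 0.7 * 0.8 = 0.56; note that the workload term goes negative once an agent holds more than 10 tasks, which effectively removes it from contention. Standalone sketch with illustrative values:

    # Standalone illustration of the bid formula:
    skill_match = 1.0                    # all required skills covered
    workload = 1.0 - (3 / 10.0)          # 3 tasks already assigned -> 0.7
    success_rate = 0.8
    bid_value = skill_match * workload * success_rate
    assert abs(bid_value - 0.56) < 1e-9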
+    def _find_best_match_cycle_aware(
+        self,
+        task: Dict[str, Any],
+        agents: List[Dict[str, Any]],
+        agent_performance: Dict[str, Dict],
+        iteration: int,
+    ) -> Optional[Dict[str, Any]]:
+        """Find best matching agent using cycle-aware performance data."""
+        required_skills = task.get("required_skills", [])
+        if not required_skills:
+            # When no specific skills required, prefer agents with better historical performance
+            if agent_performance:
+                best_agent = None
+                best_score = 0
+                for agent in agents:
+                    agent_id = agent.get("id")
+                    perf = agent_performance.get(
+                        agent_id, {"success_rate": 1.0, "assignments": 0}
+                    )
+                    # Balance experience and success rate
+                    experience_factor = min(
+                        perf["assignments"] / 10.0, 1.0
+                    )  # Max at 10 assignments
+                    score = perf["success_rate"] * (0.7 + 0.3 * experience_factor)
+                    if score > best_score:
+                        best_score = score
+                        best_agent = agent
+                return best_agent or (agents[0] if agents else None)
+            return agents[0] if agents else None
+
+        best_agent = None
+        best_score = 0
+
+        for agent in agents:
+            agent_id = agent.get("id")
+            agent_skills = set(agent.get("skills", []))
+            required_set = set(required_skills)
+
+            # Calculate skill match score
+            matches = agent_skills & required_set
+            skill_score = len(matches) / len(required_set) if required_set else 0
+
+            # Get performance history
+            perf = agent_performance.get(
+                agent_id, {"success_rate": 1.0, "assignments": 0}
+            )
+            performance_score = perf["success_rate"]
+
+            # Experience bonus (agents with more assignments get slight preference)
+            experience_bonus = min(perf["assignments"] * 0.05, 0.2)  # Max 20% bonus
+
+            # Cycle adaptation: prefer different agents in different iterations to explore
+            diversity_factor = 1.0
+            if iteration > 0 and agent_performance:
+                recent_assignments = sum(
+                    1 for p in agent_performance.values() if p["assignments"] > 0
+                )
+                if recent_assignments > 0:
+                    agent_usage_ratio = perf["assignments"] / recent_assignments
+                    if agent_usage_ratio > 0.5:  # Over-used agent
+                        diversity_factor = 0.8  # Slight penalty
+
+            # Combined score
+            final_score = (
+                skill_score * performance_score * diversity_factor
+            ) + experience_bonus
+
+            if final_score > best_score:
+                best_score = final_score
+                best_agent = agent
+
+        return best_agent
+
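Worked numbers for the combined score above: 2 of 3 skills matched, a 0.9 success rate, 6 prior assignments (bonus capped at 0.2), and an over-used agent (diversity factor 0.8) give (2/3 * 0.9 * 0.8) + 0.2 = 0.68. Standalone sketch with illustrative values:

    # Standalone illustration of the cycle-aware combined score:
    skill_score = 2 / 3                       # 2 of 3 required skills matched
    performance_score = 0.9                   # historical success rate
    experience_bonus = min(6 * 0.05, 0.2)     # 6 prior assignments -> capped at 0.2
    diversity_factor = 0.8                    # agent handled > 50% of recent work
    final_score = (skill_score * performance_score * diversity_factor) + experience_bonus
    assert abs(final_score - 0.68) < 1e-9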
+    def _run_auction_cycle_aware(
+        self,
+        task: Dict[str, Any],
+        agents: List[Dict[str, Any]],
+        agent_performance: Dict[str, Dict],
+    ) -> Optional[Dict[str, Any]]:
+        """Run auction-based task assignment with cycle-aware bidding."""
+        bids = []
+
+        for agent in agents:
+            agent_id = agent.get("id")
+
+            # Calculate bid based on skill match and availability (original logic)
+            required_skills = set(task.get("required_skills", []))
+            agent_skills = set(agent.get("skills", []))
+
+            skill_match = (
+                len(required_skills & agent_skills) / len(required_skills)
+                if required_skills
+                else 1.0
+            )
+            workload = 1.0 - (agent.get("task_count", 0) / 10.0)  # Lower bid if busy
+
+            # Enhance with performance history
+            perf = agent_performance.get(
+                agent_id, {"success_rate": 1.0, "assignments": 0}
+            )
+            performance_factor = perf["success_rate"]
+
+            # Experience factor (slight preference for experienced agents)
+            experience_factor = min(
+                1.0 + (perf["assignments"] * 0.02), 1.2
+            )  # Max 20% boost
+
+            bid_value = skill_match * workload * performance_factor * experience_factor
+
+            bids.append({"agent": agent, "bid": bid_value})
+
+        # Select highest bidder
+        if bids:
+            bids.sort(key=lambda x: x["bid"], reverse=True)
+            return bids[0]["agent"]
+
+        return None
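A worked cycle-aware bid: full skill coverage, 2 queued tasks, a 0.9 success rate, and 5 prior assignments give 1.0 * 0.8 * 0.9 * 1.1 = 0.792. Standalone sketch with illustrative values:

    # Standalone illustration of the cycle-aware bid formula:
    skill_match = 1.0                               # all required skills covered
    workload = 1.0 - (2 / 10.0)                     # 2 tasks queued -> 0.8
    performance_factor = 0.9                        # historical success rate
    experience_factor = min(1.0 + (5 * 0.02), 1.2)  # 5 prior assignments -> 1.1
    bid_value = skill_match * workload * performance_factor * experience_factor
    assert abs(bid_value - 0.792) < 1e-9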