kailash 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. kailash/__init__.py +1 -1
  2. kailash/api/__init__.py +11 -1
  3. kailash/api/gateway.py +394 -0
  4. kailash/api/mcp_integration.py +478 -0
  5. kailash/api/workflow_api.py +29 -13
  6. kailash/nodes/ai/__init__.py +40 -4
  7. kailash/nodes/ai/a2a.py +1143 -0
  8. kailash/nodes/ai/agents.py +120 -6
  9. kailash/nodes/ai/ai_providers.py +224 -30
  10. kailash/nodes/ai/embedding_generator.py +34 -38
  11. kailash/nodes/ai/intelligent_agent_orchestrator.py +2114 -0
  12. kailash/nodes/ai/llm_agent.py +351 -356
  13. kailash/nodes/ai/self_organizing.py +1624 -0
  14. kailash/nodes/api/http.py +106 -25
  15. kailash/nodes/api/rest.py +116 -21
  16. kailash/nodes/base.py +60 -64
  17. kailash/nodes/code/python.py +61 -42
  18. kailash/nodes/data/__init__.py +10 -10
  19. kailash/nodes/data/readers.py +117 -66
  20. kailash/nodes/data/retrieval.py +1 -1
  21. kailash/nodes/data/sharepoint_graph.py +23 -25
  22. kailash/nodes/data/sql.py +24 -26
  23. kailash/nodes/data/writers.py +41 -44
  24. kailash/nodes/logic/__init__.py +9 -3
  25. kailash/nodes/logic/async_operations.py +60 -21
  26. kailash/nodes/logic/operations.py +43 -22
  27. kailash/nodes/logic/workflow.py +26 -18
  28. kailash/nodes/mcp/client.py +29 -33
  29. kailash/nodes/transform/__init__.py +8 -1
  30. kailash/nodes/transform/formatters.py +1 -1
  31. kailash/nodes/transform/processors.py +119 -4
  32. kailash/tracking/metrics_collector.py +6 -7
  33. kailash/utils/export.py +2 -2
  34. kailash/utils/templates.py +16 -16
  35. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/METADATA +293 -29
  36. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/RECORD +40 -35
  37. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/WHEEL +0 -0
  38. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/entry_points.txt +0 -0
  39. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/licenses/LICENSE +0 -0
  40. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/a2a.py (new file)
@@ -0,0 +1,1143 @@
+ """Agent-to-Agent (A2A) communication nodes with shared memory pools.
+
+ This module implements multi-agent communication with selective attention mechanisms,
+ enabling efficient collaboration between AI agents while preventing information overload.
+ """
+
+ import json
+ import time
+ import uuid
+ from collections import defaultdict, deque
+ from datetime import datetime
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple
+
+ from kailash.nodes.ai.llm_agent import LLMAgentNode
+ from kailash.nodes.base import Node, NodeParameter, register_node
+
+
+ @register_node()
+ class SharedMemoryPoolNode(Node):
+     """
+     Central memory pool that multiple agents can read from and write to.
+
+     This node implements a sophisticated shared memory system with selective attention
+     mechanisms, enabling efficient multi-agent collaboration while preventing information
+     overload through intelligent filtering and segmentation.
+
+     Design Philosophy:
+         The SharedMemoryPoolNode acts as a cognitive workspace where agents can share
+         discoveries, insights, and intermediate results. It implements attention-based
+         filtering inspired by human selective attention, allowing agents to focus on
+         relevant information without being overwhelmed by the full memory pool.
+
+     Upstream Dependencies:
+         - A2AAgentNode: Primary writer of memories with insights and discoveries
+         - A2ACoordinatorNode: Writes coordination messages and task assignments
+         - Any custom agent nodes that need to share information
+
+     Downstream Consumers:
+         - A2AAgentNode: Reads relevant memories to enhance context
+         - A2ACoordinatorNode: Monitors agent progress through memory queries
+         - SolutionEvaluatorNode: Aggregates insights for evaluation
+         - Any analysis or visualization nodes needing shared context
+
+     Configuration:
+         This node is typically configured at workflow initialization and doesn't
+         require runtime configuration. Memory segmentation and size limits can
+         be adjusted through class attributes.
+
+     Implementation Details:
+         - Uses segmented memory pools for different types of information
+         - Implements tag-based indexing for fast retrieval
+         - Supports importance-weighted attention filtering
+         - Maintains agent subscription patterns for targeted delivery
+         - Automatically manages memory size through FIFO eviction
+
+     Error Handling:
+         - Returns empty results for invalid queries rather than failing
+         - Handles missing segments gracefully
+         - Validates importance scores to [0, 1] range
+
+     Side Effects:
+         - Maintains internal memory state across workflow execution
+         - Memory persists for the lifetime of the node instance
+         - Does not persist to disk or external storage
+
+     Examples:
+         >>> # Create a shared memory pool
+         >>> memory_pool = SharedMemoryPoolNode()
+         >>>
+         >>> # Write memory from an agent
+         >>> result = memory_pool.run(
+         ...     action="write",
+         ...     agent_id="researcher_001",
+         ...     content="Found correlation between X and Y",
+         ...     tags=["research", "correlation", "data"],
+         ...     importance=0.8,
+         ...     segment="findings"
+         ... )
+         >>> assert result["success"] == True
+         >>> assert result["memory_id"] is not None
+         >>>
+         >>> # Read with attention filter
+         >>> memories = memory_pool.run(
+         ...     action="read",
+         ...     agent_id="analyst_001",
+         ...     attention_filter={
+         ...         "tags": ["correlation"],
+         ...         "importance_threshold": 0.7,
+         ...         "window_size": 5
+         ...     }
+         ... )
+         >>> assert len(memories["memories"]) > 0
+         >>>
+         >>> # Subscribe to specific segments
+         >>> memory_pool.run(
+         ...     action="subscribe",
+         ...     agent_id="monitor_001",
+         ...     segments=["findings", "alerts"]
+         ... )
+         >>>
+         >>> # Semantic query across all memories
+         >>> results = memory_pool.run(
+         ...     action="query",
+         ...     query="correlation analysis",
+         ...     top_k=3
+         ... )
+     """
+
+     def __init__(self):
+         super().__init__()
+         self.memory_segments = defaultdict(deque)
+         self.agent_subscriptions = defaultdict(set)
+         self.attention_indices = defaultdict(lambda: defaultdict(list))
+         self.memory_id_counter = 0
+         self.max_segment_size = 1000
+
+     def get_parameters(self) -> Dict[str, NodeParameter]:
+         return {
+             "action": NodeParameter(
+                 name="action",
+                 type=str,
+                 required=False,
+                 default="read",
+                 description="Action to perform: 'write', 'read', 'subscribe', 'query'",
+             ),
+             "agent_id": NodeParameter(
+                 name="agent_id",
+                 type=str,
+                 required=False,
+                 default="system",
+                 description="ID of the agent performing the action",
+             ),
+             "content": NodeParameter(
+                 name="content",
+                 type=Any,
+                 required=False,
+                 description="Content to write to memory (for write action)",
+             ),
+             "tags": NodeParameter(
+                 name="tags",
+                 type=list,
+                 required=False,
+                 default=[],
+                 description="Tags to categorize the memory",
+             ),
+             "importance": NodeParameter(
+                 name="importance",
+                 type=float,
+                 required=False,
+                 default=0.5,
+                 description="Importance score (0.0 to 1.0)",
+             ),
+             "segment": NodeParameter(
+                 name="segment",
+                 type=str,
+                 required=False,
+                 default="general",
+                 description="Memory segment to write to",
+             ),
+             "attention_filter": NodeParameter(
+                 name="attention_filter",
+                 type=dict,
+                 required=False,
+                 default={},
+                 description="Filter criteria for reading memories",
+             ),
+             "context": NodeParameter(
+                 name="context",
+                 type=dict,
+                 required=False,
+                 default={},
+                 description="Additional context for the memory",
+             ),
+             "query": NodeParameter(
+                 name="query",
+                 type=str,
+                 required=False,
+                 description="Search query for semantic memory search",
+             ),
+         }
+
+     def run(self, **kwargs) -> Dict[str, Any]:
+         """
+         Execute memory pool operations.
+
+         This method routes requests to appropriate handlers based on the action
+         parameter, supporting write, read, subscribe, and query operations.
+
+         Args:
+             **kwargs: Operation parameters including:
+                 action (str): Operation type ('write', 'read', 'subscribe', 'query')
+                 Additional parameters specific to each action
+
+         Returns:
+             Dict[str, Any]: Operation results with 'success' status and action-specific data
+
+         Raises:
+             No exceptions raised - errors returned in response dict
+
+         Side Effects:
+             Modifies internal memory state for write operations
+             Updates subscription lists for subscribe operations
+         """
+         action = kwargs.get("action")
+
+         if action == "write":
+             return self._write_memory(kwargs)
+         elif action == "read":
+             return self._read_with_attention(kwargs)
+         elif action == "subscribe":
+             return self._subscribe_agent(kwargs)
+         elif action == "query":
+             return self._semantic_query(kwargs)
+         else:
+             return {"success": False, "error": f"Unknown action: {action}"}
+
+     def _write_memory(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Write information to shared pool with metadata."""
+         self.memory_id_counter += 1
+         memory_item = {
+             "id": f"mem_{self.memory_id_counter}",
+             "content": kwargs["content"],
+             "agent_id": kwargs["agent_id"],
+             "timestamp": time.time(),
+             "datetime": datetime.now().isoformat(),
+             "tags": kwargs.get("tags", []),
+             "importance": kwargs.get("importance", 0.5),
+             "context": kwargs.get("context", {}),
+             "access_count": 0,
+         }
+
+         # Store in appropriate segment
+         segment = kwargs.get("segment", "general")
+         self.memory_segments[segment].append(memory_item)
+
+         # Maintain segment size limit
+         if len(self.memory_segments[segment]) > self.max_segment_size:
+             self.memory_segments[segment].popleft()
+
+         # Update attention indices
+         self._update_attention_indices(memory_item, segment)
+
+         # Get relevant agents
+         relevant_agents = self._get_relevant_agents(memory_item, segment)
+
+         return {
+             "success": True,
+             "memory_id": memory_item["id"],
+             "segment": segment,
+             "notified_agents": list(relevant_agents),
+             "timestamp": memory_item["timestamp"],
+         }
+
+     def _read_with_attention(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Read relevant memories based on attention filter."""
+         agent_id = kwargs["agent_id"]
+         attention_filter = kwargs.get("attention_filter", {})
+
+         relevant_memories = []
+
+         # Apply attention mechanism
+         for segment, memories in self.memory_segments.items():
+             if self._matches_attention_filter(segment, attention_filter):
+                 for memory in memories:
+                     relevance_score = self._calculate_relevance(
+                         memory, attention_filter, agent_id
+                     )
+                     if relevance_score > attention_filter.get("threshold", 0.3):
+                         memory["access_count"] += 1
+                         relevant_memories.append(
+                             {
+                                 **memory,
+                                 "relevance_score": relevance_score,
+                                 "segment": segment,
+                             }
+                         )
+
+         # Sort by relevance and recency
+         relevant_memories.sort(
+             key=lambda x: (x["relevance_score"], x["timestamp"]), reverse=True
+         )
+
+         # Limit to attention window
+         window_size = attention_filter.get("window_size", 10)
+         selected_memories = relevant_memories[:window_size]
+
+         return {
+             "success": True,
+             "memories": selected_memories,
+             "total_available": len(relevant_memories),
+             "segments_scanned": list(self.memory_segments.keys()),
+             "agent_id": agent_id,
+         }
+
+     def _subscribe_agent(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Subscribe an agent to specific memory segments or tags."""
+         agent_id = kwargs["agent_id"]
+         segments = kwargs.get("segments", ["general"])
+         tags = kwargs.get("tags", [])
+
+         for segment in segments:
+             self.agent_subscriptions[segment].add(agent_id)
+
+         # Store subscription preferences
+         if not hasattr(self, "agent_preferences"):
+             self.agent_preferences = {}
+
+         self.agent_preferences[agent_id] = {
+             "segments": segments,
+             "tags": tags,
+             "attention_filter": kwargs.get("attention_filter", {}),
+         }
+
+         return {
+             "success": True,
+             "agent_id": agent_id,
+             "subscribed_segments": segments,
+             "subscribed_tags": tags,
+         }
+
+     def _semantic_query(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Perform semantic search across memories."""
+         query = kwargs.get("query", "")
+         agent_id = kwargs["agent_id"]
+
+         # Simple keyword matching for now (can be enhanced with embeddings)
+         matching_memories = []
+         query_lower = query.lower()
+
+         for segment, memories in self.memory_segments.items():
+             for memory in memories:
+                 content_str = str(memory.get("content", "")).lower()
+                 if query_lower in content_str:
+                     score = content_str.count(query_lower) / len(content_str.split())
+                     matching_memories.append(
+                         {**memory, "match_score": score, "segment": segment}
+                     )
+
+         # Sort by match score
+         matching_memories.sort(key=lambda x: x["match_score"], reverse=True)
+
+         return {
+             "success": True,
+             "query": query,
+             "results": matching_memories[:10],
+             "total_matches": len(matching_memories),
+         }
+
+     def _update_attention_indices(self, memory_item: Dict[str, Any], segment: str):
+         """Update indices for efficient attention-based retrieval."""
+         # Index by tags
+         for tag in memory_item.get("tags", []):
+             self.attention_indices["tags"][tag].append(memory_item["id"])
+
+         # Index by agent
+         agent_id = memory_item["agent_id"]
+         self.attention_indices["agents"][agent_id].append(memory_item["id"])
+
+         # Index by importance level
+         importance = memory_item["importance"]
+         if importance >= 0.8:
+             self.attention_indices["importance"]["high"].append(memory_item["id"])
+         elif importance >= 0.5:
+             self.attention_indices["importance"]["medium"].append(memory_item["id"])
+         else:
+             self.attention_indices["importance"]["low"].append(memory_item["id"])
+
+     def _matches_attention_filter(
+         self, segment: str, attention_filter: Dict[str, Any]
+     ) -> bool:
+         """Check if a segment matches the attention filter."""
+         # Check segment filter
+         if "segments" in attention_filter:
+             if segment not in attention_filter["segments"]:
+                 return False
+
+         return True
+
+     def _calculate_relevance(
+         self, memory: Dict[str, Any], attention_filter: Dict[str, Any], agent_id: str
+     ) -> float:
+         """Calculate relevance score for a memory item."""
+         score = 0.0
+         weights = attention_filter.get(
+             "weights", {"tags": 0.3, "importance": 0.3, "recency": 0.2, "agent": 0.2}
+         )
+
+         # Tag matching
+         if "tags" in attention_filter:
+             filter_tags = set(attention_filter["tags"])
+             memory_tags = set(memory.get("tags", []))
+             if filter_tags & memory_tags:
+                 score += (
+                     weights.get("tags", 0.3)
+                     * len(filter_tags & memory_tags)
+                     / len(filter_tags)
+                 )
+
+         # Importance threshold
+         importance_threshold = attention_filter.get("importance_threshold", 0.0)
+         if memory.get("importance", 0) >= importance_threshold:
+             score += weights.get("importance", 0.3) * memory["importance"]
+
+         # Recency
+         current_time = time.time()
+         age_seconds = current_time - memory["timestamp"]
+         recency_window = attention_filter.get("recency_window", 3600)  # 1 hour default
+         if age_seconds < recency_window:
+             recency_score = 1.0 - (age_seconds / recency_window)
+             score += weights.get("recency", 0.2) * recency_score
+
+         # Agent affinity
+         if "preferred_agents" in attention_filter:
+             if memory["agent_id"] in attention_filter["preferred_agents"]:
+                 score += weights.get("agent", 0.2)
+
+         return min(score, 1.0)
+
+     def _get_relevant_agents(
+         self, memory_item: Dict[str, Any], segment: str
+     ) -> Set[str]:
+         """Get agents that should be notified about this memory."""
+         relevant_agents = set()
+
+         # Agents subscribed to this segment
+         relevant_agents.update(self.agent_subscriptions.get(segment, set()))
+
+         # Agents with matching tag subscriptions
+         if hasattr(self, "agent_preferences"):
+             for agent_id, prefs in self.agent_preferences.items():
+                 if any(
+                     tag in memory_item.get("tags", []) for tag in prefs.get("tags", [])
+                 ):
+                     relevant_agents.add(agent_id)
+
+         # Remove the writing agent
+         relevant_agents.discard(memory_item["agent_id"])
+
+         return relevant_agents
+
+
+ @register_node()
+ class A2AAgentNode(LLMAgentNode):
+     """
+     Enhanced LLM agent with agent-to-agent communication capabilities.
+
+     This node extends the standard LLMAgentNode with sophisticated A2A communication
+     features, enabling agents to share insights through a shared memory pool, enhance
+     their context with relevant information from other agents, and collaborate
+     effectively on complex tasks.
+
+     Design Philosophy:
+         A2AAgentNode represents an intelligent agent that can both contribute to and
+         benefit from collective intelligence. It automatically extracts insights from
+         its responses and shares them with other agents while selectively attending
+         to relevant information from the shared memory pool. This creates an emergent
+         collaborative intelligence system.
+
+     Upstream Dependencies:
+         - QueryAnalysisNode: Provides analyzed queries and context
+         - TeamFormationNode: Assigns roles and capabilities to agents
+         - A2ACoordinatorNode: Delegates tasks and coordinates activities
+         - SharedMemoryPoolNode: Provides access to shared memories
+
+     Downstream Consumers:
+         - SharedMemoryPoolNode: Receives insights and discoveries
+         - A2ACoordinatorNode: Reports progress and results
+         - SolutionEvaluatorNode: Provides solutions for evaluation
+         - Other A2AAgentNodes: Indirect consumers through shared memory
+
+     Configuration:
+         Inherits all configuration from LLMAgentNode plus A2A-specific parameters
+         for memory pool integration, attention filtering, and collaboration modes.
+
+     Implementation Details:
+         - Automatically extracts insights from LLM responses
+         - Enhances prompts with relevant context from shared memory
+         - Supports multiple collaboration modes (cooperative, competitive, hierarchical)
+         - Tracks conversation context and shares key discoveries
+         - Implements attention filtering to prevent information overload
+
+     Error Handling:
+         - Gracefully handles missing memory pool connections
+         - Falls back to standard LLM behavior if A2A features fail
+         - Validates insight extraction to prevent malformed memories
+
+     Side Effects:
+         - Writes insights to SharedMemoryPoolNode after each interaction
+         - Maintains conversation history for context
+         - May influence other agents through shared memories
+
+     Examples:
+         >>> # Create an A2A agent with specific expertise
+         >>> agent = A2AAgentNode()
+         >>>
+         >>> # Execute with A2A features
+         >>> result = agent.run(
+         ...     agent_id="researcher_001",
+         ...     agent_role="research_specialist",
+         ...     provider="openai",
+         ...     model="gpt-4",
+         ...     messages=[{
+         ...         "role": "user",
+         ...         "content": "Analyze the impact of AI on productivity"
+         ...     }],
+         ...     memory_pool=memory_pool_instance,
+         ...     attention_filter={
+         ...         "tags": ["productivity", "AI", "research"],
+         ...         "importance_threshold": 0.7
+         ...     },
+         ...     collaboration_mode="cooperative"
+         ... )
+         >>> assert result["success"] == True
+         >>> assert "insights_generated" in result["a2a_metadata"]
+         >>>
+         >>> # Agent automatically shares insights
+         >>> insights = result["a2a_metadata"]["insights_generated"]
+         >>> assert len(insights) > 0
+         >>> assert all("content" in i for i in insights)
+     """
+
+     def __init__(self):
+         super().__init__()
+         self.local_memory = deque(maxlen=100)
+         self.communication_log = deque(maxlen=50)
+
+     def get_parameters(self) -> Dict[str, NodeParameter]:
+         # Inherit all LLMAgentNode parameters
+         params = super().get_parameters()
+
+         # Add A2A-specific parameters
+         params.update(
+             {
+                 "agent_id": NodeParameter(
+                     name="agent_id",
+                     type=str,
+                     required=False,
+                     default=f"agent_{uuid.uuid4().hex[:8]}",
+                     description="Unique identifier for this agent",
+                 ),
+                 "agent_role": NodeParameter(
+                     name="agent_role",
+                     type=str,
+                     required=False,
+                     default="general",
+                     description="Role of the agent (researcher, analyst, coordinator, etc.)",
+                 ),
+                 "memory_pool": NodeParameter(
+                     name="memory_pool",
+                     type=Node,
+                     required=False,
+                     description="Reference to SharedMemoryPoolNode",
+                 ),
+                 "attention_filter": NodeParameter(
+                     name="attention_filter",
+                     type=dict,
+                     required=False,
+                     default={},
+                     description="Criteria for filtering relevant information from shared memory",
+                 ),
+                 "communication_config": NodeParameter(
+                     name="communication_config",
+                     type=dict,
+                     required=False,
+                     default={"mode": "direct", "protocol": "json-rpc"},
+                     description="A2A communication settings",
+                 ),
+                 "collaboration_mode": NodeParameter(
+                     name="collaboration_mode",
+                     type=str,
+                     required=False,
+                     default="cooperative",
+                     description="How agent collaborates: cooperative, competitive, hierarchical",
+                 ),
+                 "peer_agents": NodeParameter(
+                     name="peer_agents",
+                     type=list,
+                     required=False,
+                     default=[],
+                     description="List of peer agent IDs for direct communication",
+                 ),
+             }
+         )
+         return params
+
+     def run(self, **kwargs) -> Dict[str, Any]:
+         """
+         Execute the A2A agent with enhanced communication capabilities.
+
+         This method extends the base LLMAgentNode execution by:
+         1. Reading relevant context from the shared memory pool
+         2. Enhancing the prompt with shared discoveries
+         3. Executing the LLM call with enriched context
+         4. Extracting insights from the response
+         5. Sharing valuable insights back to the memory pool
+
+         Args:
+             **kwargs: All LLMAgentNode parameters plus:
+                 agent_id (str): Unique identifier for this agent
+                 agent_role (str): Agent's role in the team
+                 memory_pool (SharedMemoryPoolNode): Shared memory instance
+                 attention_filter (dict): Criteria for filtering memories
+                 collaboration_mode (str): How agent collaborates
+
+         Returns:
+             Dict[str, Any]: LLMAgentNode response plus:
+                 a2a_metadata: Information about A2A interactions including
+                     insights_generated, shared_context_used, collaboration_stats
+
+         Side Effects:
+             Writes insights to shared memory pool if available
+             Updates internal conversation history
+         """
+         # Extract A2A specific parameters
+         agent_id = kwargs.get("agent_id")
+         agent_role = kwargs.get("agent_role", "general")
+         memory_pool = kwargs.get("memory_pool")
+         attention_filter = kwargs.get("attention_filter", {})
+
+         # Read from shared memory if available
+         shared_context = []
+         if memory_pool:
+             memory_result = memory_pool.run(
+                 action="read", agent_id=agent_id, attention_filter=attention_filter
+             )
+             if memory_result.get("success"):
+                 shared_context = memory_result.get("memories", [])
+
+         # Enhance messages with shared context
+         messages = kwargs.get("messages", [])
+         if shared_context:
+             context_summary = self._summarize_shared_context(shared_context)
+             enhanced_system_prompt = f"""You are agent {agent_id} with role: {agent_role}.
+
+ Relevant shared context from other agents:
+ {context_summary}
+
+ {kwargs.get('system_prompt', '')}"""
+             kwargs["system_prompt"] = enhanced_system_prompt
+
+         # Execute LLM agent
+         result = super().run(**kwargs)
+
+         # If successful, write insights to shared memory
+         if result.get("success") and memory_pool:
+             response_content = result.get("response", {}).get("content", "")
+
+             # Extract important insights
+             insights = self._extract_insights(response_content, agent_role)
+
+             for insight in insights:
+                 memory_pool.run(
+                     action="write",
+                     agent_id=agent_id,
+                     content=insight["content"],
+                     tags=insight.get("tags", [agent_role]),
+                     importance=insight.get("importance", 0.6),
+                     segment=insight.get("segment", agent_role),
+                     context={
+                         "source_message": messages[-1] if messages else None,
+                         "agent_role": agent_role,
+                     },
+                 )
+
+         # Add A2A metadata to result
+         result["a2a_metadata"] = {
+             "agent_id": agent_id,
+             "agent_role": agent_role,
+             "shared_context_used": len(shared_context),
+             "insights_generated": len(insights) if "insights" in locals() else 0,
+         }
+
+         return result
+
+     def _summarize_shared_context(self, shared_context: List[Dict[str, Any]]) -> str:
+         """Summarize shared context for inclusion in prompt."""
+         if not shared_context:
+             return "No relevant shared context available."
+
+         summary_parts = []
+         for memory in shared_context[:5]:  # Limit to top 5 most relevant
+             agent_id = memory.get("agent_id", "unknown")
+             content = memory.get("content", "")
+             importance = memory.get("importance", 0)
+             tags = ", ".join(memory.get("tags", []))
+
+             summary_parts.append(
+                 f"- Agent {agent_id} ({importance:.1f} importance, tags: {tags}): {content}"
+             )
+
+         return "\n".join(summary_parts)
+
+     def _extract_insights(self, response: str, agent_role: str) -> List[Dict[str, Any]]:
+         """Extract important insights from agent response."""
+         insights = []
+
+         # Simple heuristic-based extraction
+         lines = response.split("\n")
+         for line in lines:
+             line = line.strip()
+             if not line:
+                 continue
+
+             # High importance indicators
+             high_importance_keywords = [
+                 "critical",
+                 "important",
+                 "key finding",
+                 "conclusion",
+                 "discovered",
+             ]
+             importance = 0.5
+
+             if any(keyword in line.lower() for keyword in high_importance_keywords):
+                 importance = 0.8
+
+             # Tag extraction based on role
+             tags = [agent_role]
+             if "data" in line.lower():
+                 tags.append("data")
+             if "pattern" in line.lower():
+                 tags.append("pattern")
+             if "insight" in line.lower():
+                 tags.append("insight")
+
+             # Only save substantive lines
+             if len(line) > 20:
+                 insights.append(
+                     {
+                         "content": line,
+                         "importance": importance,
+                         "tags": tags,
+                         "segment": agent_role,
+                     }
+                 )
+
+         return insights[:3]  # Limit to top 3 insights per response
+
+
+ @register_node()
+ class A2ACoordinatorNode(Node):
+     """
+     Coordinates communication and task delegation between A2A agents.
+
+     This node acts as a central orchestrator for multi-agent systems, managing task
+     distribution, consensus building, and workflow coordination. It implements various
+     coordination strategies to optimize agent utilization and ensure effective
+     collaboration across heterogeneous agent teams.
+
+     Design Philosophy:
+         The A2ACoordinatorNode serves as a decentralized coordination mechanism that
+         enables agents to self-organize without requiring a fixed hierarchy. It provides
+         flexible coordination patterns (delegation, broadcast, consensus, workflow)
+         that can be composed to create sophisticated multi-agent behaviors.
+
+     Upstream Dependencies:
+         - ProblemAnalyzerNode: Provides decomposed tasks and requirements
+         - TeamFormationNode: Supplies formed teams and agent assignments
+         - QueryAnalysisNode: Delivers analyzed queries needing coordination
+         - OrchestrationManagerNode: High-level orchestration directives
+
+     Downstream Consumers:
+         - A2AAgentNode: Receives task assignments and coordination messages
+         - SharedMemoryPoolNode: Stores coordination decisions and progress
+         - SolutionEvaluatorNode: Evaluates coordinated solution components
+         - ConvergenceDetectorNode: Monitors coordination effectiveness
+
+     Configuration:
+         The coordinator adapts its behavior based on the coordination strategy
+         selected and the characteristics of available agents. No static configuration
+         is required, but runtime parameters control coordination behavior.
+
+     Implementation Details:
+         - Maintains registry of active agents with capabilities and status
+         - Implements multiple delegation strategies (best_match, round_robin, auction)
+         - Tracks task assignments and agent performance metrics
+         - Supports both synchronous and asynchronous coordination patterns
+         - Manages consensus voting with configurable thresholds
+
+     Error Handling:
+         - Handles agent failures with automatic reassignment
+         - Validates task requirements before delegation
+         - Falls back to broadcast when specific agents unavailable
+         - Returns partial results if consensus cannot be reached
+
+     Side Effects:
+         - Maintains internal agent registry across calls
+         - Updates agent performance metrics after task completion
+         - May modify task priorities based on agent availability
+
+     Examples:
+         >>> # Create coordinator
+         >>> coordinator = A2ACoordinatorNode()
+         >>>
+         >>> # Register agents
+         >>> coordinator.run(
+         ...     action="register",
+         ...     agent_info={
+         ...         "id": "analyst_001",
+         ...         "skills": ["data_analysis", "statistics"],
+         ...         "role": "analyst"
+         ...     }
+         ... )
+         >>>
+         >>> # Delegate task with best match strategy
+         >>> result = coordinator.run(
+         ...     action="delegate",
+         ...     task={
+         ...         "type": "analysis",
+         ...         "description": "Analyze sales data",
+         ...         "required_skills": ["data_analysis"],
+         ...         "priority": "high"
+         ...     },
+         ...     available_agents=[
+         ...         {"id": "analyst_001", "skills": ["data_analysis"]},
+         ...         {"id": "researcher_001", "skills": ["research"]}
+         ...     ],
+         ...     coordination_strategy="best_match"
+         ... )
+         >>> assert result["success"] == True
+         >>> assert result["assigned_agent"] == "analyst_001"
+         >>>
+         >>> # Build consensus among agents
+         >>> consensus_result = coordinator.run(
+         ...     action="consensus",
+         ...     proposal="Implement new feature X",
+         ...     voting_agents=["agent1", "agent2", "agent3"],
+         ...     consensus_threshold=0.66
+         ... )
+     """
+
+     def __init__(self):
+         super().__init__()
+         self.registered_agents = {}
+         self.task_queue = deque()
+         self.consensus_sessions = {}
+
+     def get_parameters(self) -> Dict[str, NodeParameter]:
+         return {
+             "action": NodeParameter(
+                 name="action",
+                 type=str,
+                 required=False,
+                 default="coordinate",
+                 description="Action: 'register', 'delegate', 'broadcast', 'consensus', 'coordinate'",
+             ),
+             "agent_info": NodeParameter(
+                 name="agent_info",
+                 type=dict,
+                 required=False,
+                 description="Information about agent (for registration)",
+             ),
+             "task": NodeParameter(
+                 name="task",
+                 type=dict,
+                 required=False,
+                 description="Task to delegate or coordinate",
+             ),
+             "message": NodeParameter(
+                 name="message",
+                 type=dict,
+                 required=False,
+                 description="Message to broadcast",
+             ),
+             "consensus_proposal": NodeParameter(
+                 name="consensus_proposal",
+                 type=dict,
+                 required=False,
+                 description="Proposal for consensus",
+             ),
+             "available_agents": NodeParameter(
+                 name="available_agents",
+                 type=list,
+                 required=False,
+                 default=[],
+                 description="List of available agents",
+             ),
+             "coordination_strategy": NodeParameter(
+                 name="coordination_strategy",
+                 type=str,
+                 required=False,
+                 default="best_match",
+                 description="Strategy: 'best_match', 'round_robin', 'broadcast', 'auction'",
+             ),
+         }
+
+     def run(self, **kwargs) -> Dict[str, Any]:
+         """Execute coordination action."""
+         action = kwargs.get("action")
+
+         if action == "register":
+             return self._register_agent(kwargs)
+         elif action == "delegate":
+             return self._delegate_task(kwargs)
+         elif action == "broadcast":
+             return self._broadcast_message(kwargs)
+         elif action == "consensus":
+             return self._manage_consensus(kwargs)
+         elif action == "coordinate":
+             return self._coordinate_workflow(kwargs)
+         else:
+             return {"success": False, "error": f"Unknown action: {action}"}
+
+     def _register_agent(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Register an agent with the coordinator."""
+         agent_info = kwargs.get("agent_info", {})
+         agent_id = agent_info.get("id")
+
+         if not agent_id:
+             return {"success": False, "error": "Agent ID required"}
+
+         self.registered_agents[agent_id] = {
+             "id": agent_id,
+             "skills": agent_info.get("skills", []),
+             "role": agent_info.get("role", "general"),
+             "status": "available",
+             "registered_at": time.time(),
+             "task_count": 0,
+             "success_rate": 1.0,
+         }
+
+         return {
+             "success": True,
+             "agent_id": agent_id,
+             "registered_agents": list(self.registered_agents.keys()),
+         }
+
+     def _delegate_task(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Delegate task to most suitable agent."""
+         task = kwargs.get("task", {})
+         available_agents = kwargs.get("available_agents", [])
+         strategy = kwargs.get("coordination_strategy", "best_match")
+
+         if not available_agents:
+             available_agents = [
+                 agent
+                 for agent in self.registered_agents.values()
+                 if agent["status"] == "available"
+             ]
+
+         if not available_agents:
+             return {"success": False, "error": "No available agents"}
+
+         # Select agent based on strategy
+         if strategy == "best_match":
+             selected_agent = self._find_best_match(task, available_agents)
+         elif strategy == "round_robin":
+             selected_agent = available_agents[0]  # Simple round-robin
+         elif strategy == "auction":
+             selected_agent = self._run_auction(task, available_agents)
+         else:
+             selected_agent = available_agents[0]
+
+         if not selected_agent:
+             return {"success": False, "error": "No suitable agent found"}
+
+         # Update agent status
+         agent_id = selected_agent.get("id")
+         if agent_id in self.registered_agents:
+             self.registered_agents[agent_id]["status"] = "busy"
+             self.registered_agents[agent_id]["task_count"] += 1
+
+         return {
+             "success": True,
+             "delegated_to": agent_id,
+             "task": task,
+             "strategy": strategy,
+         }
+
+     def _broadcast_message(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Broadcast message to relevant agents."""
+         message = kwargs.get("message", {})
+         target_roles = message.get("target_roles", [])
+         target_skills = message.get("target_skills", [])
+
+         recipients = []
+         for agent in self.registered_agents.values():
+             # Check role match
+             if target_roles and agent["role"] not in target_roles:
+                 continue
+
+             # Check skills match
+             if target_skills:
+                 if not any(skill in agent["skills"] for skill in target_skills):
+                     continue
+
+             recipients.append(agent["id"])
+
+         return {
+             "success": True,
+             "recipients": recipients,
+             "message": message,
+             "broadcast_time": time.time(),
+         }
+
+     def _manage_consensus(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Manage consensus building among agents."""
+         proposal = kwargs.get("consensus_proposal", {})
+         session_id = proposal.get("session_id", str(uuid.uuid4()))
+
+         if session_id not in self.consensus_sessions:
+             self.consensus_sessions[session_id] = {
+                 "proposal": proposal,
+                 "votes": {},
+                 "started_at": time.time(),
+                 "status": "open",
+             }
+
+         session = self.consensus_sessions[session_id]
+
+         # Handle vote
+         if "vote" in kwargs:
+             agent_id = kwargs.get("agent_id")
+             vote = kwargs.get("vote")
+             session["votes"][agent_id] = vote
+
+         # Check if consensus reached
+         total_agents = len(self.registered_agents)
+         votes_cast = len(session["votes"])
+
+         if votes_cast >= total_agents * 0.5:  # Simple majority
+             yes_votes = sum(1 for v in session["votes"].values() if v)
+             consensus_reached = yes_votes > votes_cast / 2
+
+             session["status"] = "completed"
+             session["result"] = "approved" if consensus_reached else "rejected"
+
+             return {
+                 "success": True,
+                 "session_id": session_id,
+                 "consensus_reached": consensus_reached,
+                 "result": session["result"],
+                 "votes": session["votes"],
+             }
+
+         return {
+             "success": True,
+             "session_id": session_id,
+             "status": session["status"],
+             "votes_cast": votes_cast,
+             "votes_needed": int(total_agents * 0.5),
+         }
+
+     def _coordinate_workflow(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Coordinate a multi-agent workflow."""
+         workflow_spec = kwargs.get("task", {})
+         steps = workflow_spec.get("steps", [])
+
+         coordination_plan = []
+         for step in steps:
+             required_skills = step.get("required_skills", [])
+             available_agents = [
+                 agent
+                 for agent in self.registered_agents.values()
+                 if any(skill in agent["skills"] for skill in required_skills)
+             ]
+
+             if available_agents:
+                 selected_agent = self._find_best_match(step, available_agents)
+                 coordination_plan.append(
+                     {
+                         "step": step["name"],
+                         "assigned_to": selected_agent["id"],
+                         "skills_matched": [
+                             s for s in required_skills if s in selected_agent["skills"]
+                         ],
+                     }
+                 )
+             else:
+                 coordination_plan.append(
+                     {
+                         "step": step["name"],
+                         "assigned_to": None,
+                         "error": "No agent with required skills",
+                     }
+                 )
+
+         return {
+             "success": True,
+             "workflow": workflow_spec.get("name", "unnamed"),
+             "coordination_plan": coordination_plan,
+             "total_steps": len(steps),
+             "assigned_steps": sum(1 for p in coordination_plan if p.get("assigned_to")),
+         }
+
+     def _find_best_match(
+         self, task: Dict[str, Any], agents: List[Dict[str, Any]]
+     ) -> Optional[Dict[str, Any]]:
+         """Find best matching agent for task."""
+         required_skills = task.get("required_skills", [])
+         if not required_skills:
+             return agents[0] if agents else None
+
+         best_agent = None
+         best_score = 0
+
+         for agent in agents:
+             agent_skills = set(agent.get("skills", []))
+             required_set = set(required_skills)
+
+             # Calculate match score
+             matches = agent_skills & required_set
+             score = len(matches) / len(required_set) if required_set else 0
+
+             # Consider success rate
+             success_rate = agent.get("success_rate", 1.0)
+             score *= success_rate
+
+             if score > best_score:
+                 best_score = score
+                 best_agent = agent
+
+         return best_agent
+
+     def _run_auction(
+         self, task: Dict[str, Any], agents: List[Dict[str, Any]]
+     ) -> Optional[Dict[str, Any]]:
+         """Run auction-based task assignment."""
+         # Simplified auction - agents bid based on their capability
+         bids = []
+
+         for agent in agents:
+             # Calculate bid based on skill match and availability
+             required_skills = set(task.get("required_skills", []))
+             agent_skills = set(agent.get("skills", []))
+
+             skill_match = (
+                 len(required_skills & agent_skills) / len(required_skills)
+                 if required_skills
+                 else 1.0
+             )
+             workload = 1.0 - (agent.get("task_count", 0) / 10.0)  # Lower bid if busy
+
+             bid_value = skill_match * workload * agent.get("success_rate", 1.0)
+
+             bids.append({"agent": agent, "bid": bid_value})
+
+         # Select highest bidder
+         if bids:
+             bids.sort(key=lambda x: x["bid"], reverse=True)
+             return bids[0]["agent"]
+
+         return None
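
The three classes added in a2a.py are meant to be used together: an agent reads attention-filtered context from the pool, calls its LLM, and writes extracted insights back for other agents to pick up. The sketch below is illustrative only, pieced together from the docstring examples and the run() signatures shown in the diff above; the import path follows the file's location, and the provider/model values are placeholders that assume an LLM backend is already configured for LLMAgentNode.

    from kailash.nodes.ai.a2a import A2AAgentNode, SharedMemoryPoolNode

    pool = SharedMemoryPoolNode()
    researcher = A2AAgentNode()

    # The researcher reads relevant shared context, calls the LLM, and writes
    # extracted insights back into the pool (tagged and segmented by its role).
    result = researcher.run(
        agent_id="researcher_001",
        agent_role="researcher",
        provider="openai",  # placeholder provider/model, not dictated by the diff
        model="gpt-4",
        messages=[{"role": "user", "content": "Analyze the impact of AI on productivity"}],
        memory_pool=pool,
        attention_filter={"tags": ["research"], "importance_threshold": 0.6},
    )

    # A second agent can then pick those insights up via an attention-filtered read.
    shared = pool.run(
        action="read",
        agent_id="analyst_001",
        attention_filter={"tags": ["researcher"], "window_size": 5},
    )
    print(shared["memories"])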
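
The consensus path of A2ACoordinatorNode is only hinted at in its docstring; judging from the kwargs that _manage_consensus reads (consensus_proposal, agent_id, vote), a voting round could be driven as in the sketch below. The agent IDs and proposal are hypothetical; once at least half of the registered agents have voted, the session is resolved by simple majority.

    from kailash.nodes.ai.a2a import A2ACoordinatorNode

    coordinator = A2ACoordinatorNode()
    for agent_id in ("agent_1", "agent_2", "agent_3"):
        coordinator.run(action="register", agent_info={"id": agent_id, "role": "general"})

    proposal = {"session_id": "feature-x", "description": "Implement feature X"}

    # Each call records one vote in the shared session keyed by session_id.
    outcome = {}
    for agent_id, vote in [("agent_1", True), ("agent_2", True), ("agent_3", False)]:
        outcome = coordinator.run(
            action="consensus",
            consensus_proposal=proposal,
            agent_id=agent_id,
            vote=vote,
        )

    print(outcome["consensus_reached"], outcome["result"])  # -> True approved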