kailash 0.9.19__py3-none-any.whl → 0.9.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/mcp_server/subscriptions.py +20 -20
- kailash/nodes/__init__.py +1 -0
- kailash/nodes/ai/ai_providers.py +237 -25
- kailash/nodes/ai/llm_agent.py +73 -15
- kailash/nodes/code/python.py +178 -15
- kailash/nodes/data/async_sql.py +76 -10
- kailash/workflow/builder.py +1 -1
- kailash/workflow/templates.py +2 -1
- {kailash-0.9.19.dist-info → kailash-0.9.21.dist-info}/METADATA +27 -19
- {kailash-0.9.19.dist-info → kailash-0.9.21.dist-info}/RECORD +16 -17
- kailash/nodes/ai/a2a_backup.py +0 -1807
- {kailash-0.9.19.dist-info → kailash-0.9.21.dist-info}/WHEEL +0 -0
- {kailash-0.9.19.dist-info → kailash-0.9.21.dist-info}/entry_points.txt +0 -0
- {kailash-0.9.19.dist-info → kailash-0.9.21.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.9.19.dist-info → kailash-0.9.21.dist-info}/licenses/NOTICE +0 -0
- {kailash-0.9.19.dist-info → kailash-0.9.21.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/a2a_backup.py
DELETED
@@ -1,1807 +0,0 @@
"""Agent-to-Agent (A2A) communication nodes with shared memory pools.

This module implements multi-agent communication with selective attention mechanisms,
enabling efficient collaboration between AI agents while preventing information overload.

Design Philosophy:
    The A2A system enables decentralized multi-agent collaboration through shared
    memory pools and attention mechanisms. Agents can share insights, coordinate
    tasks, and build collective intelligence without centralized control.
"""

import json
import time
import uuid
from collections import defaultdict, deque
from datetime import datetime
from typing import Any, Dict, List, Optional, Set

from kailash.nodes.ai.llm_agent import LLMAgentNode
from kailash.nodes.base import Node, NodeParameter, register_node
from kailash.nodes.base_cycle_aware import CycleAwareNode


@register_node()
class SharedMemoryPoolNode(Node):
    """
    Central memory pool that multiple agents can read from and write to.

    This node implements a sophisticated shared memory system with selective attention
    mechanisms, enabling efficient multi-agent collaboration while preventing information
    overload through intelligent filtering and segmentation.

    Design Philosophy:
        The SharedMemoryPoolNode acts as a cognitive workspace where agents can share
        discoveries, insights, and intermediate results. It implements attention-based
        filtering inspired by human selective attention, allowing agents to focus on
        relevant information without being overwhelmed by the full memory pool.

    Upstream Dependencies:
        - A2AAgentNode: Primary writer of memories with insights and discoveries
        - A2ACoordinatorNode: Writes coordination messages and task assignments
        - Any custom agent nodes that need to share information

    Downstream Consumers:
        - A2AAgentNode: Reads relevant memories to enhance context
        - A2ACoordinatorNode: Monitors agent progress through memory queries
        - SolutionEvaluatorNode: Aggregates insights for evaluation
        - Any analysis or visualization nodes needing shared context

    Configuration:
        This node is typically configured at workflow initialization and doesn't
        require runtime configuration. Memory segmentation and size limits can
        be adjusted through class attributes.

    Implementation Details:
        - Uses segmented memory pools for different types of information
        - Implements tag-based indexing for fast retrieval
        - Supports importance-weighted attention filtering
        - Maintains agent subscription patterns for targeted delivery
        - Automatically manages memory size through FIFO eviction

    Error Handling:
        - Returns empty results for invalid queries rather than failing
        - Handles missing segments gracefully
        - Validates importance scores to [0, 1] range

    Side Effects:
        - Maintains internal memory state across workflow execution
        - Memory persists for the lifetime of the node instance
        - Does not persist to disk or external storage

    Examples:
        >>> # Create a shared memory pool
        >>> memory_pool = SharedMemoryPoolNode()
        >>>
        >>> # Write memory from an agent
        >>> result = memory_pool.execute(
        ...     action="write",
        ...     agent_id="researcher_001",
        ...     content="Found correlation between X and Y",
        ...     tags=["research", "correlation", "data"],
        ...     importance=0.8,
        ...     segment="findings"
        ... )
        >>> assert result["success"] == True
        >>> assert result["memory_id"] is not None
        >>>
        >>> # Read with attention filter
        >>> memories = memory_pool.execute(
        ...     action="read",
        ...     agent_id="analyst_001",
        ...     attention_filter={
        ...         "tags": ["correlation"],
        ...         "importance_threshold": 0.7,
        ...         "window_size": 5
        ...     }
        ... )
        >>> assert len(memories["memories"]) > 0
        >>>
        >>> # Subscribe to specific segments
        >>> memory_pool.execute(
        ...     action="subscribe",
        ...     agent_id="monitor_001",
        ...     segments=["findings", "alerts"]
        ... )
        >>>
        >>> # Semantic query across all memories
        >>> results = memory_pool.execute(
        ...     action="query",
        ...     query="correlation analysis",
        ...     top_k=3
        ... )
    """

    def __init__(self, name=None, **kwargs):
        # Accept name parameter and pass all kwargs to parent
        if name:
            kwargs["name"] = name
        super().__init__(**kwargs)
        self.memory_segments = defaultdict(deque)
        self.agent_subscriptions = defaultdict(set)
        self.attention_indices = defaultdict(lambda: defaultdict(list))
        self.memory_id_counter = 0
        self.max_segment_size = 1000

    def get_parameters(self) -> Dict[str, NodeParameter]:
        return {
            "action": NodeParameter(
                name="action",
                type=str,
                required=False,
                default="read",
                description="Action to perform: 'write', 'read', 'subscribe', 'query'",
            ),
            "agent_id": NodeParameter(
                name="agent_id",
                type=str,
                required=False,
                default="system",
                description="ID of the agent performing the action",
            ),
            "content": NodeParameter(
                name="content",
                type=Any,
                required=False,
                description="Content to write to memory (for write action)",
            ),
            "tags": NodeParameter(
                name="tags",
                type=list,
                required=False,
                default=[],
                description="Tags to categorize the memory",
            ),
            "importance": NodeParameter(
                name="importance",
                type=float,
                required=False,
                default=0.5,
                description="Importance score (0.0 to 1.0)",
            ),
            "segment": NodeParameter(
                name="segment",
                type=str,
                required=False,
                default="general",
                description="Memory segment to write to",
            ),
            "attention_filter": NodeParameter(
                name="attention_filter",
                type=dict,
                required=False,
                default={},
                description="Filter criteria for reading memories",
            ),
            "context": NodeParameter(
                name="context",
                type=dict,
                required=False,
                default={},
                description="Additional context for the memory",
            ),
            "query": NodeParameter(
                name="query",
                type=str,
                required=False,
                description="Search query for semantic memory search",
            ),
            "segments": NodeParameter(
                name="segments",
                type=list,
                required=False,
                default=["general"],
                description="Memory segments to subscribe to",
            ),
        }

    def run(self, **kwargs) -> Dict[str, Any]:
        """
        Execute memory pool operations.

        This method routes requests to appropriate handlers based on the action
        parameter, supporting write, read, subscribe, and query operations.

        Args:
            **kwargs: Operation parameters including:
                action (str): Operation type ('write', 'read', 'subscribe', 'query')
                Additional parameters specific to each action

        Returns:
            Dict[str, Any]: Operation results with 'success' status and action-specific data

        Raises:
            No exceptions raised - errors returned in response dict

        Side Effects:
            Modifies internal memory state for write operations
            Updates subscription lists for subscribe operations
        """
        action = kwargs.get("action")

        if action == "write":
            return self._write_memory(kwargs)
        elif action == "read":
            return self._read_with_attention(kwargs)
        elif action == "subscribe":
            return self._subscribe_agent(kwargs)
        elif action == "query":
            return self._semantic_query(kwargs)
        elif action == "metrics":
            return self._get_metrics()
        else:
            return {"success": False, "error": f"Unknown action: {action}"}

    def _write_memory(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Write information to shared pool with metadata."""
        self.memory_id_counter += 1
        memory_item = {
            "id": f"mem_{self.memory_id_counter}",
            "content": kwargs["content"],
            "agent_id": kwargs["agent_id"],
            "timestamp": time.time(),
            "datetime": datetime.now().isoformat(),
            "tags": kwargs.get("tags", []),
            "importance": kwargs.get("importance", 0.5),
            "context": kwargs.get("context", {}),
            "access_count": 0,
        }

        # Store in appropriate segment
        segment = kwargs.get("segment", "general")
        self.memory_segments[segment].append(memory_item)

        # Maintain segment size limit
        if len(self.memory_segments[segment]) > self.max_segment_size:
            self.memory_segments[segment].popleft()

        # Update attention indices
        self._update_attention_indices(memory_item, segment)

        # Get relevant agents
        relevant_agents = self._get_relevant_agents(memory_item, segment)

        return {
            "success": True,
            "memory_id": memory_item["id"],
            "segment": segment,
            "notified_agents": list(relevant_agents),
            "timestamp": memory_item["timestamp"],
        }

    def _read_with_attention(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Read relevant memories based on attention filter."""
        agent_id = kwargs["agent_id"]
        attention_filter = kwargs.get("attention_filter", {})

        relevant_memories = []

        # Apply attention mechanism
        for segment, memories in self.memory_segments.items():
            if self._matches_attention_filter(segment, attention_filter):
                for memory in memories:
                    relevance_score = self._calculate_relevance(
                        memory, attention_filter, agent_id
                    )
                    if relevance_score > attention_filter.get("threshold", 0.3):
                        memory["access_count"] += 1
                        relevant_memories.append(
                            {
                                **memory,
                                "relevance_score": relevance_score,
                                "segment": segment,
                            }
                        )

        # Sort by relevance and recency
        relevant_memories.sort(
            key=lambda x: (x["relevance_score"], x["timestamp"]), reverse=True
        )

        # Limit to attention window
        window_size = attention_filter.get("window_size", 10)
        selected_memories = relevant_memories[:window_size]

        return {
            "success": True,
            "memories": selected_memories,
            "total_available": len(relevant_memories),
            "segments_scanned": list(self.memory_segments.keys()),
            "agent_id": agent_id,
        }

    def _subscribe_agent(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Subscribe an agent to specific memory segments or tags."""
        agent_id = kwargs["agent_id"]
        segments = kwargs.get("segments", ["general"])
        tags = kwargs.get("tags", [])

        for segment in segments:
            self.agent_subscriptions[segment].add(agent_id)

        # Store subscription preferences
        if not hasattr(self, "agent_preferences"):
            self.agent_preferences = {}

        self.agent_preferences[agent_id] = {
            "segments": segments,
            "tags": tags,
            "attention_filter": kwargs.get("attention_filter", {}),
        }

        return {
            "success": True,
            "agent_id": agent_id,
            "subscribed_segments": segments,
            "subscribed_tags": tags,
        }

    def _semantic_query(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Perform semantic search across memories."""
        query = kwargs.get("query", "")
        kwargs["agent_id"]

        # Simple keyword matching for now (can be enhanced with embeddings)
        matching_memories = []
        query_lower = query.lower()

        for segment, memories in self.memory_segments.items():
            for memory in memories:
                content_str = str(memory.get("content", "")).lower()
                if query_lower in content_str:
                    score = content_str.count(query_lower) / len(content_str.split())
                    matching_memories.append(
                        {**memory, "match_score": score, "segment": segment}
                    )

        # Sort by match score
        matching_memories.sort(key=lambda x: x["match_score"], reverse=True)

        return {
            "success": True,
            "query": query,
            "results": matching_memories[:10],
            "total_matches": len(matching_memories),
        }

    def _update_attention_indices(self, memory_item: Dict[str, Any], segment: str):
        """Update indices for efficient attention-based retrieval."""
        # Index by tags
        for tag in memory_item.get("tags", []):
            self.attention_indices["tags"][tag].append(memory_item["id"])

        # Index by agent
        agent_id = memory_item["agent_id"]
        self.attention_indices["agents"][agent_id].append(memory_item["id"])

        # Index by importance level
        importance = memory_item["importance"]
        if importance >= 0.8:
            self.attention_indices["importance"]["high"].append(memory_item["id"])
        elif importance >= 0.5:
            self.attention_indices["importance"]["medium"].append(memory_item["id"])
        else:
            self.attention_indices["importance"]["low"].append(memory_item["id"])

    def _matches_attention_filter(
        self, segment: str, attention_filter: Dict[str, Any]
    ) -> bool:
        """Check if a segment matches the attention filter."""
        # Check segment filter
        if "segments" in attention_filter:
            if segment not in attention_filter["segments"]:
                return False

        return True

    def _calculate_relevance(
        self, memory: Dict[str, Any], attention_filter: Dict[str, Any], agent_id: str
    ) -> float:
        """Calculate relevance score for a memory item."""
        score = 0.0
        weights = attention_filter.get(
            "weights", {"tags": 0.3, "importance": 0.3, "recency": 0.2, "agent": 0.2}
        )

        # Tag matching
        if "tags" in attention_filter:
            filter_tags = set(attention_filter["tags"])
            memory_tags = set(memory.get("tags", []))
            if filter_tags & memory_tags:
                score += (
                    weights.get("tags", 0.3)
                    * len(filter_tags & memory_tags)
                    / len(filter_tags)
                )

        # Importance threshold
        importance_threshold = attention_filter.get("importance_threshold", 0.0)
        if memory.get("importance", 0) >= importance_threshold:
            score += weights.get("importance", 0.3) * memory["importance"]

        # Recency
        current_time = time.time()
        age_seconds = current_time - memory["timestamp"]
        recency_window = attention_filter.get("recency_window", 3600)  # 1 hour default
        if age_seconds < recency_window:
            recency_score = 1.0 - (age_seconds / recency_window)
            score += weights.get("recency", 0.2) * recency_score

        # Agent affinity
        if "preferred_agents" in attention_filter:
            if memory["agent_id"] in attention_filter["preferred_agents"]:
                score += weights.get("agent", 0.2)

        return min(score, 1.0)

    def _get_relevant_agents(
        self, memory_item: Dict[str, Any], segment: str
    ) -> Set[str]:
        """Get agents that should be notified about this memory."""
        relevant_agents = set()

        # Agents subscribed to this segment
        relevant_agents.update(self.agent_subscriptions.get(segment, set()))

        # Agents with matching tag subscriptions
        if hasattr(self, "agent_preferences"):
            for agent_id, prefs in self.agent_preferences.items():
                if any(
                    tag in memory_item.get("tags", []) for tag in prefs.get("tags", [])
                ):
                    relevant_agents.add(agent_id)

        # Remove the writing agent
        relevant_agents.discard(memory_item["agent_id"])

        return relevant_agents

    def _get_metrics(self) -> Dict[str, Any]:
        """Get memory pool metrics."""
        total_memories = sum(
            len(memories) for memories in self.memory_segments.values()
        )

        return {
            "success": True,
            "total_memories": total_memories,
            "segments": list(self.memory_segments.keys()),
            "segment_sizes": {
                segment: len(memories)
                for segment, memories in self.memory_segments.items()
            },
            "total_agents": len(self.agent_subscriptions),
            "memory_id_counter": self.memory_id_counter,
        }


@register_node()
class A2AAgentNode(LLMAgentNode):
    """
    Enhanced LLM agent with agent-to-agent communication capabilities.

    This node extends the standard LLMAgentNode with sophisticated A2A communication
    features, enabling agents to share insights through a shared memory pool, enhance
    their context with relevant information from other agents, and collaborate
    effectively on complex tasks.

    Design Philosophy:
        A2AAgentNode represents an intelligent agent that can both contribute to and
        benefit from collective intelligence. It automatically extracts insights from
        its responses and shares them with other agents while selectively attending
        to relevant information from the shared memory pool. This creates an emergent
        collaborative intelligence system.

    Upstream Dependencies:
        - QueryAnalysisNode: Provides analyzed queries and context
        - TeamFormationNode: Assigns roles and capabilities to agents
        - A2ACoordinatorNode: Delegates tasks and coordinates activities
        - SharedMemoryPoolNode: Provides access to shared memories

    Downstream Consumers:
        - SharedMemoryPoolNode: Receives insights and discoveries
        - A2ACoordinatorNode: Reports progress and results
        - SolutionEvaluatorNode: Provides solutions for evaluation
        - Other A2AAgentNodes: Indirect consumers through shared memory

    Configuration:
        Inherits all configuration from LLMAgentNode plus A2A-specific parameters
        for memory pool integration, attention filtering, and collaboration modes.

    Implementation Details:
        - Automatically extracts insights from LLM responses
        - Enhances prompts with relevant context from shared memory
        - Supports multiple collaboration modes (cooperative, competitive, hierarchical)
        - Tracks conversation context and shares key discoveries
        - Implements attention filtering to prevent information overload

    Error Handling:
        - Gracefully handles missing memory pool connections
        - Falls back to standard LLM behavior if A2A features fail
        - Validates insight extraction to prevent malformed memories

    Side Effects:
        - Writes insights to SharedMemoryPoolNode after each interaction
        - Maintains conversation history for context
        - May influence other agents through shared memories

    Examples:
        >>> # Create an A2A agent with specific expertise
        >>> agent = A2AAgentNode()
        >>>
        >>> # Execute with A2A features
        >>> result = agent.execute(
        ...     agent_id="researcher_001",
        ...     agent_role="research_specialist",
        ...     provider="openai",
        ...     model="gpt-4",
        ...     messages=[{
        ...         "role": "user",
        ...         "content": "Analyze the impact of AI on productivity"
        ...     }],
        ...     memory_pool=memory_pool_instance,
        ...     attention_filter={
        ...         "tags": ["productivity", "AI", "research"],
        ...         "importance_threshold": 0.7
        ...     },
        ...     collaboration_mode="cooperative"
        ... )
        >>> assert result["success"] == True
        >>> assert "insights_generated" in result["a2a_metadata"]
        >>>
        >>> # Agent automatically shares insights
        >>> insights = result["a2a_metadata"]["insights_generated"]
        >>> assert len(insights) > 0
        >>> assert all("content" in i for i in insights)
    """

    def __init__(self, name=None, **kwargs):
        # Accept name parameter and pass all kwargs to parent
        if name:
            kwargs["name"] = name
        super().__init__(**kwargs)
        self.local_memory = deque(maxlen=100)
        self.communication_log = deque(maxlen=50)

    def get_parameters(self) -> Dict[str, NodeParameter]:
        # Inherit all LLMAgentNode parameters
        params = super().get_parameters()

        # Add A2A-specific parameters
        params.update(
            {
                "agent_id": NodeParameter(
                    name="agent_id",
                    type=str,
                    required=False,
                    default=f"agent_{uuid.uuid4().hex[:8]}",
                    description="Unique identifier for this agent",
                ),
                "agent_role": NodeParameter(
                    name="agent_role",
                    type=str,
                    required=False,
                    default="general",
                    description="Role of the agent (researcher, analyst, coordinator, etc.)",
                ),
                "memory_pool": NodeParameter(
                    name="memory_pool",
                    type=Node,
                    required=False,
                    description="Reference to SharedMemoryPoolNode",
                ),
                "attention_filter": NodeParameter(
                    name="attention_filter",
                    type=dict,
                    required=False,
                    default={},
                    description="Criteria for filtering relevant information from shared memory",
                ),
                "communication_config": NodeParameter(
                    name="communication_config",
                    type=dict,
                    required=False,
                    default={"mode": "direct", "protocol": "json-rpc"},
                    description="A2A communication settings",
                ),
                "collaboration_mode": NodeParameter(
                    name="collaboration_mode",
                    type=str,
                    required=False,
                    default="cooperative",
                    description="How agent collaborates: cooperative, competitive, hierarchical",
                ),
                "peer_agents": NodeParameter(
                    name="peer_agents",
                    type=list,
                    required=False,
                    default=[],
                    description="List of peer agent IDs for direct communication",
                ),
            }
        )
        return params

    def run(self, **kwargs) -> Dict[str, Any]:
        """
        Execute the A2A agent with enhanced communication capabilities.

        This method extends the base LLMAgentNode execution by:
        1. Reading relevant context from the shared memory pool
        2. Enhancing the prompt with shared discoveries
        3. Executing the LLM call with enriched context
        4. Extracting insights from the response
        5. Sharing valuable insights back to the memory pool

        Args:
            **kwargs: All LLMAgentNode parameters plus:
                agent_id (str): Unique identifier for this agent
                agent_role (str): Agent's role in the team
                memory_pool (SharedMemoryPoolNode): Shared memory instance
                attention_filter (dict): Criteria for filtering memories
                collaboration_mode (str): How agent collaborates

        Returns:
            Dict[str, Any]: LLMAgentNode response plus:
                a2a_metadata: Information about A2A interactions including
                    insights_generated, shared_context_used, collaboration_stats

        Side Effects:
            Writes insights to shared memory pool if available
            Updates internal conversation history
        """
        # Extract A2A specific parameters
        agent_id = kwargs.get("agent_id")
        agent_role = kwargs.get("agent_role", "general")
        memory_pool = kwargs.get("memory_pool")
        attention_filter = kwargs.get("attention_filter", {})

        # Read from shared memory if available
        shared_context = []
        if memory_pool:
            memory_result = memory_pool.execute(
                action="read", agent_id=agent_id, attention_filter=attention_filter
            )
            if memory_result.get("success"):
                shared_context = memory_result.get("memories", [])

        # Store provider and model for use in summarization
        self._current_provider = kwargs.get("provider", "mock")
        self._current_model = kwargs.get("model", "mock-model")

        # Enhance messages with shared context
        messages = kwargs.get("messages", [])
        if shared_context:
            context_summary = self._summarize_shared_context(shared_context)
            enhanced_system_prompt = f"""You are agent {agent_id} with role: {agent_role}.

Relevant shared context from other agents:
{context_summary}

{kwargs.get('system_prompt', '')}"""
            kwargs["system_prompt"] = enhanced_system_prompt

        # Execute LLM agent
        result = super().run(**kwargs)

        # If successful, write insights to shared memory
        if result.get("success") and memory_pool:
            response_content = result.get("response", {}).get("content", "")

            # Use LLM to extract insights if provider supports it
            use_llm_extraction = kwargs.get("use_llm_insight_extraction", True)
            provider = kwargs.get("provider", "mock")

            if use_llm_extraction and provider not in ["mock"]:
                # Use LLM to extract and analyze insights
                insights = self._extract_insights_with_llm(
                    response_content, agent_role, agent_id, kwargs
                )
            else:
                # Fallback to rule-based extraction
                insights = self._extract_insights(response_content, agent_role)

            # Track insight statistics
            insight_stats = {
                "total": len(insights),
                "high_importance": sum(1 for i in insights if i["importance"] >= 0.8),
                "by_type": {},
                "extraction_method": (
                    "llm"
                    if use_llm_extraction and provider not in ["mock"]
                    else "rule-based"
                ),
            }

            for insight in insights:
                # Update type statistics
                insight_type = insight.get("metadata", {}).get(
                    "insight_type", "general"
                )
                insight_stats["by_type"][insight_type] = (
                    insight_stats["by_type"].get(insight_type, 0) + 1
                )

                # Write to memory pool with enhanced context
                memory_pool.execute(
                    action="write",
                    agent_id=agent_id,
                    content=insight["content"],
                    tags=insight.get("tags", [agent_role]),
                    importance=insight.get("importance", 0.6),
                    segment=insight.get("segment", agent_role),
                    context={
                        "source_message": messages[-1] if messages else None,
                        "agent_role": agent_role,
                        "insight_metadata": insight.get("metadata", {}),
                        "timestamp": kwargs.get("timestamp", time.time()),
                    },
                )

            # Store insights in local memory for agent's own reference
            for insight in insights:
                self.local_memory.append(
                    {
                        "type": "insight",
                        "content": insight["content"],
                        "importance": insight["importance"],
                        "timestamp": time.time(),
                    }
                )

        # Add A2A metadata to result
        result["a2a_metadata"] = {
            "agent_id": agent_id,
            "agent_role": agent_role,
            "shared_context_used": len(shared_context),
            "insights_generated": len(insights) if "insights" in locals() else 0,
            "insight_statistics": insight_stats if "insight_stats" in locals() else {},
            "memory_pool_active": memory_pool is not None,
            "local_memory_size": len(self.local_memory),
        }

        return result

    def _summarize_shared_context(self, shared_context: List[Dict[str, Any]]) -> str:
        """Summarize shared context for inclusion in prompt."""
        if not shared_context:
            return "No relevant shared context available."

        # For small context, use simple formatting
        if len(shared_context) <= 3:
            summary_parts = []
            for memory in shared_context:
                agent_id = memory.get("agent_id", "unknown")
                content = memory.get("content", "")
                importance = memory.get("importance", 0)
                tags = ", ".join(memory.get("tags", []))

                summary_parts.append(
                    f"- Agent {agent_id} ({importance:.1f} importance, tags: {tags}): {content}"
                )
            return "\n".join(summary_parts)

        # For larger context, use LLM to create intelligent summary
        return self._summarize_with_llm(shared_context)

    def _summarize_with_llm(self, shared_context: List[Dict[str, Any]]) -> str:
        """Use LLM to create an intelligent summary of shared context."""

        # Prepare context for summarization
        context_items = []
        for memory in shared_context[:10]:  # Process up to 10 most relevant
            context_items.append(
                {
                    "agent": memory.get("agent_id", "unknown"),
                    "content": memory.get("content", ""),
                    "importance": memory.get("importance", 0),
                    "tags": memory.get("tags", []),
                    "type": memory.get("context", {})
                    .get("insight_metadata", {})
                    .get("insight_type", "general"),
                }
            )

        # Create summarization prompt
        summarization_prompt = f"""Summarize the following shared insights from other agents into a concise, actionable briefing.

Shared Context Items:
{json.dumps(context_items, indent=2)}

Create a summary that:
1. Groups related insights by theme
2. Highlights the most important findings (importance >= 0.8)
3. Identifies consensus points where multiple agents agree
4. Notes any contradictions or disagreements
5. Extracts key metrics and data points
6. Suggests areas needing further investigation

Format the summary as a brief paragraph (max 200 words) that another agent can quickly understand and act upon.
Focus on actionable intelligence rather than just listing what each agent said."""

        try:
            # Use the current agent's LLM configuration for summarization
            provider = getattr(self, "_current_provider", "mock")
            model = getattr(self, "_current_model", "mock-model")

            if provider not in ["mock"]:
                summary_kwargs = {
                    "provider": provider,
                    "model": model,
                    "temperature": 0.3,
                    "messages": [
                        {
                            "role": "system",
                            "content": "You are an expert at synthesizing information from multiple sources into clear, actionable summaries.",
                        },
                        {"role": "user", "content": summarization_prompt},
                    ],
                    "max_tokens": 300,
                }

                result = super().run(**summary_kwargs)

                if result.get("success"):
                    summary = result.get("response", {}).get("content", "")
                    if summary:
                        return f"Shared Context Summary:\n{summary}"
        except Exception:
            pass

        # Fallback to simple summary
        summary_parts = []
        for memory in shared_context[:5]:
            agent_id = memory.get("agent_id", "unknown")
            content = memory.get("content", "")[:100] + "..."
            importance = memory.get("importance", 0)

            summary_parts.append(f"- {agent_id} [{importance:.1f}]: {content}")

        return "Recent insights:\n" + "\n".join(summary_parts)

    def _extract_insights(self, response: str, agent_role: str) -> List[Dict[str, Any]]:
        """Extract important insights from agent response using advanced NLP techniques."""
        insights = []

        # Enhanced keyword patterns for different types of insights
        insight_patterns = {
            "findings": {
                "keywords": [
                    "found",
                    "discovered",
                    "identified",
                    "revealed",
                    "uncovered",
                    "detected",
                    "observed",
                    "noted",
                    "recognized",
                ],
                "importance": 0.8,
                "tags": ["finding", "discovery"],
            },
            "conclusions": {
                "keywords": [
                    "conclude",
                    "therefore",
                    "thus",
                    "hence",
                    "consequently",
                    "as a result",
                    "in summary",
                    "overall",
                    "in conclusion",
                ],
                "importance": 0.9,
                "tags": ["conclusion", "summary"],
            },
            "comparisons": {
                "keywords": [
                    "compared to",
                    "versus",
                    "vs",
                    "better than",
                    "worse than",
                    "improvement",
                    "decline",
                    "increase",
                    "decrease",
                    "change",
                ],
                "importance": 0.7,
                "tags": ["comparison", "analysis"],
            },
            "recommendations": {
                "keywords": [
                    "recommend",
                    "suggest",
                    "should",
                    "advise",
                    "propose",
                    "best practice",
                    "optimal",
                    "ideal",
                ],
                "importance": 0.85,
                "tags": ["recommendation", "advice"],
            },
            "problems": {
                "keywords": [
                    "issue",
                    "problem",
                    "challenge",
                    "limitation",
                    "constraint",
                    "difficulty",
                    "obstacle",
                    "concern",
                    "risk",
                ],
                "importance": 0.75,
                "tags": ["problem", "challenge"],
            },
            "metrics": {
                "keywords": [
                    "percent",
                    "%",
                    "score",
                    "rating",
                    "benchmark",
                    "metric",
                    "measurement",
                    "performance",
                    "efficiency",
                ],
                "importance": 0.65,
                "tags": ["metric", "measurement"],
            },
        }

        # Process response by sentences for better context
        import re

        sentences = re.split(r"[.!?]+", response)

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence or len(sentence) < 20:
                continue

            # Calculate importance based on multiple factors
            importance = 0.5  # Base importance
            matched_tags = set([agent_role])
            insight_type = None

            # Check for insight patterns
            sentence_lower = sentence.lower()
            for pattern_type, pattern_info in insight_patterns.items():
                if any(
                    keyword in sentence_lower for keyword in pattern_info["keywords"]
                ):
                    importance = max(importance, pattern_info["importance"])
                    matched_tags.update(pattern_info["tags"])
                    insight_type = pattern_type
                    break

            # Extract entities and add as tags
            # Simple entity extraction - numbers, capitalized words, technical terms
            numbers = re.findall(r"\b\d+(?:\.\d+)?%?\b", sentence)
            if numbers:
                matched_tags.add("quantitative")
                importance += 0.1

            # Extract technical terms (words with specific patterns)
            tech_terms = re.findall(r"\b[A-Z][a-z]+(?:[A-Z][a-z]+)*\b", sentence)
            if tech_terms:
                matched_tags.update(
                    [term.lower() for term in tech_terms[:2]]
                )  # Limit tags

            # Boost importance for sentences with multiple capital letters (proper nouns)
            capital_words = re.findall(r"\b[A-Z][A-Za-z]+\b", sentence)
            if len(capital_words) > 2:
                importance += 0.05

            # Check for structured data (JSON, lists, etc.)
            if any(char in sentence for char in ["{", "[", ":", "-"]):
                matched_tags.add("structured")
                importance += 0.05

            # Determine segment based on insight type and role
            segment = f"{agent_role}_{insight_type}" if insight_type else agent_role

            # Create insight with rich metadata
            insight = {
                "content": sentence,
                "importance": min(importance, 1.0),  # Cap at 1.0
                "tags": list(matched_tags),
                "segment": segment,
                "metadata": {
                    "length": len(sentence),
                    "has_numbers": bool(numbers),
                    "insight_type": insight_type or "general",
                    "extracted_entities": tech_terms[:3] if tech_terms else [],
                },
            }

            insights.append(insight)

        # Sort by importance and return top insights
        insights.sort(key=lambda x: x["importance"], reverse=True)

        # Dynamic limit based on response quality
        # If we have many high-quality insights, return more
        high_quality_count = sum(1 for i in insights if i["importance"] >= 0.7)
        limit = min(5 if high_quality_count > 3 else 3, len(insights))

        return insights[:limit]

    def _extract_insights_with_llm(
        self,
        response: str,
        agent_role: str,
        agent_id: str,
        original_kwargs: Dict[str, Any],
    ) -> List[Dict[str, Any]]:
        """Use LLM to extract and analyze insights from the response."""

        # Prepare a focused prompt for insight extraction
        insight_extraction_prompt = f"""You are an AI insight extraction specialist. Analyze the following response and extract the most important insights.

Agent Role: {agent_role}
Original Response:
{response}

Extract 3-5 key insights from this response. For each insight:
1. Summarize the core finding or conclusion (max 100 words)
2. Assign an importance score (0.0-1.0) based on:
- Novelty and uniqueness (0.3 weight)
- Impact on decision-making (0.4 weight)
- Supporting evidence quality (0.3 weight)
3. Categorize the insight type: finding, conclusion, comparison, recommendation, problem, metric, or pattern
4. Extract key entities mentioned (products, technologies, metrics, etc.)
5. Suggest relevant tags for categorization

Output your analysis as a JSON array with this structure:
[
  {{
    "content": "The core insight summarized concisely",
    "importance": 0.85,
    "type": "finding",
    "entities": ["MacBook Air M3", "M2", "battery life"],
    "tags": ["performance", "comparison", "hardware"],
    "evidence": "Brief supporting evidence from the text"
  }}
]

Focus on insights that would be valuable for other agents to know. Ensure the JSON is valid."""

        try:
            # Create a sub-call to the LLM for insight extraction
            extraction_kwargs = {
                "provider": original_kwargs.get("provider", "ollama"),
                "model": original_kwargs.get("model", "mistral"),
                "temperature": 0.3,  # Lower temperature for more focused extraction
                "messages": [
                    {
                        "role": "system",
                        "content": "You are an expert at analyzing text and extracting structured insights. Always respond with valid JSON.",
                    },
                    {"role": "user", "content": insight_extraction_prompt},
                ],
                "max_tokens": original_kwargs.get("max_tokens", 1000),
            }

            # Execute LLM call for insight extraction
            extraction_result = super().run(**extraction_kwargs)

            if extraction_result.get("success"):
                extracted_content = extraction_result.get("response", {}).get(
                    "content", ""
                )

                # Parse the JSON response
                import json
                import re

                # Try to extract JSON from the response
                json_match = re.search(r"\[.*?\]", extracted_content, re.DOTALL)
                if json_match:
                    try:
                        extracted_insights = json.loads(json_match.group())

                        # Convert to our insight format
                        insights = []
                        for item in extracted_insights[:5]:  # Limit to 5 insights
                            insight = {
                                "content": item.get("content", ""),
                                "importance": min(
                                    max(item.get("importance", 0.5), 0.0), 1.0
                                ),
                                "tags": item.get("tags", []) + [agent_role],
                                "segment": f"{agent_role}_{item.get('type', 'general')}",
                                "metadata": {
                                    "insight_type": item.get("type", "general"),
                                    "extracted_entities": item.get("entities", []),
                                    "evidence": item.get("evidence", ""),
                                    "llm_extracted": True,
                                },
                            }
                            insights.append(insight)

                        return insights
                    except json.JSONDecodeError:
                        pass

        except Exception:
            # Log the error but don't fail - fall back to rule-based extraction
            pass

        # If LLM extraction fails, fall back to rule-based
        return self._extract_insights(response, agent_role)


@register_node()
class A2ACoordinatorNode(CycleAwareNode):
    """
    Coordinates communication and task delegation between A2A agents.

    This node acts as a central orchestrator for multi-agent systems, managing task
    distribution, consensus building, and workflow coordination. It implements various
    coordination strategies to optimize agent utilization and ensure effective
    collaboration across heterogeneous agent teams.

    Design Philosophy:
        The A2ACoordinatorNode serves as a decentralized coordination mechanism that
        enables agents to self-organize without requiring a fixed hierarchy. It provides
        flexible coordination patterns (delegation, broadcast, consensus, workflow)
        that can be composed to create sophisticated multi-agent behaviors.

    Upstream Dependencies:
        - ProblemAnalyzerNode: Provides decomposed tasks and requirements
        - TeamFormationNode: Supplies formed teams and agent assignments
        - QueryAnalysisNode: Delivers analyzed queries needing coordination
        - OrchestrationManagerNode: High-level orchestration directives

    Downstream Consumers:
        - A2AAgentNode: Receives task assignments and coordination messages
        - SharedMemoryPoolNode: Stores coordination decisions and progress
        - SolutionEvaluatorNode: Evaluates coordinated solution components
        - ConvergenceDetectorNode: Monitors coordination effectiveness

    Configuration:
        The coordinator adapts its behavior based on the coordination strategy
        selected and the characteristics of available agents. No static configuration
        is required, but runtime parameters control coordination behavior.

    Implementation Details:
        - Maintains registry of active agents with capabilities and status
        - Implements multiple delegation strategies (best_match, round_robin, auction)
        - Tracks task assignments and agent performance metrics
        - Supports both synchronous and asynchronous coordination patterns
        - Manages consensus voting with configurable thresholds

    Error Handling:
        - Handles agent failures with automatic reassignment
        - Validates task requirements before delegation
        - Falls back to broadcast when specific agents unavailable
        - Returns partial results if consensus cannot be reached

    Side Effects:
        - Maintains internal agent registry across calls
        - Updates agent performance metrics after task completion
        - May modify task priorities based on agent availability

    Examples:
        >>> # Create coordinator
        >>> coordinator = A2ACoordinatorNode()
        >>>
        >>> # Register agents
        >>> coordinator.execute(
        ...     action="register",
        ...     agent_info={
        ...         "id": "analyst_001",
        ...         "skills": ["data_analysis", "statistics"],
        ...         "role": "analyst"
        ...     }
        ... )
        >>>
        >>> # Delegate task with best match strategy
        >>> result = coordinator.execute(
        ...     action="delegate",
        ...     task={
        ...         "type": "analysis",
        ...         "description": "Analyze sales data",
        ...         "required_skills": ["data_analysis"],
        ...         "priority": "high"
        ...     },
        ...     available_agents=[
        ...         {"id": "analyst_001", "skills": ["data_analysis"]},
        ...         {"id": "researcher_001", "skills": ["research"]}
        ...     ],
        ...     coordination_strategy="best_match"
        ... )
        >>> assert result["success"] == True
        >>> assert result["assigned_agent"] == "analyst_001"
        >>>
        >>> # Build consensus among agents
        >>> consensus_result = coordinator.execute(
        ...     action="consensus",
        ...     proposal="Implement new feature X",
        ...     voting_agents=["agent1", "agent2", "agent3"],
        ...     consensus_threshold=0.66
        ... )
    """

    def __init__(self, name=None, **kwargs):
        # Accept name parameter and pass all kwargs to parent
        if name:
            kwargs["name"] = name
        super().__init__(**kwargs)
        self.registered_agents = {}
        self.task_queue = deque()
        self.consensus_sessions = {}

    def get_parameters(self) -> Dict[str, NodeParameter]:
        return {
            "action": NodeParameter(
                name="action",
                type=str,
                required=False,
                default="coordinate",
                description="Action: 'register', 'delegate', 'broadcast', 'consensus', 'coordinate'",
            ),
            "agent_info": NodeParameter(
                name="agent_info",
                type=dict,
                required=False,
                description="Information about agent (for registration)",
            ),
            "task": NodeParameter(
                name="task",
                type=dict,
                required=False,
                description="Task to delegate or coordinate",
            ),
            "message": NodeParameter(
                name="message",
                type=dict,
                required=False,
                description="Message to broadcast",
            ),
            "consensus_proposal": NodeParameter(
                name="consensus_proposal",
                type=dict,
                required=False,
                description="Proposal for consensus",
            ),
            "available_agents": NodeParameter(
                name="available_agents",
                type=list,
                required=False,
                default=[],
                description="List of available agents",
            ),
            "coordination_strategy": NodeParameter(
                name="coordination_strategy",
                type=str,
                required=False,
                default="best_match",
                description="Strategy: 'best_match', 'round_robin', 'broadcast', 'auction'",
            ),
        }

    def run(self, **kwargs) -> Dict[str, Any]:
        """
        Execute coordination action with cycle awareness.

        Routes coordination requests to appropriate handlers based on action
        parameter. Tracks coordination history and agent performance across
        iterations for cycle-aware optimization.

        Args:
            context: Execution context with cycle information
            **kwargs: Action-specific parameters including:
                action (str): Type of coordination action
                agent_info (dict): Agent registration details
                task (dict): Task to delegate
                available_agents (list): Agents available for tasks
                coordination_strategy (str): Delegation strategy

        Returns:
            Dict[str, Any]: Action results with cycle metadata including:
                success (bool): Whether action succeeded
                cycle_info (dict): Iteration and history information
                Additional action-specific fields

        Raises:
            None - errors returned in result dictionary

        Side Effects:
            Updates internal agent registry
            Modifies coordination history
            Updates agent performance metrics

        Examples:
            >>> coordinator = A2ACoordinatorNode()
            >>> result = coordinator.execute(context,
            ...     action=\"delegate\",
            ...     task={\"type\": \"analysis\", \"required_skills\": [\"data\"]},
            ...     coordination_strategy=\"best_match\"
            ... )
            >>> assert result[\"success\"] == True
        """
        context = kwargs.get("context", {})
        action = kwargs.get("action")

        # Get cycle information using CycleAwareNode helpers
        iteration = self.get_iteration(context)
        is_first = self.is_first_iteration(context)
        prev_state = self.get_previous_state(context)

        # Initialize cycle-aware coordination state
        if is_first:
            self.log_cycle_info(context, f"Starting coordination with action: {action}")
            coordination_history = []
            agent_performance_history = {}
        else:
            coordination_history = prev_state.get("coordination_history", [])
            agent_performance_history = prev_state.get("agent_performance", {})

        # Execute the coordination action
        if action == "register":
            result = self._register_agent(kwargs, context)
        elif action == "delegate":
            result = self._delegate_task(
                kwargs, context, coordination_history, agent_performance_history
            )
        elif action == "broadcast":
            result = self._broadcast_message(kwargs, context)
        elif action == "consensus":
            result = self._manage_consensus(kwargs, context, coordination_history)
        elif action == "coordinate":
            result = self._coordinate_workflow(kwargs, context, iteration)
        else:
            result = {"success": False, "error": f"Unknown action: {action}"}

        # Track coordination history for cycle learning
        coordination_event = {
            "iteration": iteration,
            "action": action,
            "success": result.get("success", False),
            "timestamp": time.time(),
            "details": {k: v for k, v in result.items() if k not in ["success"]},
        }
        coordination_history.append(coordination_event)

        # Update agent performance tracking
        if action == "delegate" and result.get("success"):
            agent_id = result.get("delegated_to")
            if agent_id:
                if agent_id not in agent_performance_history:
                    agent_performance_history[agent_id] = {
                        "assignments": 0,
                        "success_rate": 1.0,
                    }
                agent_performance_history[agent_id]["assignments"] += 1

        # Add cycle-aware metadata to result
        result.update(
            {
                "cycle_info": {
                    "iteration": iteration,
                    "coordination_history_length": len(coordination_history),
                    "active_agents": len(self.registered_agents),
                    "performance_tracked_agents": len(agent_performance_history),
                }
            }
        )

        # Log progress
        if iteration % 5 == 0:  # Log every 5 iterations
            self.log_cycle_info(
                context,
                f"Coordination stats: {len(coordination_history)} events, {len(self.registered_agents)} agents",
            )

        # Persist state for next iteration
        return {
            **result,
            **self.set_cycle_state(
                {
                    "coordination_history": coordination_history[
                        -50:
                    ],  # Keep last 50 events
                    "agent_performance": agent_performance_history,
                }
            ),
        }

    def _register_agent(
        self, kwargs: Dict[str, Any], context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Register an agent with the coordinator."""
        agent_info = kwargs.get("agent_info", {})
        agent_id = agent_info.get("id")

        if not agent_id:
            return {"success": False, "error": "Agent ID required"}

        self.registered_agents[agent_id] = {
            "id": agent_id,
            "skills": agent_info.get("skills", []),
            "role": agent_info.get("role", "general"),
            "status": "available",
            "registered_at": time.time(),
            "task_count": 0,
            "success_rate": 1.0,
        }

        return {
            "success": True,
            "agent_id": agent_id,
            "registered_agents": list(self.registered_agents.keys()),
        }

    def _delegate_task(
        self,
        kwargs: Dict[str, Any],
        context: Dict[str, Any],
        coordination_history: List[Dict],
        agent_performance: Dict,
    ) -> Dict[str, Any]:
        """Delegate task to most suitable agent with cycle-aware optimization."""
        task = kwargs.get("task", {})
        available_agents = kwargs.get("available_agents", [])
        strategy = kwargs.get("coordination_strategy", "best_match")

        if not available_agents:
            available_agents = [
                agent
                for agent in self.registered_agents.values()
                if agent["status"] == "available"
            ]

        if not available_agents:
            return {"success": False, "error": "No available agents"}

        # Use cycle-aware agent selection based on performance history
        iteration = self.get_iteration(context)

        # Select agent based on strategy with cycle learning
        if strategy == "best_match":
            selected_agent = self._find_best_match_cycle_aware(
                task, available_agents, agent_performance, iteration
            )
        elif strategy == "round_robin":
            # Cycle-aware round-robin based on iteration
            agent_index = iteration % len(available_agents)
            selected_agent = available_agents[agent_index]
        elif strategy == "auction":
            selected_agent = self._run_auction_cycle_aware(
                task, available_agents, agent_performance
            )
        else:
            selected_agent = available_agents[0]

        if not selected_agent:
            return {"success": False, "error": "No suitable agent found"}

        # Update agent status
        agent_id = selected_agent.get("id")
        if agent_id in self.registered_agents:
            self.registered_agents[agent_id]["status"] = "busy"
            self.registered_agents[agent_id]["task_count"] += 1

        return {
            "success": True,
            "delegated_to": agent_id,
            "task": task,
            "strategy": strategy,
            "agent_performance_score": agent_performance.get(agent_id, {}).get(
                "success_rate", 1.0
            ),
            "iteration": iteration,
        }

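A small standalone example of the round-robin arm of the strategy selection above, showing how the iteration index cycles through the available agents; the agent ids are made up.

# Round-robin selection as in the "round_robin" branch of _delegate_task.
available_agents = [{"id": "agent-a"}, {"id": "agent-b"}, {"id": "agent-c"}]

for iteration in range(5):
    selected = available_agents[iteration % len(available_agents)]
    print(iteration, selected["id"])
# 0 agent-a, 1 agent-b, 2 agent-c, 3 agent-a, 4 agent-b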
    def _broadcast_message(
        self, kwargs: Dict[str, Any], context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Broadcast message to relevant agents."""
        message = kwargs.get("message", {})
        target_roles = message.get("target_roles", [])
        target_skills = message.get("target_skills", [])

        recipients = []
        for agent in self.registered_agents.values():
            # Check role match
            if target_roles and agent["role"] not in target_roles:
                continue

            # Check skills match
            if target_skills:
                if not any(skill in agent["skills"] for skill in target_skills):
                    continue

            recipients.append(agent["id"])

        return {
            "success": True,
            "recipients": recipients,
            "message": message,
            "broadcast_time": time.time(),
        }

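The broadcast filter above is a plain role and skill intersection; a standalone version with made-up agents and targets:

# Role/skill filtering as in _broadcast_message (inputs are hypothetical).
agents = [
    {"id": "a1", "role": "analyst", "skills": ["analysis"]},
    {"id": "a2", "role": "coder", "skills": ["python", "analysis"]},
]
message = {"target_roles": ["coder"], "target_skills": ["python"]}

recipients = [
    a["id"]
    for a in agents
    if (not message["target_roles"] or a["role"] in message["target_roles"])
    and (not message["target_skills"] or any(s in a["skills"] for s in message["target_skills"]))
]
print(recipients)  # ['a2']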
    def _manage_consensus(
        self,
        kwargs: Dict[str, Any],
        context: Dict[str, Any],
        coordination_history: List[Dict],
    ) -> Dict[str, Any]:
        """Manage consensus building among agents."""
        proposal = kwargs.get("consensus_proposal", {})
        session_id = proposal.get("session_id", str(uuid.uuid4()))

        if session_id not in self.consensus_sessions:
            self.consensus_sessions[session_id] = {
                "proposal": proposal,
                "votes": {},
                "started_at": time.time(),
                "status": "open",
            }

        session = self.consensus_sessions[session_id]

        # Handle vote
        if "vote" in kwargs:
            agent_id = kwargs.get("agent_id")
            vote = kwargs.get("vote")
            session["votes"][agent_id] = vote

        # Check if consensus reached
        total_agents = len(self.registered_agents)
        votes_cast = len(session["votes"])

        if votes_cast >= total_agents * 0.5:  # Simple majority
            yes_votes = sum(1 for v in session["votes"].values() if v)
            consensus_reached = yes_votes > votes_cast / 2

            session["status"] = "completed"
            session["result"] = "approved" if consensus_reached else "rejected"

            return {
                "success": True,
                "session_id": session_id,
                "consensus_reached": consensus_reached,
                "result": session["result"],
                "votes": session["votes"],
            }

        return {
            "success": True,
            "session_id": session_id,
            "status": session["status"],
            "votes_cast": votes_cast,
            "votes_needed": int(total_agents * 0.5),
        }

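Worked numbers for the simple-majority check above, reproduced standalone: with four registered agents, tallying starts once at least two votes are in, and the proposal is approved only if yes-votes exceed half of the votes cast. All ballots below are hypothetical.

# Simple-majority tally as in _manage_consensus.
registered_agents = ["a1", "a2", "a3", "a4"]
votes = {"a1": True, "a2": False, "a3": True}  # hypothetical ballots

total_agents = len(registered_agents)              # 4
votes_cast = len(votes)                            # 3 >= 4 * 0.5, so tally now
if votes_cast >= total_agents * 0.5:
    yes_votes = sum(1 for v in votes.values() if v)    # 2
    consensus_reached = yes_votes > votes_cast / 2     # 2 > 1.5 -> True
    print("approved" if consensus_reached else "rejected")  # approved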
    def _coordinate_workflow(
        self, kwargs: Dict[str, Any], context: Dict[str, Any], iteration: int
    ) -> Dict[str, Any]:
        """Coordinate a multi-agent workflow."""
        workflow_spec = kwargs.get("task", {})
        steps = workflow_spec.get("steps", [])

        coordination_plan = []
        for step in steps:
            required_skills = step.get("required_skills", [])
            available_agents = [
                agent
                for agent in self.registered_agents.values()
                if any(skill in agent["skills"] for skill in required_skills)
            ]

            if available_agents:
                selected_agent = self._find_best_match(step, available_agents)
                coordination_plan.append(
                    {
                        "step": step["name"],
                        "assigned_to": selected_agent["id"],
                        "skills_matched": [
                            s for s in required_skills if s in selected_agent["skills"]
                        ],
                    }
                )
            else:
                coordination_plan.append(
                    {
                        "step": step["name"],
                        "assigned_to": None,
                        "error": "No agent with required skills",
                    }
                )

        return {
            "success": True,
            "workflow": workflow_spec.get("name", "unnamed"),
            "coordination_plan": coordination_plan,
            "total_steps": len(steps),
            "assigned_steps": sum(1 for p in coordination_plan if p.get("assigned_to")),
        }

    def _find_best_match(
        self, task: Dict[str, Any], agents: List[Dict[str, Any]]
    ) -> Optional[Dict[str, Any]]:
        """Find best matching agent for task."""
        required_skills = task.get("required_skills", [])
        if not required_skills:
            return agents[0] if agents else None

        best_agent = None
        best_score = 0

        for agent in agents:
            agent_skills = set(agent.get("skills", []))
            required_set = set(required_skills)

            # Calculate match score
            matches = agent_skills & required_set
            score = len(matches) / len(required_set) if required_set else 0

            # Consider success rate
            success_rate = agent.get("success_rate", 1.0)
            score *= success_rate

            if score > best_score:
                best_score = score
                best_agent = agent

        return best_agent

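The score computed in _find_best_match is skill coverage scaled by the agent's success rate; a standalone re-computation with made-up agents:

# Skill-coverage * success-rate scoring as in _find_best_match.
required = {"python", "sql"}
agents = [
    {"id": "a1", "skills": {"python"}, "success_rate": 1.0},         # 1/2 * 1.0 = 0.50
    {"id": "a2", "skills": {"python", "sql"}, "success_rate": 0.8},  # 2/2 * 0.8 = 0.80
]

best = max(
    agents,
    key=lambda a: (len(a["skills"] & required) / len(required)) * a["success_rate"],
)
print(best["id"])  # a2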
    def _run_auction(
        self, task: Dict[str, Any], agents: List[Dict[str, Any]]
    ) -> Optional[Dict[str, Any]]:
        """Run auction-based task assignment."""
        # Simplified auction - agents bid based on their capability
        bids = []

        for agent in agents:
            # Calculate bid based on skill match and availability
            required_skills = set(task.get("required_skills", []))
            agent_skills = set(agent.get("skills", []))

            skill_match = (
                len(required_skills & agent_skills) / len(required_skills)
                if required_skills
                else 1.0
            )
            workload = 1.0 - (agent.get("task_count", 0) / 10.0)  # Lower bid if busy

            bid_value = skill_match * workload * agent.get("success_rate", 1.0)

            bids.append({"agent": agent, "bid": bid_value})

        # Select highest bidder
        if bids:
            bids.sort(key=lambda x: x["bid"], reverse=True)
            return bids[0]["agent"]

        return None

    def _find_best_match_cycle_aware(
        self,
        task: Dict[str, Any],
        agents: List[Dict[str, Any]],
        agent_performance: Dict[str, Dict],
        iteration: int,
    ) -> Optional[Dict[str, Any]]:
        """Find best matching agent using cycle-aware performance data."""
        required_skills = task.get("required_skills", [])
        if not required_skills:
            # When no specific skills required, prefer agents with better historical performance
            if agent_performance:
                best_agent = None
                best_score = 0
                for agent in agents:
                    agent_id = agent.get("id")
                    perf = agent_performance.get(
                        agent_id, {"success_rate": 1.0, "assignments": 0}
                    )
                    # Balance experience and success rate
                    experience_factor = min(
                        perf["assignments"] / 10.0, 1.0
                    )  # Max at 10 assignments
                    score = perf["success_rate"] * (0.7 + 0.3 * experience_factor)
                    if score > best_score:
                        best_score = score
                        best_agent = agent
                return best_agent or (agents[0] if agents else None)
            return agents[0] if agents else None

        best_agent = None
        best_score = 0

        for agent in agents:
            agent_id = agent.get("id")
            agent_skills = set(agent.get("skills", []))
            required_set = set(required_skills)

            # Calculate skill match score
            matches = agent_skills & required_set
            skill_score = len(matches) / len(required_set) if required_set else 0

            # Get performance history
            perf = agent_performance.get(
                agent_id, {"success_rate": 1.0, "assignments": 0}
            )
            performance_score = perf["success_rate"]

            # Experience bonus (agents with more assignments get slight preference)
            experience_bonus = min(perf["assignments"] * 0.05, 0.2)  # Max 20% bonus

            # Cycle adaptation: prefer different agents in different iterations to explore
            diversity_factor = 1.0
            if iteration > 0 and agent_performance:
                recent_assignments = sum(
                    1 for p in agent_performance.values() if p["assignments"] > 0
                )
                if recent_assignments > 0:
                    agent_usage_ratio = perf["assignments"] / recent_assignments
                    if agent_usage_ratio > 0.5:  # Over-used agent
                        diversity_factor = 0.8  # Slight penalty

            # Combined score
            final_score = (
                skill_score * performance_score * diversity_factor
            ) + experience_bonus

            if final_score > best_score:
                best_score = final_score
                best_agent = agent

        return best_agent

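The cycle-aware variant above layers an experience bonus and a diversity penalty onto the basic score; a standalone re-computation with hypothetical numbers, where the usage ratio is a simplified stand-in for the method's recent-assignment bookkeeping:

# final_score = skill_score * success_rate * diversity_factor + experience_bonus,
# with made-up inputs.
skill_score = 1.0                                   # covers every required skill
success_rate = 0.9
assignments = 6

experience_bonus = min(assignments * 0.05, 0.2)     # capped at a 20% bonus -> 0.2
usage_ratio = 0.75                                  # hypothetical share of recent work
diversity_factor = 0.8 if usage_ratio > 0.5 else 1.0    # over-used agent is penalized
final_score = skill_score * success_rate * diversity_factor + experience_bonus
print(round(final_score, 3))  # 0.92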
    def _run_auction_cycle_aware(
        self,
        task: Dict[str, Any],
        agents: List[Dict[str, Any]],
        agent_performance: Dict[str, Dict],
    ) -> Optional[Dict[str, Any]]:
        """Run auction-based task assignment with cycle-aware bidding."""
        bids = []

        for agent in agents:
            agent_id = agent.get("id")

            # Calculate bid based on skill match and availability (original logic)
            required_skills = set(task.get("required_skills", []))
            agent_skills = set(agent.get("skills", []))

            skill_match = (
                len(required_skills & agent_skills) / len(required_skills)
                if required_skills
                else 1.0
            )
            workload = 1.0 - (agent.get("task_count", 0) / 10.0)  # Lower bid if busy

            # Enhance with performance history
            perf = agent_performance.get(
                agent_id, {"success_rate": 1.0, "assignments": 0}
            )
            performance_factor = perf["success_rate"]

            # Experience factor (slight preference for experienced agents)
            experience_factor = min(
                1.0 + (perf["assignments"] * 0.02), 1.2
            )  # Max 20% boost

            bid_value = skill_match * workload * performance_factor * experience_factor

            bids.append({"agent": agent, "bid": bid_value})

        # Select highest bidder
        if bids:
            bids.sort(key=lambda x: x["bid"], reverse=True)
            return bids[0]["agent"]

        return None
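To close, a standalone re-computation of the bid formula used in _run_auction_cycle_aware; every input below is hypothetical.

# bid = skill_match * workload * success_rate * experience_factor.
skill_match = 2 / 3                            # agent covers 2 of 3 required skills
workload = 1.0 - (4 / 10.0)                    # 4 tasks already assigned -> 0.6
success_rate = 0.95
experience_factor = min(1.0 + 4 * 0.02, 1.2)   # 1.08, capped at 1.2

bid = skill_match * workload * success_rate * experience_factor
print(round(bid, 3))  # 0.41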