kailash 0.8.1__py3-none-any.whl → 0.8.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kailash/nodes/ai/a2a.py CHANGED
@@ -13,13 +13,987 @@ import json
  import time
  import uuid
  from collections import defaultdict, deque
+ from dataclasses import dataclass, field
  from datetime import datetime
- from typing import Any, Dict, List, Optional, Set
+ from enum import Enum
+ from typing import Any, Dict, List, Optional, Set, Tuple

  from kailash.nodes.ai.llm_agent import LLMAgentNode
  from kailash.nodes.base import Node, NodeParameter, register_node
  from kailash.nodes.base_cycle_aware import CycleAwareNode

+ # ============================================================================
+ # ENHANCED A2A COMPONENTS: Agent Cards and Task Management
+ # ============================================================================
+
+
+ class CapabilityLevel(Enum):
+     """Agent capability proficiency levels."""
+
+     NOVICE = "novice"
+     INTERMEDIATE = "intermediate"
+     ADVANCED = "advanced"
+     EXPERT = "expert"
+
+
+ class CollaborationStyle(Enum):
+     """Agent collaboration preferences."""
+
+     INDEPENDENT = "independent"  # Prefers solo work
+     COOPERATIVE = "cooperative"  # Works well in teams
+     LEADER = "leader"  # Takes charge of coordination
+     SUPPORT = "support"  # Provides assistance to others
+
+
+ class TaskState(Enum):
+     """Task lifecycle states."""
+
+     CREATED = "created"
+     ASSIGNED = "assigned"
+     IN_PROGRESS = "in_progress"
+     AWAITING_REVIEW = "awaiting_review"
+     ITERATING = "iterating"
+     COMPLETED = "completed"
+     FAILED = "failed"
+     CANCELLED = "cancelled"
+
+
+ class TaskPriority(Enum):
+     """Task priority levels."""
+
+     LOW = "low"
+     MEDIUM = "medium"
+     HIGH = "high"
+     CRITICAL = "critical"
+
+
+ class InsightType(Enum):
+     """Types of insights that can be generated."""
+
+     DISCOVERY = "discovery"  # New finding
+     ANALYSIS = "analysis"  # Deep analysis result
+     RECOMMENDATION = "recommendation"  # Actionable recommendation
+     WARNING = "warning"  # Potential issue
+     OPPORTUNITY = "opportunity"  # Improvement opportunity
+     PATTERN = "pattern"  # Identified pattern
+     ANOMALY = "anomaly"  # Unusual finding
+
+
+ @dataclass
+ class Capability:
+     """Detailed capability description."""
+
+     name: str
+     domain: str
+     level: CapabilityLevel
+     description: str
+     keywords: List[str] = field(default_factory=list)
+     examples: List[str] = field(default_factory=list)
+     constraints: List[str] = field(default_factory=list)
+
+     def matches_requirement(self, requirement: str) -> float:
+         """Calculate match score for a requirement (0.0-1.0)."""
+         requirement_lower = requirement.lower()
+
+         # Direct name match
+         if self.name.lower() in requirement_lower:
+             return 0.9
+
+         # Domain match
+         if self.domain.lower() in requirement_lower:
+             return 0.7
+
+         # Keyword matches
+         keyword_matches = sum(
+             1 for keyword in self.keywords if keyword.lower() in requirement_lower
+         )
+         if keyword_matches > 0:
+             return min(0.6 + (keyword_matches * 0.1), 0.8)
+
+         # Description similarity
+         desc_words = set(self.description.lower().split())
+         req_words = set(requirement_lower.split())
+         overlap = len(desc_words & req_words)
+         if overlap > 0:
+             return min(0.3 + (overlap * 0.05), 0.5)
+
+         return 0.0
+
+
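For illustration, here is how the scoring tiers above play out. This is an editor's sketch against the classes introduced in this diff, not code shipped in the wheel:

    cap = Capability(
        name="data_analysis",
        domain="research",
        level=CapabilityLevel.ADVANCED,
        description="Analyzes complex datasets to extract insights",
        keywords=["statistics", "patterns"],
    )
    cap.matches_requirement("run a data_analysis pass")      # 0.9 (direct name match)
    cap.matches_requirement("support a research effort")     # 0.7 (domain match)
    cap.matches_requirement("find statistics and patterns")  # 0.8 (0.6 + 2 keyword hits * 0.1)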
+ @dataclass
+ class PerformanceMetrics:
+     """Agent performance tracking."""
+
+     total_tasks: int = 0
+     successful_tasks: int = 0
+     failed_tasks: int = 0
+
+     average_response_time_ms: float = 0.0
+     average_insight_quality: float = 0.0
+     average_confidence_score: float = 0.0
+
+     insights_generated: int = 0
+     unique_insights: int = 0
+     actionable_insights: int = 0
+
+     collaboration_score: float = 0.0
+     reliability_score: float = 0.0
+
+     last_active: Optional[datetime] = None
+
+     @property
+     def success_rate(self) -> float:
+         """Calculate task success rate."""
+         if self.total_tasks == 0:
+             return 0.0
+         return self.successful_tasks / self.total_tasks
+
+     @property
+     def insight_quality_score(self) -> float:
+         """Calculate overall insight quality score."""
+         if self.insights_generated == 0:
+             return 0.0
+
+         uniqueness = self.unique_insights / self.insights_generated
+         actionability = self.actionable_insights / self.insights_generated
+
+         return (
+             self.average_insight_quality * 0.4 + uniqueness * 0.3 + actionability * 0.3
+         )
+
+
+ @dataclass
+ class ResourceRequirements:
+     """Agent resource constraints and requirements."""
+
+     min_memory_mb: int = 512
+     max_memory_mb: int = 4096
+
+     min_tokens: int = 100
+     max_tokens: int = 4000
+
+     requires_gpu: bool = False
+     requires_internet: bool = True
+
+     estimated_cost_per_task: float = 0.0
+     max_concurrent_tasks: int = 5
+
+     supported_models: List[str] = field(default_factory=list)
+     required_apis: List[str] = field(default_factory=list)
+
+
+ @dataclass
+ class A2AAgentCard:
+     """
+     Enhanced agent card for rich capability description.
+
+     Provides comprehensive agent metadata for optimal matching,
+     team formation, and performance tracking.
+     """
+
+     # Identity
+     agent_id: str
+     agent_name: str
+     agent_type: str
+     version: str
+
+     # Capabilities
+     primary_capabilities: List[Capability] = field(default_factory=list)
+     secondary_capabilities: List[Capability] = field(default_factory=list)
+     emerging_capabilities: List[Capability] = field(default_factory=list)
+
+     # Collaboration
+     collaboration_style: CollaborationStyle = CollaborationStyle.COOPERATIVE
+     preferred_team_size: int = 3
+     compatible_agents: List[str] = field(default_factory=list)
+     incompatible_agents: List[str] = field(default_factory=list)
+
+     # Performance
+     performance: PerformanceMetrics = field(default_factory=PerformanceMetrics)
+
+     # Resources
+     resources: ResourceRequirements = field(default_factory=ResourceRequirements)
+
+     # Metadata
+     description: str = ""
+     tags: List[str] = field(default_factory=list)
+     created_at: datetime = field(default_factory=datetime.now)
+     updated_at: datetime = field(default_factory=datetime.now)
+
+     # Specializations
+     specializations: Dict[str, Any] = field(default_factory=dict)
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to dictionary for serialization."""
+         return {
+             "agent_id": self.agent_id,
+             "agent_name": self.agent_name,
+             "agent_type": self.agent_type,
+             "version": self.version,
+             "primary_capabilities": [
+                 {
+                     "name": cap.name,
+                     "domain": cap.domain,
+                     "level": cap.level.value,
+                     "description": cap.description,
+                     "keywords": cap.keywords,
+                 }
+                 for cap in self.primary_capabilities
+             ],
+             "secondary_capabilities": [
+                 {
+                     "name": cap.name,
+                     "domain": cap.domain,
+                     "level": cap.level.value,
+                     "description": cap.description,
+                 }
+                 for cap in self.secondary_capabilities
+             ],
+             "collaboration_style": self.collaboration_style.value,
+             "performance": {
+                 "success_rate": self.performance.success_rate,
+                 "insight_quality_score": self.performance.insight_quality_score,
+                 "average_response_time_ms": self.performance.average_response_time_ms,
+                 "reliability_score": self.performance.reliability_score,
+             },
+             "resources": {
+                 "max_tokens": self.resources.max_tokens,
+                 "requires_gpu": self.resources.requires_gpu,
+                 "estimated_cost_per_task": self.resources.estimated_cost_per_task,
+             },
+             "tags": self.tags,
+             "description": self.description,
+         }
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "A2AAgentCard":
+         """Create from dictionary."""
+         # Convert capability dictionaries back to objects
+         primary_caps = [
+             Capability(
+                 name=cap["name"],
+                 domain=cap["domain"],
+                 level=CapabilityLevel(cap["level"]),
+                 description=cap.get("description", ""),
+                 keywords=cap.get("keywords", []),
+             )
+             for cap in data.get("primary_capabilities", [])
+         ]
+
+         secondary_caps = [
+             Capability(
+                 name=cap["name"],
+                 domain=cap["domain"],
+                 level=CapabilityLevel(cap["level"]),
+                 description=cap.get("description", ""),
+             )
+             for cap in data.get("secondary_capabilities", [])
+         ]
+
+         # Create performance metrics
+         perf_data = data.get("performance", {})
+         performance = PerformanceMetrics()
+         if perf_data:
+             performance.average_insight_quality = perf_data.get(
+                 "insight_quality_score", 0.0
+             )
+             performance.average_response_time_ms = perf_data.get(
+                 "average_response_time_ms", 0.0
+             )
+             performance.reliability_score = perf_data.get("reliability_score", 0.0)
+
+         # Create resource requirements
+         res_data = data.get("resources", {})
+         resources = ResourceRequirements()
+         if res_data:
+             resources.max_tokens = res_data.get("max_tokens", 4000)
+             resources.requires_gpu = res_data.get("requires_gpu", False)
+             resources.estimated_cost_per_task = res_data.get(
+                 "estimated_cost_per_task", 0.0
+             )
+
+         return cls(
+             agent_id=data["agent_id"],
+             agent_name=data["agent_name"],
+             agent_type=data["agent_type"],
+             version=data.get("version", "1.0.0"),
+             primary_capabilities=primary_caps,
+             secondary_capabilities=secondary_caps,
+             collaboration_style=CollaborationStyle(
+                 data.get("collaboration_style", "cooperative")
+             ),
+             performance=performance,
+             resources=resources,
+             tags=data.get("tags", []),
+             description=data.get("description", ""),
+         )
+
+     def calculate_match_score(self, requirements: List[str]) -> float:
+         """
+         Calculate how well this agent matches given requirements.
+
+         Returns a score between 0.0 and 1.0.
+         """
+         if not requirements:
+             return 0.5  # Neutral score for no requirements
+
+         total_score = 0.0
+
+         for requirement in requirements:
+             # Check primary capabilities (highest weight)
+             primary_scores = [
+                 cap.matches_requirement(requirement) * 1.0
+                 for cap in self.primary_capabilities
+             ]
+
+             # Check secondary capabilities (medium weight)
+             secondary_scores = [
+                 cap.matches_requirement(requirement) * 0.7
+                 for cap in self.secondary_capabilities
+             ]
+
+             # Check emerging capabilities (lower weight)
+             emerging_scores = [
+                 cap.matches_requirement(requirement) * 0.4
+                 for cap in self.emerging_capabilities
+             ]
+
+             # Take the best match for this requirement
+             all_scores = primary_scores + secondary_scores + emerging_scores
+             best_score = max(all_scores) if all_scores else 0.0
+             total_score += best_score
+
+         # Average across all requirements
+         avg_score = total_score / len(requirements)
+
+         # Apply performance modifier
+         performance_modifier = (
+             self.performance.success_rate * 0.3
+             + self.performance.insight_quality_score * 0.7
+         )
+
+         # Weighted final score
+         final_score = avg_score * 0.7 + performance_modifier * 0.3
+
+         return min(max(final_score, 0.0), 1.0)
+
+     def is_compatible_with(self, other_agent_id: str) -> bool:
+         """Check if compatible with another agent."""
+         if other_agent_id in self.incompatible_agents:
+             return False
+
+         # Could add more sophisticated compatibility logic here
+         return True
+
+     def update_performance(self, task_result: Dict[str, Any]) -> None:
+         """Update performance metrics based on task result."""
+         self.performance.total_tasks += 1
+
+         if task_result.get("success", False):
+             self.performance.successful_tasks += 1
+         else:
+             self.performance.failed_tasks += 1
+
+         # Update response time
+         if "response_time_ms" in task_result:
+             # Simple moving average
+             alpha = 0.1  # Learning rate
+             self.performance.average_response_time_ms = (
+                 alpha * task_result["response_time_ms"]
+                 + (1 - alpha) * self.performance.average_response_time_ms
+             )
+
+         # Update insight metrics
+         if "insights" in task_result:
+             insights = task_result["insights"]
+             self.performance.insights_generated += len(insights)
+
+             # Track unique insights (simple heuristic)
+             unique_count = len(
+                 set(insight.get("key", str(i)) for i, insight in enumerate(insights))
+             )
+             self.performance.unique_insights += unique_count
+
+             # Track actionable insights
+             actionable_count = sum(
+                 1 for insight in insights if insight.get("actionable", False)
+             )
+             self.performance.actionable_insights += actionable_count
+
+         # Update quality score
+         if "quality_score" in task_result:
+             alpha = 0.1
+             self.performance.average_insight_quality = (
+                 alpha * task_result["quality_score"]
+                 + (1 - alpha) * self.performance.average_insight_quality
+             )
+
+         self.performance.last_active = datetime.now()
+         self.updated_at = datetime.now()
+
+
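One consequence of the weighting in calculate_match_score worth noting: a brand-new card has success_rate and insight_quality_score of 0.0, so the performance modifier contributes nothing and even a perfect capability match tops out at 0.9 * 0.7 = 0.63. An editor's sketch, not code shipped in the wheel:

    card = A2AAgentCard(
        agent_id="agent-1",
        agent_name="Researcher",
        agent_type="research",
        version="1.0.0",
        primary_capabilities=[
            Capability(
                name="information_retrieval",
                domain="research",
                level=CapabilityLevel.EXPERT,
                description="Finds and synthesizes information",
                keywords=["search", "synthesis"],
            )
        ],
    )
    # Name match scores 0.9, performance modifier is 0.0 on a fresh card,
    # so the final score is 0.9 * 0.7 + 0.0 * 0.3 = 0.63.
    card.calculate_match_score(["information_retrieval on market trends"])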
+ @dataclass
+ class Insight:
+     """Individual insight from task execution."""
+
+     insight_id: str = field(default_factory=lambda: str(uuid.uuid4()))
+     content: str = ""
+     insight_type: InsightType = InsightType.ANALYSIS
+     confidence: float = 0.0
+
+     # Quality metrics
+     novelty_score: float = 0.0  # How new/unique is this insight
+     actionability_score: float = 0.0  # How actionable is it
+     impact_score: float = 0.0  # Potential impact if acted upon
+
+     # Metadata
+     generated_by: str = ""  # Agent ID
+     generated_at: datetime = field(default_factory=datetime.now)
+
+     # Related insights
+     builds_on: List[str] = field(default_factory=list)  # IDs of insights this builds on
+     contradicts: List[str] = field(
+         default_factory=list
+     )  # IDs of insights this contradicts
+
+     # Supporting data
+     evidence: List[Dict[str, Any]] = field(default_factory=list)
+     keywords: List[str] = field(default_factory=list)
+
+     @property
+     def quality_score(self) -> float:
+         """Calculate overall quality score."""
+         return (
+             self.confidence * 0.3
+             + self.novelty_score * 0.3
+             + self.actionability_score * 0.3
+             + self.impact_score * 0.1
+         )
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to dictionary."""
+         return {
+             "insight_id": self.insight_id,
+             "content": self.content,
+             "type": self.insight_type.value,
+             "confidence": self.confidence,
+             "quality_score": self.quality_score,
+             "novelty_score": self.novelty_score,
+             "actionability_score": self.actionability_score,
+             "impact_score": self.impact_score,
+             "generated_by": self.generated_by,
+             "generated_at": self.generated_at.isoformat(),
+             "keywords": self.keywords,
+         }
+
+
+ @dataclass
+ class TaskIteration:
+     """Record of a single task iteration."""
+
+     iteration_number: int
+     started_at: datetime
+     completed_at: Optional[datetime] = None
+
+     # What changed
+     adjustments_made: List[str] = field(default_factory=list)
+     reason_for_iteration: str = ""
+
+     # Results
+     insights_generated: List[Insight] = field(default_factory=list)
+     quality_improvement: float = 0.0  # Change in quality from previous iteration
+
+     # Agent involvement
+     agents_involved: List[str] = field(default_factory=list)
+     consensus_score: float = 0.0
+
+
+ @dataclass
+ class A2ATask:
+     """
+     Structured task with full lifecycle management.
+
+     Replaces dictionary-based tasks with rich objects that track
+     state transitions, insight collection, and quality metrics.
+     """
+
+     # Identity
+     task_id: str = field(default_factory=lambda: str(uuid.uuid4()))
+     name: str = ""
+     description: str = ""
+
+     # State management
+     state: TaskState = TaskState.CREATED
+     priority: TaskPriority = TaskPriority.MEDIUM
+
+     # Assignment
+     assigned_to: List[str] = field(default_factory=list)  # Agent IDs
+     delegated_by: Optional[str] = None  # Coordinator ID
+
+     # Requirements
+     requirements: List[str] = field(default_factory=list)
+     constraints: Dict[str, Any] = field(default_factory=dict)
+     success_criteria: List[str] = field(default_factory=list)
+
+     # Timeline
+     created_at: datetime = field(default_factory=datetime.now)
+     assigned_at: Optional[datetime] = None
+     started_at: Optional[datetime] = None
+     completed_at: Optional[datetime] = None
+     deadline: Optional[datetime] = None
+
+     # Insights - Primary deliverable
+     insights: List[Insight] = field(default_factory=list)
+
+     # Iterations
+     iterations: List[TaskIteration] = field(default_factory=list)
+     max_iterations: int = 3
+     current_iteration: int = 0
+
+     # Quality tracking
+     target_quality_score: float = 0.85
+     current_quality_score: float = 0.0
+
+     # Context and memory
+     context: Dict[str, Any] = field(default_factory=dict)
+     memory_keys: List[str] = field(default_factory=list)  # Shared memory references
+
+     # Results
+     final_result: Optional[Dict[str, Any]] = None
+     error_message: Optional[str] = None
+
+     # Metadata
+     tags: List[str] = field(default_factory=list)
+     parent_task_id: Optional[str] = None
+     subtask_ids: List[str] = field(default_factory=list)
+
+     def transition_to(self, new_state: TaskState) -> bool:
+         """
+         Transition task to new state with validation.
+
+         Returns True if transition is valid, False otherwise.
+         """
+         valid_transitions = {
+             TaskState.CREATED: [TaskState.ASSIGNED, TaskState.CANCELLED],
+             TaskState.ASSIGNED: [TaskState.IN_PROGRESS, TaskState.CANCELLED],
+             TaskState.IN_PROGRESS: [
+                 TaskState.AWAITING_REVIEW,
+                 TaskState.FAILED,
+                 TaskState.CANCELLED,
+             ],
+             TaskState.AWAITING_REVIEW: [
+                 TaskState.ITERATING,
+                 TaskState.COMPLETED,
+                 TaskState.FAILED,
+             ],
+             TaskState.ITERATING: [
+                 TaskState.IN_PROGRESS,
+                 TaskState.FAILED,
+                 TaskState.CANCELLED,
+             ],
+             TaskState.COMPLETED: [],  # Terminal state
+             TaskState.FAILED: [TaskState.IN_PROGRESS],  # Can retry
+             TaskState.CANCELLED: [],  # Terminal state
+         }
+
+         if new_state not in valid_transitions.get(self.state, []):
+             return False
+
+         # Update timestamps
+         if new_state == TaskState.ASSIGNED:
+             self.assigned_at = datetime.now()
+         elif new_state == TaskState.IN_PROGRESS:
+             if not self.started_at:
+                 self.started_at = datetime.now()
+         elif new_state in [TaskState.COMPLETED, TaskState.FAILED, TaskState.CANCELLED]:
+             self.completed_at = datetime.now()
+
+         self.state = new_state
+         return True
+
+     def add_insight(self, insight: Insight) -> None:
+         """Add an insight to the task."""
+         self.insights.append(insight)
+         self._update_quality_score()
+
+     def start_iteration(self, reason: str, adjustments: List[str]) -> TaskIteration:
+         """Start a new iteration of the task."""
+         self.current_iteration += 1
+
+         iteration = TaskIteration(
+             iteration_number=self.current_iteration,
+             started_at=datetime.now(),
+             reason_for_iteration=reason,
+             adjustments_made=adjustments,
+         )
+
+         self.iterations.append(iteration)
+         self.transition_to(TaskState.ITERATING)
+
+         return iteration
+
+     def complete_iteration(
+         self,
+         insights: List[Insight],
+         agents_involved: List[str],
+         consensus_score: float = 0.0,
+     ) -> None:
+         """Complete the current iteration."""
+         if not self.iterations:
+             return
+
+         current = self.iterations[-1]
+         current.completed_at = datetime.now()
+         current.insights_generated = insights
+         current.agents_involved = agents_involved
+         current.consensus_score = consensus_score
+
+         # Calculate quality improvement
+         prev_quality = self.current_quality_score
+         self.insights.extend(insights)
+         self._update_quality_score()
+         current.quality_improvement = self.current_quality_score - prev_quality
+
+         # Transition back to in_progress
+         self.transition_to(TaskState.IN_PROGRESS)
+
+     def _update_quality_score(self) -> None:
+         """Update overall task quality score based on insights."""
+         if not self.insights:
+             self.current_quality_score = 0.0
+             return
+
+         # Average quality of all insights
+         avg_quality = sum(i.quality_score for i in self.insights) / len(self.insights)
+
+         # Bonus for unique insights
+         unique_content = len(set(i.content for i in self.insights))
+         uniqueness_bonus = min(unique_content / len(self.insights), 1.0) * 0.1
+
+         # Bonus for actionable insights
+         actionable_count = sum(1 for i in self.insights if i.actionability_score > 0.7)
+         actionability_bonus = (actionable_count / len(self.insights)) * 0.1
+
+         self.current_quality_score = min(
+             avg_quality + uniqueness_bonus + actionability_bonus, 1.0
+         )
+
+     @property
+     def is_complete(self) -> bool:
+         """Check if task is in a terminal state."""
+         return self.state in [
+             TaskState.COMPLETED,
+             TaskState.FAILED,
+             TaskState.CANCELLED,
+         ]
+
+     @property
+     def needs_iteration(self) -> bool:
+         """Check if task needs another iteration."""
+         return (
+             self.current_quality_score < self.target_quality_score
+             and self.current_iteration < self.max_iterations
+             and self.state == TaskState.AWAITING_REVIEW
+         )
+
+     @property
+     def duration(self) -> Optional[float]:
+         """Get task duration in seconds."""
+         if not self.started_at:
+             return None
+
+         end_time = self.completed_at or datetime.now()
+         return (end_time - self.started_at).total_seconds()
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to dictionary for serialization."""
+         return {
+             "task_id": self.task_id,
+             "name": self.name,
+             "description": self.description,
+             "state": self.state.value,
+             "priority": self.priority.value,
+             "assigned_to": self.assigned_to,
+             "requirements": self.requirements,
+             "created_at": self.created_at.isoformat(),
+             "started_at": self.started_at.isoformat() if self.started_at else None,
+             "completed_at": (
+                 self.completed_at.isoformat() if self.completed_at else None
+             ),
+             "insights": [i.to_dict() for i in self.insights],
+             "iterations": len(self.iterations),
+             "current_quality_score": self.current_quality_score,
+             "target_quality_score": self.target_quality_score,
+             "duration": self.duration,
+         }
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "A2ATask":
+         """Create task from dictionary (backward compatibility)."""
+         # Support old dictionary format
+         if "task_id" not in data:
+             # Legacy format
+             return cls(
+                 name=data.get("title", "Unnamed Task"),
+                 description=data.get("description", ""),
+                 requirements=data.get("requirements", []),
+                 context=data,
+             )
+
+         # New format
+         task = cls(
+             task_id=data["task_id"],
+             name=data["name"],
+             description=data.get("description", ""),
+             state=TaskState(data.get("state", "created")),
+             priority=TaskPriority(data.get("priority", "medium")),
+             requirements=data.get("requirements", []),
+         )
+
+         # Restore insights if present
+         if "insights" in data:
+             for insight_data in data["insights"]:
+                 insight = Insight(
+                     insight_id=insight_data.get("insight_id", str(uuid.uuid4())),
+                     content=insight_data.get("content", ""),
+                     confidence=insight_data.get("confidence", 0.0),
+                 )
+                 task.insights.append(insight)
+
+         return task
+
+
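An editor's sketch of the intended happy-path lifecycle, using only the classes above (illustrative, not shipped code):

    task = A2ATask(
        name="Summarize findings",
        description="Condense the research thread",
        requirements=["analysis"],
    )
    assert task.transition_to(TaskState.ASSIGNED)
    assert task.transition_to(TaskState.IN_PROGRESS)
    task.add_insight(
        Insight(content="X correlates with Y", confidence=0.9,
                novelty_score=0.8, actionability_score=0.8, impact_score=0.7)
    )
    assert task.transition_to(TaskState.AWAITING_REVIEW)
    if task.needs_iteration:  # quality below target and iterations remain
        task.start_iteration(reason="raise quality", adjustments=["add evidence"])
    else:
        task.transition_to(TaskState.COMPLETED)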
+ class TaskValidator:
+     """Validates task readiness and quality."""
+
+     @staticmethod
+     def validate_for_assignment(task: A2ATask) -> Tuple[bool, List[str]]:
+         """
+         Validate task is ready for assignment.
+
+         Returns (is_valid, list_of_issues).
+         """
+         issues = []
+
+         if not task.name:
+             issues.append("Task must have a name")
+
+         if not task.description:
+             issues.append("Task must have a description")
+
+         if not task.requirements:
+             issues.append("Task must have at least one requirement")
+
+         if task.state != TaskState.CREATED:
+             issues.append(f"Task must be in CREATED state, not {task.state.value}")
+
+         return len(issues) == 0, issues
+
+     @staticmethod
+     def validate_for_completion(task: A2ATask) -> Tuple[bool, List[str]]:
+         """
+         Validate task is ready for completion.
+
+         Returns (is_valid, list_of_issues).
+         """
+         issues = []
+
+         if not task.insights:
+             issues.append("Task must have at least one insight")
+
+         if task.current_quality_score < task.target_quality_score:
+             issues.append(
+                 f"Quality score {task.current_quality_score:.2f} "
+                 f"below target {task.target_quality_score:.2f}"
+             )
+
+         if task.state != TaskState.AWAITING_REVIEW:
+             issues.append(
+                 f"Task must be in AWAITING_REVIEW state, not {task.state.value}"
+             )
+
+         # Check success criteria
+         # This would need more sophisticated checking in practice
+         if task.success_criteria:
+             issues.append("Success criteria validation not yet implemented")
+
+         return len(issues) == 0, issues
+
+
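Note that, as written, validate_for_completion flags any task that defines success_criteria, so callers should treat that issue as informational until criteria checking lands. A minimal gating sketch (illustrative, not shipped code):

    ok, issues = TaskValidator.validate_for_assignment(task)
    if ok:
        task.transition_to(TaskState.ASSIGNED)
    else:
        print(f"Task not ready for assignment: {issues}")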
823
+ # Factory functions for common agent types
824
+
825
+
826
+ def create_research_agent_card(agent_id: str, agent_name: str) -> A2AAgentCard:
827
+ """Create a card for a research-focused agent."""
828
+ return A2AAgentCard(
829
+ agent_id=agent_id,
830
+ agent_name=agent_name,
831
+ agent_type="research",
832
+ version="1.0.0",
833
+ primary_capabilities=[
834
+ Capability(
835
+ name="information_retrieval",
836
+ domain="research",
837
+ level=CapabilityLevel.EXPERT,
838
+ description="Expert at finding and synthesizing information from multiple sources",
839
+ keywords=["search", "retrieval", "synthesis", "analysis"],
840
+ examples=[
841
+ "literature review",
842
+ "market research",
843
+ "competitive analysis",
844
+ ],
845
+ ),
846
+ Capability(
847
+ name="data_analysis",
848
+ domain="research",
849
+ level=CapabilityLevel.ADVANCED,
850
+ description="Analyzes complex datasets to extract insights",
851
+ keywords=["statistics", "patterns", "trends", "visualization"],
852
+ ),
853
+ ],
854
+ secondary_capabilities=[
855
+ Capability(
856
+ name="report_generation",
857
+ domain="documentation",
858
+ level=CapabilityLevel.ADVANCED,
859
+ description="Creates comprehensive research reports",
860
+ keywords=["writing", "documentation", "summaries"],
861
+ ),
862
+ ],
863
+ collaboration_style=CollaborationStyle.COOPERATIVE,
864
+ description="Specialized in comprehensive research and information synthesis",
865
+ tags=["research", "analysis", "documentation"],
866
+ )
867
+
868
+
869
+ def create_coding_agent_card(agent_id: str, agent_name: str) -> A2AAgentCard:
870
+ """Create a card for a coding-focused agent."""
871
+ return A2AAgentCard(
872
+ agent_id=agent_id,
873
+ agent_name=agent_name,
874
+ agent_type="coding",
875
+ version="1.0.0",
876
+ primary_capabilities=[
877
+ Capability(
878
+ name="code_generation",
879
+ domain="software_development",
880
+ level=CapabilityLevel.EXPERT,
881
+ description="Generates high-quality code in multiple languages",
882
+ keywords=["python", "javascript", "java", "implementation"],
883
+ examples=["API implementation", "algorithm design", "refactoring"],
884
+ ),
885
+ Capability(
886
+ name="debugging",
887
+ domain="software_development",
888
+ level=CapabilityLevel.ADVANCED,
889
+ description="Identifies and fixes bugs in complex codebases",
890
+ keywords=["troubleshooting", "error", "fix", "debug"],
891
+ ),
892
+ ],
893
+ secondary_capabilities=[
894
+ Capability(
895
+ name="code_review",
896
+ domain="software_development",
897
+ level=CapabilityLevel.ADVANCED,
898
+ description="Reviews code for quality, security, and best practices",
899
+ keywords=["review", "quality", "standards", "security"],
900
+ ),
901
+ ],
902
+ collaboration_style=CollaborationStyle.INDEPENDENT,
903
+ description="Expert software developer focused on code quality and implementation",
904
+ tags=["coding", "development", "debugging"],
905
+ )
906
+
907
+
908
+ def create_qa_agent_card(agent_id: str, agent_name: str) -> A2AAgentCard:
909
+ """Create a card for a QA/testing-focused agent."""
910
+ return A2AAgentCard(
911
+ agent_id=agent_id,
912
+ agent_name=agent_name,
913
+ agent_type="qa_testing",
914
+ version="1.0.0",
915
+ primary_capabilities=[
916
+ Capability(
917
+ name="test_design",
918
+ domain="quality_assurance",
919
+ level=CapabilityLevel.EXPERT,
920
+ description="Designs comprehensive test scenarios and edge cases",
921
+ keywords=["testing", "scenarios", "edge cases", "coverage"],
922
+ examples=["integration tests", "security tests", "performance tests"],
923
+ ),
924
+ Capability(
925
+ name="bug_detection",
926
+ domain="quality_assurance",
927
+ level=CapabilityLevel.EXPERT,
928
+ description="Identifies defects and quality issues systematically",
929
+ keywords=["bugs", "defects", "issues", "validation"],
930
+ ),
931
+ ],
932
+ collaboration_style=CollaborationStyle.SUPPORT,
933
+ description="Quality assurance specialist focused on comprehensive testing",
934
+ tags=["qa", "testing", "quality", "validation"],
935
+ )
936
+
937
+
938
+ def create_research_task(
939
+ name: str,
940
+ description: str,
941
+ requirements: List[str],
942
+ priority: TaskPriority = TaskPriority.MEDIUM,
943
+ ) -> A2ATask:
944
+ """Create a research-oriented task."""
945
+ return A2ATask(
946
+ name=name,
947
+ description=description,
948
+ requirements=requirements,
949
+ priority=priority,
950
+ tags=["research", "analysis"],
951
+ target_quality_score=0.85,
952
+ max_iterations=3,
953
+ )
954
+
955
+
956
+ def create_implementation_task(
957
+ name: str,
958
+ description: str,
959
+ requirements: List[str],
960
+ priority: TaskPriority = TaskPriority.HIGH,
961
+ ) -> A2ATask:
962
+ """Create an implementation-oriented task."""
963
+ return A2ATask(
964
+ name=name,
965
+ description=description,
966
+ requirements=requirements,
967
+ priority=priority,
968
+ tags=["implementation", "coding"],
969
+ target_quality_score=0.90,
970
+ max_iterations=2,
971
+ )
972
+
973
+
974
+ def create_validation_task(
975
+ name: str,
976
+ description: str,
977
+ requirements: List[str],
978
+ parent_task_id: str,
979
+ ) -> A2ATask:
980
+ """Create a validation/testing task."""
981
+ return A2ATask(
982
+ name=name,
983
+ description=description,
984
+ requirements=requirements,
985
+ priority=TaskPriority.HIGH,
986
+ parent_task_id=parent_task_id,
987
+ tags=["validation", "testing"],
988
+ target_quality_score=0.95,
989
+ max_iterations=1,
990
+ )
991
+
992
+
993
+ # ============================================================================
994
+ # END OF ENHANCED A2A COMPONENTS
995
+ # ============================================================================
996
+
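An editor's sketch of how the factories above compose (illustrative, not shipped code):

    researcher = create_research_agent_card("agent-research-1", "Researcher")
    scan = create_research_task(
        name="Competitive scan",
        description="Survey competing agent frameworks",
        requirements=["information_retrieval", "data_analysis"],
    )
    # Both requirements match the card's primary capabilities by name,
    # so each per-requirement score is 0.9 before the performance modifier.
    match = researcher.calculate_match_score(scan.requirements)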
 
  @register_node()
  class SharedMemoryPoolNode(Node):
@@ -1043,9 +2017,68 @@ Focus on actionable intelligence rather than just listing what each agent said."
          agent_id: str,
          original_kwargs: Dict[str, Any],
      ) -> List[Dict[str, Any]]:
-         """Use LLM to extract and analyze insights from the response."""
+         """
+         Multi-stage LLM pipeline for high-quality insight extraction.
+
+         This enhanced method implements a 6-stage pipeline as per the A2A enhancement plan:
+         1. Primary extraction with structured output
+         2. Novelty detection against memory pool
+         3. Quality enhancement and validation
+         4. Cross-model validation for reliability (if enabled)
+         5. Impact scoring and ranking
+         6. Meta-insight synthesis
+         """
 
-         # Prepare a focused prompt for insight extraction
+         # Stage 1: Primary LLM extraction with structured output
+         primary_insights = self._stage1_primary_extraction(
+             response, agent_role, original_kwargs
+         )
+
+         if not primary_insights:
+             # Fallback to rule-based extraction
+             return self._extract_insights(response, agent_role)
+
+         # Stage 2: Novelty detection against memory pool
+         memory_pool = original_kwargs.get("memory_pool")
+         if memory_pool:
+             primary_insights = self._stage2_novelty_detection(
+                 primary_insights, agent_id, memory_pool
+             )
+
+         # Stage 3: Quality enhancement and validation
+         enhanced_insights = self._stage3_quality_enhancement(
+             primary_insights, agent_role, original_kwargs
+         )
+
+         # Stage 4: Cross-model validation (optional, based on settings)
+         if original_kwargs.get("enable_cross_validation", False):
+             enhanced_insights = self._stage4_cross_model_validation(
+                 enhanced_insights, original_kwargs
+             )
+
+         # Stage 5: Impact scoring and ranking
+         scored_insights = self._stage5_impact_scoring(
+             enhanced_insights, agent_role, original_kwargs
+         )
+
+         # Stage 6: Meta-insight synthesis (if multiple high-quality insights)
+         if len(scored_insights) >= 3:
+             meta_insights = self._stage6_meta_insight_synthesis(
+                 scored_insights, agent_role, original_kwargs
+             )
+             scored_insights.extend(meta_insights)
+
+         # Sort by quality and return top insights
+         scored_insights.sort(
+             key=lambda x: x.get("quality_score", x.get("importance", 0.5)), reverse=True
+         )
+
+         return scored_insights[:5]  # Return top 5 insights
+
+     def _stage1_primary_extraction(
+         self, response: str, agent_role: str, kwargs: Dict[str, Any]
+     ) -> List[Dict[str, Any]]:
+         """Stage 1: Primary LLM extraction with structured output."""
          insight_extraction_prompt = f"""You are an AI insight extraction specialist. Analyze the following response and extract the most important insights.
 
  Agent Role: {agent_role}
@@ -1054,33 +2087,32 @@ Original Response:
 
  Extract 3-5 key insights from this response. For each insight:
  1. Summarize the core finding or conclusion (max 100 words)
- 2. Assign an importance score (0.0-1.0) based on:
-    - Novelty and uniqueness (0.3 weight)
-    - Impact on decision-making (0.4 weight)
-    - Supporting evidence quality (0.3 weight)
- 3. Categorize the insight type: finding, conclusion, comparison, recommendation, problem, metric, or pattern
+ 2. Assign a confidence score (0.0-1.0) based on evidence strength
+ 3. Categorize the insight type: discovery, analysis, recommendation, warning, opportunity, pattern, or anomaly
  4. Extract key entities mentioned (products, technologies, metrics, etc.)
- 5. Suggest relevant tags for categorization
+ 5. Identify the actionability level (0.0-1.0) - how easy is it to act on this insight?
+ 6. Note any prerequisites or dependencies
 
  Output your analysis as a JSON array with this structure:
  [
    {{
      "content": "The core insight summarized concisely",
-     "importance": 0.85,
-     "type": "finding",
-     "entities": ["MacBook Air M3", "M2", "battery life"],
-     "tags": ["performance", "comparison", "hardware"],
-     "evidence": "Brief supporting evidence from the text"
+     "confidence": 0.85,
+     "type": "discovery",
+     "entities": ["Entity1", "Entity2"],
+     "actionability": 0.7,
+     "prerequisites": ["Need access to X", "Requires Y"],
+     "evidence": "Brief supporting evidence from the text",
+     "keywords": ["keyword1", "keyword2"]
    }}
  ]
 
  Focus on insights that would be valuable for other agents to know. Ensure the JSON is valid."""
 
          try:
-             # Create a sub-call to the LLM for insight extraction
              extraction_kwargs = {
-                 "provider": original_kwargs.get("provider", "ollama"),
-                 "model": original_kwargs.get("model", "mistral"),
+                 "provider": kwargs.get("provider", "ollama"),
+                 "model": kwargs.get("model", "mistral"),
                  "temperature": 0.3,  # Lower temperature for more focused extraction
                  "messages": [
                      {
@@ -1089,10 +2121,9 @@ Focus on insights that would be valuable for other agents to know. Ensure the JS
                      },
                      {"role": "user", "content": insight_extraction_prompt},
                  ],
-                 "max_tokens": original_kwargs.get("max_tokens", 1000),
+                 "max_tokens": kwargs.get("max_tokens", 1000),
              }
 
-             # Execute LLM call for insight extraction
              extraction_result = super().run(**extraction_kwargs)
 
              if extraction_result.get("success"):
@@ -1100,45 +2131,367 @@ Focus on insights that would be valuable for other agents to know. Ensure the JS
                      "content", ""
                  )
 
-                 # Parse the JSON response
+                 # Parse JSON response
                  import json
                  import re
 
-                 # Try to extract JSON from the response
                  json_match = re.search(r"\[.*?\]", extracted_content, re.DOTALL)
                  if json_match:
                      try:
                          extracted_insights = json.loads(json_match.group())
 
-                         # Convert to our insight format
+                         # Convert to enhanced format
                          insights = []
-                         for item in extracted_insights[:5]:  # Limit to 5 insights
+                         for item in extracted_insights[:5]:
                              insight = {
                                  "content": item.get("content", ""),
-                                 "importance": min(
-                                     max(item.get("importance", 0.5), 0.0), 1.0
+                                 "confidence": item.get("confidence", 0.5),
+                                 # Map the reported type onto the lowercase enum
+                                 # values; fall back to ANALYSIS for unknown types.
+                                 "insight_type": (
+                                     InsightType(item.get("type", "analysis").lower())
+                                     if item.get("type", "analysis").lower()
+                                     in [e.value for e in InsightType]
+                                     else InsightType.ANALYSIS
                                  ),
-                                 "tags": item.get("tags", []) + [agent_role],
-                                 "segment": f"{agent_role}_{item.get('type', 'general')}",
-                                 "metadata": {
-                                     "insight_type": item.get("type", "general"),
-                                     "extracted_entities": item.get("entities", []),
-                                     "evidence": item.get("evidence", ""),
-                                     "llm_extracted": True,
-                                 },
+                                 "entities": item.get("entities", []),
+                                 "actionability_score": item.get("actionability", 0.5),
+                                 "prerequisites": item.get("prerequisites", []),
+                                 "evidence": item.get("evidence", ""),
+                                 "keywords": item.get("keywords", []),
+                                 "stage": "primary_extraction",
                              }
                              insights.append(insight)
 
                          return insights
-                     except json.JSONDecodeError:
+                     except (json.JSONDecodeError, ValueError):
+                         pass
+         except Exception:
+             pass
+
+         return []
+
+     def _stage2_novelty_detection(
+         self, insights: List[Dict[str, Any]], agent_id: str, memory_pool: Any
+     ) -> List[Dict[str, Any]]:
+         """Stage 2: Novelty detection against memory pool."""
+         for insight in insights:
+             # Search for similar insights in memory
+             similar_memories = memory_pool.execute(
+                 action="read",
+                 agent_id=agent_id,
+                 attention_filter={
+                     "tags": insight.get("keywords", []),
+                     "window_size": 50,  # Check last 50 memories
+                 },
+             ).get("memories", [])
+
+             # Calculate novelty score
+             novelty_score = 1.0
+             for memory in similar_memories:
+                 # Simple similarity check (could be enhanced with embeddings)
+                 memory_content = memory.get("content", "").lower()
+                 insight_content = insight["content"].lower()
+
+                 # Check for significant overlap
+                 common_words = set(memory_content.split()) & set(
+                     insight_content.split()
+                 )
+                 if len(common_words) > len(insight_content.split()) * 0.5:
+                     novelty_score *= 0.7  # Reduce novelty if similar exists
+
+             insight["novelty_score"] = max(novelty_score, 0.1)
+             insight["similar_insights_count"] = len(similar_memories)
+
+         return insights
+
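A note on the overlap rule above: it is multiplicative. Each stored memory that shares more than half of the insight's words scales novelty by 0.7, with a floor of 0.1:

    # 0 similar memories -> 1.0    1 -> 0.7    3 -> 0.7**3 ≈ 0.343
    # 7 or more overlapping memories clamp to the 0.1 floor (0.7**7 ≈ 0.082)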
+     def _stage3_quality_enhancement(
+         self, insights: List[Dict[str, Any]], agent_role: str, kwargs: Dict[str, Any]
+     ) -> List[Dict[str, Any]]:
+         """Stage 3: Quality enhancement and validation."""
+         if not insights:
+             return insights
+
+         # Create a consolidated prompt for quality enhancement
+         insights_text = "\n".join(
+             [
+                 f"{i+1}. {insight['content']} (Confidence: {insight['confidence']:.2f})"
+                 for i, insight in enumerate(insights)
+             ]
+         )
+
+         enhancement_prompt = f"""As a quality assurance specialist, enhance these insights:
+
+ Agent Role: {agent_role}
+ Raw Insights:
+ {insights_text}
+
+ For each insight:
+ 1. Clarify any ambiguous statements
+ 2. Add specific metrics or quantities where possible
+ 3. Identify potential impacts (business, technical, strategic)
+ 4. Suggest follow-up actions
+ 5. Rate the overall quality (0.0-1.0)
+
+ Respond with a JSON array matching the input order:
+ [
+   {{
+     "enhanced_content": "Clearer, more specific version of the insight",
+     "impact": "Description of potential impact",
+     "follow_up_actions": ["Action 1", "Action 2"],
+     "quality_score": 0.85
+   }}
+ ]"""
+
+         try:
+             enhancement_kwargs = {
+                 "provider": kwargs.get("provider", "ollama"),
+                 "model": kwargs.get("model", "mistral"),
+                 "temperature": 0.2,  # Even lower for enhancement
+                 "messages": [
+                     {
+                         "role": "system",
+                         "content": "You are an expert at enhancing and clarifying insights. Always respond with valid JSON.",
+                     },
+                     {"role": "user", "content": enhancement_prompt},
+                 ],
+                 "max_tokens": 800,
+             }
+
+             enhancement_result = super().run(**enhancement_kwargs)
+
+             if enhancement_result.get("success"):
+                 enhanced_content = enhancement_result.get("response", {}).get(
+                     "content", ""
+                 )
+
+                 import json
+                 import re
+
+                 json_match = re.search(r"\[.*?\]", enhanced_content, re.DOTALL)
+                 if json_match:
+                     try:
+                         enhancements = json.loads(json_match.group())
+
+                         # Merge enhancements with original insights
+                         for i, enhancement in enumerate(enhancements[: len(insights)]):
+                             if enhancement.get("enhanced_content"):
+                                 insights[i]["content"] = enhancement["enhanced_content"]
+                                 insights[i]["impact_score"] = enhancement.get(
+                                     "quality_score", 0.5
+                                 )
+                                 insights[i]["impact_description"] = enhancement.get(
+                                     "impact", ""
+                                 )
+                                 insights[i]["follow_up_actions"] = enhancement.get(
+                                     "follow_up_actions", []
+                                 )
+                                 insights[i]["stage"] = "quality_enhanced"
+                     except (json.JSONDecodeError, ValueError):
                          pass
+         except Exception:
+             pass
+
+         return insights
+
+     def _stage4_cross_model_validation(
+         self, insights: List[Dict[str, Any]], kwargs: Dict[str, Any]
+     ) -> List[Dict[str, Any]]:
+         """Stage 4: Cross-model validation for reliability (optional)."""
+         # This would validate insights using a different model
+         # For now, we'll simulate by adjusting confidence based on consistency
 
+         alternate_model = kwargs.get("validation_model", kwargs.get("model"))
+         if alternate_model == kwargs.get("model"):
+             # Same model, skip validation
+             return insights
+
+         # In a real implementation, we would re-validate with alternate model
+         # For now, apply a validation factor
+         for insight in insights:
+             insight["cross_validated"] = True
+             insight["confidence"] *= 0.95  # Slight confidence adjustment
+
+         return insights
+
+     def _stage5_impact_scoring(
+         self, insights: List[Dict[str, Any]], agent_role: str, kwargs: Dict[str, Any]
+     ) -> List[Dict[str, Any]]:
+         """Stage 5: Impact scoring and ranking."""
+         # Iterate by index so each raw insight can be replaced in place.
+         for idx, insight in enumerate(insights):
+             # Calculate comprehensive quality score
+             confidence = insight.get("confidence", 0.5)
+             novelty = insight.get("novelty_score", 0.5)
+             actionability = insight.get("actionability_score", 0.5)
+             impact = insight.get("impact_score", 0.5)
+
+             # Weighted quality score
+             quality_score = (
+                 confidence * 0.3 + novelty * 0.3 + actionability * 0.3 + impact * 0.1
+             )
+
+             # Convert to final format
+             insight_obj = Insight(
+                 content=insight["content"],
+                 insight_type=insight.get("insight_type", InsightType.ANALYSIS),
+                 confidence=confidence,
+                 novelty_score=novelty,
+                 actionability_score=actionability,
+                 impact_score=impact,
+                 generated_by=kwargs.get("agent_id", ""),
+                 keywords=insight.get("keywords", []),
+                 evidence=(
+                     [{"text": insight.get("evidence", "")}]
+                     if insight.get("evidence")
+                     else []
+                 ),
+             )
+
+             # Add to format expected by memory pool
+             formatted_insight = {
+                 "content": insight_obj.content,
+                 "importance": quality_score,
+                 "quality_score": quality_score,
+                 "tags": insight.get("keywords", []) + [agent_role],
+                 "segment": f"{agent_role}_{insight_obj.insight_type.value}",
+                 "metadata": {
+                     "insight_type": insight_obj.insight_type.value,
+                     "extracted_entities": insight.get("entities", []),
+                     "evidence": insight.get("evidence", ""),
+                     "llm_extracted": True,
+                     "multi_stage_pipeline": True,
+                     "stages_completed": insight.get("stage", "unknown"),
+                     "novelty_score": novelty,
+                     "actionability_score": actionability,
+                     "impact_score": impact,
+                     "quality_score": quality_score,
+                 },
+             }
+
+             # Copy over the insight object for reference
+             formatted_insight["insight_object"] = insight_obj
+
+             insights[idx] = formatted_insight
+
+         return insights
+
+     def _stage6_meta_insight_synthesis(
+         self, insights: List[Dict[str, Any]], agent_role: str, kwargs: Dict[str, Any]
+     ) -> List[Dict[str, Any]]:
+         """Stage 6: Meta-insight synthesis from multiple insights."""
+         if len(insights) < 3:
+             return []
+
+         # Create synthesis prompt
+         insights_summary = "\n".join(
+             [
+                 f"- {insight['content']} (Quality: {insight.get('quality_score', 0.5):.2f})"
+                 for insight in insights[:5]
+             ]
+         )
+
+         synthesis_prompt = f"""Analyze these insights collectively to identify meta-patterns:
+
+ Agent Role: {agent_role}
+ Individual Insights:
+ {insights_summary}
+
+ Identify:
+ 1. Common themes or patterns across insights
+ 2. Potential synergies or connections
+ 3. Contradictions or tensions
+ 4. Emergent conclusions from the collective insights
+
+ Provide 1-2 meta-insights that capture higher-level understanding.
+
+ Respond with JSON:
+ [
+   {{
+     "meta_insight": "Higher-level insight derived from patterns",
+     "supporting_insights": [1, 2, 3],
+     "insight_type": "pattern",
+     "confidence": 0.8
+   }}
+ ]"""
+
+         try:
+             synthesis_kwargs = {
+                 "provider": kwargs.get("provider", "ollama"),
+                 "model": kwargs.get("model", "mistral"),
+                 "temperature": 0.4,  # Slightly higher for creative synthesis
+                 "messages": [
+                     {
+                         "role": "system",
+                         "content": "You are an expert at identifying patterns and synthesizing meta-insights.",
+                     },
+                     {"role": "user", "content": synthesis_prompt},
+                 ],
+                 "max_tokens": 500,
+             }
+
+             synthesis_result = super().run(**synthesis_kwargs)
+
+             if synthesis_result.get("success"):
+                 synthetic_content = synthesis_result.get("response", {}).get(
+                     "content", ""
+                 )
+
+                 import json
+                 import re
+
+                 json_match = re.search(r"\[.*?\]", synthetic_content, re.DOTALL)
+                 if json_match:
+                     try:
+                         meta_insights_data = json.loads(json_match.group())
+
+                         meta_insights = []
+                         for meta in meta_insights_data[:2]:  # Max 2 meta-insights
+                             meta_insight = Insight(
+                                 content=meta.get("meta_insight", ""),
+                                 insight_type=InsightType.PATTERN,
+                                 confidence=meta.get("confidence", 0.7),
+                                 novelty_score=0.9,  # Meta-insights are typically novel
+                                 actionability_score=0.6,  # May be less directly actionable
+                                 impact_score=0.8,  # But high impact
+                                 generated_by=kwargs.get("agent_id", ""),
+                                 keywords=["meta-insight", "synthesis", agent_role],
+                             )
+
+                             # Track which insights it builds on
+                             supporting_indices = meta.get("supporting_insights", [])
+                             if supporting_indices:
+                                 meta_insight.builds_on = [
+                                     insights[i - 1]
+                                     .get("insight_object", Insight())
+                                     .insight_id
+                                     for i in supporting_indices
+                                     if 0 < i <= len(insights)
+                                 ]
+
+                             formatted_meta = {
+                                 "content": meta_insight.content,
+                                 "importance": meta_insight.quality_score,
+                                 "quality_score": meta_insight.quality_score,
+                                 "tags": ["meta-insight", "synthesis"] + [agent_role],
+                                 "segment": f"{agent_role}_meta_pattern",
+                                 "metadata": {
+                                     "insight_type": "meta_pattern",
+                                     "is_meta_insight": True,
+                                     "supporting_insights": supporting_indices,
+                                     "llm_extracted": True,
+                                     "multi_stage_pipeline": True,
+                                     "stage": "meta_synthesis",
+                                 },
+                                 "insight_object": meta_insight,
+                             }
+
+                             meta_insights.append(formatted_meta)
+
+                         return meta_insights
+                     except (json.JSONDecodeError, ValueError):
+                         pass
          except Exception:
-             # Log the error but don't fail - fall back to rule-based extraction
              pass
 
-         # If LLM extraction fails, fall back to rule-based
-         return self._extract_insights(response, agent_role)
+         return []
 
 
  @register_node()
@@ -1242,6 +2595,12 @@ class A2ACoordinatorNode(CycleAwareNode):
          self.task_queue = deque()
          self.consensus_sessions = {}
 
+         # Enhanced features
+         self.agent_cards: Dict[str, A2AAgentCard] = {}
+         self.active_tasks: Dict[str, A2ATask] = {}
+         self.completed_tasks: List[A2ATask] = []
+         self.task_history_limit = 100  # Keep last 100 completed tasks
+
      def get_parameters(self) -> Dict[str, NodeParameter]:
          return {
              "action": NodeParameter(
@@ -1249,7 +2608,7 @@ class A2ACoordinatorNode(CycleAwareNode):
                  type=str,
                  required=False,
                  default="coordinate",
-                 description="Action: 'register', 'delegate', 'broadcast', 'consensus', 'coordinate'",
+                 description="Action: 'register', 'register_with_card', 'delegate', 'broadcast', 'consensus', 'coordinate', 'create_task', 'update_task_state', 'get_task_insights', 'match_agents_to_task'",
              ),
              "agent_info": NodeParameter(
                  name="agent_info",
@@ -1257,12 +2616,30 @@ class A2ACoordinatorNode(CycleAwareNode):
                  required=False,
                  description="Information about agent (for registration)",
              ),
+             "agent_id": NodeParameter(
+                 name="agent_id",
+                 type=str,
+                 required=False,
+                 description="Unique identifier for an agent (for register_with_card, update_task_state)",
+             ),
+             "agent_card": NodeParameter(
+                 name="agent_card",
+                 type=dict,
+                 required=False,
+                 description="Rich capability card for agent registration (for register_with_card action)",
+             ),
              "task": NodeParameter(
                  name="task",
                  type=dict,
                  required=False,
                  description="Task to delegate or coordinate",
              ),
+             "task_id": NodeParameter(
+                 name="task_id",
+                 type=str,
+                 required=False,
+                 description="ID of an existing task (for update_task_state, get_task_insights, delegate)",
+             ),
              "message": NodeParameter(
                  name="message",
                  type=dict,
@@ -1289,6 +2666,67 @@ class A2ACoordinatorNode(CycleAwareNode):
                  default="best_match",
                  description="Strategy: 'best_match', 'round_robin', 'broadcast', 'auction'",
              ),
+             "task_type": NodeParameter(
+                 name="task_type",
+                 type=str,
+                 required=False,
+                 default="research",
+                 description="Type of task to create: 'research', 'implementation', 'validation'",
+             ),
+             "name": NodeParameter(
+                 name="name",
+                 type=str,
+                 required=False,
+                 default="",
+                 description="Name of the task",
+             ),
+             "description": NodeParameter(
+                 name="description",
+                 type=str,
+                 required=False,
+                 default="",
+                 description="Description of the task",
+             ),
+             "requirements": NodeParameter(
+                 name="requirements",
+                 type=list,
+                 required=False,
+                 default=[],
+                 description="List of requirements for the task",
+             ),
+             "priority": NodeParameter(
+                 name="priority",
+                 type=str,
+                 required=False,
+                 default="medium",
+                 description="Task priority: 'low', 'medium', 'high', 'critical'",
+             ),
+             "new_state": NodeParameter(
+                 name="new_state",
+                 type=str,
+                 required=False,
+                 description="New state for task transition",
+             ),
+             "insights": NodeParameter(
+                 name="insights",
+                 type=list,
+                 required=False,
+                 default=[],
+                 description="List of insights to add to task",
+             ),
+             "min_quality": NodeParameter(
+                 name="min_quality",
+                 type=float,
+                 required=False,
+                 default=0.0,
+                 description="Minimum quality score for insight filtering",
+             ),
+             "insight_type": NodeParameter(
+                 name="insight_type",
+                 type=str,
+                 required=False,
+                 description="Type of insights to filter",
+             ),
          }
 
      def run(self, **kwargs) -> Dict[str, Any]:
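An editor's sketch of driving the new actions end to end (illustrative; it assumes the standard Kailash node entry point used elsewhere in this file, where node.execute(...) validates parameters and dispatches to run()):

    coordinator = A2ACoordinatorNode()
    coordinator.execute(
        action="register_with_card",
        agent_id="agent-research-1",
        agent_card=create_research_agent_card("agent-research-1", "Researcher").to_dict(),
    )
    created = coordinator.execute(
        action="create_task",
        task_type="research",
        name="Competitive scan",
        description="Survey competing agent frameworks",
        requirements=["information_retrieval"],
        priority="high",
    )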
@@ -1348,13 +2786,30 @@ class A2ACoordinatorNode(CycleAwareNode):
         coordination_history = prev_state.get("coordination_history", [])
         agent_performance_history = prev_state.get("agent_performance", {})

-        # Execute the coordination action
-        if action == "register":
+        # Execute the coordination action - enhanced actions first
+        if action == "register_with_card":
+            result = self._register_agent_with_card(kwargs, context)
+        elif action == "create_task":
+            result = self._create_structured_task(kwargs)
+        elif action == "update_task_state":
+            result = self._update_task_state(kwargs)
+        elif action == "get_task_insights":
+            result = self._get_task_insights(kwargs)
+        elif action == "match_agents_to_task":
+            result = self._match_agents_to_task(kwargs)
+        # Original actions with enhancement support
+        elif action == "register":
             result = self._register_agent(kwargs, context)
         elif action == "delegate":
-            result = self._delegate_task(
-                kwargs, context, coordination_history, agent_performance_history
-            )
+            # Check if we should use enhanced delegation
+            if self.agent_cards or kwargs.get("task_id") in self.active_tasks:
+                result = self._enhanced_delegate_task(
+                    kwargs, context, coordination_history, agent_performance_history
+                )
+            else:
+                result = self._delegate_task(
+                    kwargs, context, coordination_history, agent_performance_history
+                )
         elif action == "broadcast":
             result = self._broadcast_message(kwargs, context)
         elif action == "consensus":
@@ -1427,6 +2882,7 @@ class A2ACoordinatorNode(CycleAwareNode):
         if not agent_id:
             return {"success": False, "error": "Agent ID required"}

+        # Create base registration
         self.registered_agents[agent_id] = {
             "id": agent_id,
             "skills": agent_info.get("skills", []),
@@ -1437,6 +2893,12 @@ class A2ACoordinatorNode(CycleAwareNode):
             "success_rate": 1.0,
         }

+        # Create default agent card if not exists
+        if agent_id not in self.agent_cards:
+            self.agent_cards[agent_id] = self._create_default_agent_card(
+                agent_id, agent_info.get("skills", [])
+            )
+
         return {
             "success": True,
             "agent_id": agent_id,
@@ -1805,3 +3267,412 @@ class A2ACoordinatorNode(CycleAwareNode):
             return bids[0]["agent"]

         return None
+
+    # =========================================================================
+    # ENHANCED METHODS FOR AGENT CARDS AND TASK MANAGEMENT
+    # =========================================================================
+
+    def _register_agent_with_card(
+        self, kwargs: Dict[str, Any], context: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Register an agent with a rich capability card."""
+        agent_id = kwargs.get("agent_id")
+        card_data = kwargs.get("agent_card")
+
+        if not agent_id or not card_data:
+            return {"success": False, "error": "agent_id and agent_card required"}
+
+        # Create or update agent card
+        if isinstance(card_data, dict):
+            card = A2AAgentCard.from_dict(card_data)
+        else:
+            card = card_data
+
+        self.agent_cards[agent_id] = card
+
+        # Also register with base system for compatibility
+        capabilities = [cap.name for cap in card.primary_capabilities]
+        self._register_agent(
+            {
+                "agent_info": {
+                    "id": agent_id,
+                    "skills": capabilities,
+                    "role": card.agent_type,
+                }
+            },
+            context,
+        )
+
+        return {
+            "success": True,
+            "agent_id": agent_id,
+            "capabilities_registered": len(capabilities),
+            "card_version": card.version,
+        }
+
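
A sketch of a register_with_card call. The exact schema accepted by A2AAgentCard.from_dict is defined elsewhere in this module; the field names below simply mirror the attributes this file reads back (agent_id, agent_name, agent_type, version, primary_capabilities) and should be treated as illustrative:

card_payload = {
    "agent_id": "researcher-1",
    "agent_name": "researcher-1",
    "agent_type": "research",
    "version": "1.0.0",
    "primary_capabilities": [
        {
            "name": "literature_review",
            "domain": "research",
            "level": "advanced",
            "description": "Surveys and summarizes prior work",
            "keywords": ["research", "survey"],
        }
    ],
}

result = coordinator.run(
    action="register_with_card",
    agent_id="researcher-1",
    agent_card=card_payload,
)
assert result["success"] and result["capabilities_registered"] == 1
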
+    def _create_structured_task(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Create a structured task with lifecycle management."""
+        task_type = kwargs.get("task_type", "research")
+        name = kwargs.get("name", "")
+        description = kwargs.get("description", "")
+        requirements = kwargs.get("requirements", [])
+        priority = kwargs.get("priority", "medium")
+
+        # Create appropriate task type
+        if task_type == "research":
+            task = create_research_task(
+                name=name,
+                description=description,
+                requirements=requirements,
+                priority=TaskPriority(priority),
+            )
+        elif task_type == "implementation":
+            task = create_implementation_task(
+                name=name,
+                description=description,
+                requirements=requirements,
+                priority=TaskPriority(priority),
+            )
+        elif task_type == "validation":
+            parent_id = kwargs.get("parent_task_id")
+            task = create_validation_task(
+                name=name,
+                description=description,
+                requirements=requirements,
+                parent_task_id=parent_id,
+            )
+        else:
+            # Generic task
+            task = A2ATask(
+                name=name,
+                description=description,
+                requirements=requirements,
+                priority=TaskPriority(priority),
+            )
+
+        # Add any additional context
+        if "context" in kwargs:
+            task.context.update(kwargs["context"])
+
+        # Store task
+        self.active_tasks[task.task_id] = task
+
+        return {"success": True, "task_id": task.task_id, "task": task.to_dict()}
+
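
Of the three factory branches, only the validation branch reads parent_task_id, which links the new task back to the work it checks; note also that TaskPriority(priority) raises ValueError for unknown priority strings. A sketch, assuming extra kwargs such as parent_task_id are passed through run() to this handler:

check = coordinator.run(
    action="create_task",
    task_type="validation",
    name="verify-market-scan",
    description="Spot-check the research findings",
    requirements=["validation"],
    parent_task_id=task_id,  # only consumed by the validation branch
)
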
+    def _enhanced_delegate_task(
+        self,
+        kwargs: Dict[str, Any],
+        context: Dict[str, Any],
+        coordination_history: List[Dict],
+        agent_performance_history: Dict[str, Dict],
+    ) -> Dict[str, Any]:
+        """Enhanced delegation using agent cards for better matching."""
+        task_id = kwargs.get("task_id")
+        task_dict = kwargs.get("task", {})
+
+        # Check if this is a structured task
+        if task_id and task_id in self.active_tasks:
+            task = self.active_tasks[task_id]
+
+            # Validate task is ready
+            is_valid, issues = TaskValidator.validate_for_assignment(task)
+            if not is_valid:
+                return {
+                    "success": False,
+                    "error": f"Task not ready for assignment: {', '.join(issues)}",
+                }
+
+            # Find best agents using cards
+            best_agents = self._find_best_agents_for_task(task)
+
+            if not best_agents:
+                # Fall back to base delegation
+                return self._delegate_task(
+                    kwargs, context, coordination_history, agent_performance_history
+                )
+
+            # Assign to best agents
+            task.assigned_to = [agent_id for agent_id, _ in best_agents[:3]]
+            task.transition_to(TaskState.ASSIGNED)
+            task.assigned_at = datetime.now()
+
+            # Use first agent for delegation
+            return {
+                "success": True,
+                "delegated_to": task.assigned_to[0],
+                "task_id": task.task_id,
+                "match_score": best_agents[0][1],
+                "state": task.state.value,
+            }
+
+        # Not a structured task, but we can still use agent cards for better matching
+        if self.agent_cards and task_dict.get("required_skills"):
+            # Try to match using agent cards
+            required_skills = task_dict.get("required_skills", [])
+
+            # Find agents that match the requirements
+            matching_agents = []
+            for agent_id, agent_info in self.registered_agents.items():
+                if agent_info["status"] == "available" and agent_id in self.agent_cards:
+                    card = self.agent_cards[agent_id]
+                    # Check if any capability matches the required skills
+                    all_capabilities = list(card.primary_capabilities) + list(
+                        card.secondary_capabilities
+                    )
+                    for cap in all_capabilities:
+                        cap_keywords = getattr(cap, "keywords", [])
+                        for req in required_skills:
+                            if req.lower() in cap.name.lower() or any(
+                                req.lower() in kw.lower() for kw in cap_keywords
+                            ):
+                                matching_agents.append(agent_id)
+                                break
+                        else:
+                            # No requirement matched this capability; try the next one
+                            continue
+                        # A requirement matched, so the agent qualifies; stop scanning
+                        break
+
+            if matching_agents:
+                # Override available_agents with matched agents, adding card capabilities as skills
+                enhanced_agents = []
+                for agent_id in matching_agents:
+                    agent_copy = dict(self.registered_agents[agent_id])
+                    # Add card capabilities as skills for matching
+                    if agent_id in self.agent_cards:
+                        card = self.agent_cards[agent_id]
+                        card_skills = [cap.name for cap in card.primary_capabilities]
+                        # Merge original skills with card capabilities
+                        agent_copy["skills"] = list(
+                            set(agent_copy.get("skills", []) + card_skills)
+                        )
+                    enhanced_agents.append(agent_copy)
+                kwargs["available_agents"] = enhanced_agents
+
+        # Use base delegation which will use the available_agents if provided
+        return self._delegate_task(
+            kwargs, context, coordination_history, agent_performance_history
+        )
+
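
With a stored task and at least one agent card in place, delegating by task_id takes the structured branch above: the top three matches are recorded on the task, the state moves to assigned, and only the first agent is reported. Continuing the sketch (output values are illustrative):

outcome = coordinator.run(action="delegate", task_id=task_id)
if outcome["success"]:
    # e.g. researcher-1 0.87 assigned; scores depend on card contents
    print(outcome["delegated_to"], outcome["match_score"], outcome["state"])
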
+    def _find_best_agents_for_task(self, task: A2ATask) -> List[Tuple[str, float]]:
+        """Find best agents for a task using agent cards."""
+        matches = []
+
+        for agent_id, card in self.agent_cards.items():
+            # Skip if incompatible
+            if task.delegated_by and not card.is_compatible_with(task.delegated_by):
+                continue
+
+            # Calculate match score
+            score = card.calculate_match_score(task.requirements)
+
+            # Apply collaboration style bonus
+            if len(task.assigned_to) > 0:
+                if card.collaboration_style == CollaborationStyle.COOPERATIVE:
+                    score *= 1.1
+                elif card.collaboration_style == CollaborationStyle.INDEPENDENT:
+                    score *= 0.9
+
+            # Apply performance history bonus
+            if card.performance.total_tasks > 10:
+                score *= 0.8 + 0.2 * card.performance.success_rate
+
+            matches.append((agent_id, score))
+
+        # Sort by score descending
+        matches.sort(key=lambda x: x[1], reverse=True)
+
+        return matches
+
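
The multipliers compose: a cooperative agent joining an already-staffed task keeps a 1.1 bonus, and a track record longer than ten tasks rescales the result toward the agent's success rate. A standalone sketch of the arithmetic, mirroring (not importing) the logic above:

def composed_score(
    base: float, cooperative: bool, total_tasks: int, success_rate: float
) -> float:
    """Mirror of the scoring pipeline above, for intuition only."""
    score = base
    if cooperative:
        score *= 1.1  # team-player bonus when others are already assigned
    if total_tasks > 10:
        score *= 0.8 + 0.2 * success_rate  # blend in the track record
    return score

# A perfect record leaves the cooperative score intact; a 50% record scales it.
assert abs(composed_score(0.8, True, 20, 1.0) - 0.88) < 1e-9
assert abs(composed_score(0.8, True, 20, 0.5) - 0.792) < 1e-9
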
+    def _update_task_state(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Update task state and handle transitions."""
+        task_id = kwargs.get("task_id")
+        new_state = kwargs.get("new_state")
+        insights = kwargs.get("insights", [])
+        agent_id = kwargs.get("agent_id")
+
+        if task_id not in self.active_tasks:
+            return {"success": False, "error": f"Task {task_id} not found"}
+
+        task = self.active_tasks[task_id]
+
+        # Handle state transition
+        if new_state:
+            success = task.transition_to(TaskState(new_state))
+            if not success:
+                return {
+                    "success": False,
+                    "error": f"Invalid transition from {task.state.value} to {new_state}",
+                }
+
+        # Add insights if provided
+        for insight_data in insights:
+            if isinstance(insight_data, dict):
+                insight = Insight(
+                    content=insight_data.get("content", ""),
+                    insight_type=InsightType(insight_data.get("type", "analysis")),
+                    confidence=insight_data.get("confidence", 0.0),
+                    novelty_score=insight_data.get("novelty_score", 0.0),
+                    actionability_score=insight_data.get("actionability_score", 0.0),
+                    impact_score=insight_data.get("impact_score", 0.0),
+                    generated_by=agent_id or "",
+                    keywords=insight_data.get("keywords", []),
+                )
+            else:
+                insight = insight_data
+
+            task.add_insight(insight)
+
+        # Update agent performance if we have cards
+        if agent_id and agent_id in self.agent_cards:
+            card = self.agent_cards[agent_id]
+            card.update_performance(
+                {
+                    "success": task.state != TaskState.FAILED,
+                    "insights": insights,
+                    "quality_score": task.current_quality_score,
+                }
+            )
+
+        # Check if task needs iteration
+        if task.state == TaskState.AWAITING_REVIEW and task.needs_iteration:
+            return {
+                "success": True,
+                "task_state": task.state.value,
+                "needs_iteration": True,
+                "current_quality": task.current_quality_score,
+                "target_quality": task.target_quality_score,
+                "iteration": task.current_iteration + 1,
+            }
+
+        # Move completed tasks to history
+        if task.is_complete:
+            self.completed_tasks.append(task)
+            # Limit history size
+            if len(self.completed_tasks) > self.task_history_limit:
+                self.completed_tasks = self.completed_tasks[-self.task_history_limit :]
+            del self.active_tasks[task_id]
+
+        return {
+            "success": True,
+            "task_state": task.state.value,
+            "quality_score": task.current_quality_score,
+            "insights_count": len(task.insights),
+        }
+
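
Insight dictionaries are converted into Insight objects using the score fields shown above; anything that is not a dict is assumed to already be an Insight. Continuing the sketch, and assuming the requested transition is legal for the task's current state:

update = coordinator.run(
    action="update_task_state",
    task_id=task_id,
    agent_id="researcher-1",
    new_state="in_progress",
    insights=[
        {
            "content": "Three of five surveyed frameworks lack task lifecycles",
            "type": "discovery",
            "confidence": 0.8,
            "novelty_score": 0.6,
            "actionability_score": 0.7,
            "impact_score": 0.5,
            "keywords": ["lifecycle", "comparison"],
        }
    ],
)
# update["needs_iteration"] appears only when the task sits in awaiting_review
# below its target quality score.
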
+    def _get_task_insights(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Get insights from a task."""
+        task_id = kwargs.get("task_id")
+        min_quality = kwargs.get("min_quality", 0.0)
+        insight_type = kwargs.get("insight_type")
+
+        # Check active tasks
+        if task_id in self.active_tasks:
+            task = self.active_tasks[task_id]
+        # Check completed tasks
+        else:
+            task = next((t for t in self.completed_tasks if t.task_id == task_id), None)
+
+        if not task:
+            return {"success": False, "error": f"Task {task_id} not found"}
+
+        # Filter insights
+        insights = task.insights
+
+        if min_quality > 0:
+            insights = [i for i in insights if i.quality_score >= min_quality]
+
+        if insight_type:
+            type_filter = InsightType(insight_type)
+            insights = [i for i in insights if i.insight_type == type_filter]
+
+        return {
+            "success": True,
+            "task_id": task_id,
+            "task_state": task.state.value,
+            "insights": [i.to_dict() for i in insights],
+            "total_insights": len(task.insights),
+            "filtered_insights": len(insights),
+        }
+
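
Completed tasks remain queryable because the completed_tasks history is searched as a fallback. A filtering sketch, continuing the example:

report = coordinator.run(
    action="get_task_insights",
    task_id=task_id,
    min_quality=0.5,
    insight_type="discovery",
)
print(report["filtered_insights"], "of", report["total_insights"], "insights kept")
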
+    def _match_agents_to_task(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Match agents to task requirements without delegation."""
+        task_id = kwargs.get("task_id")
+        requirements = kwargs.get("requirements", [])
+
+        # Get task if ID provided
+        task = None
+        if task_id and task_id in self.active_tasks:
+            task = self.active_tasks[task_id]
+            requirements = task.requirements
+        elif not requirements:
+            return {
+                "success": False,
+                "error": "Either task_id or requirements must be provided",
+            }
+
+        # Create a temporary task for matching if no stored task was found
+        # (covers both a missing and an unknown task_id)
+        if task is None:
+            task = A2ATask(requirements=requirements)
+
+        # Find matches
+        matches = self._find_best_agents_for_task(task)
+
+        # Format results
+        agent_matches = []
+        for agent_id, score in matches[:10]:  # Top 10 matches
+            card = self.agent_cards[agent_id]
+            agent_matches.append(
+                {
+                    "agent_id": agent_id,
+                    "agent_name": card.agent_name,
+                    "match_score": score,
+                    "primary_capabilities": [
+                        cap.name for cap in card.primary_capabilities
+                    ],
+                    "performance": {
+                        "success_rate": card.performance.success_rate,
+                        "insight_quality": card.performance.insight_quality_score,
+                    },
+                    "collaboration_style": card.collaboration_style.value,
+                }
+            )
+
+        return {
+            "success": True,
+            "requirements": requirements,
+            "matched_agents": agent_matches,
+            "total_agents": len(self.agent_cards),
+        }
+
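
Because matching never mutates task or agent state, it works as a dry run ahead of delegate. A sketch using ad-hoc requirements, with no stored task needed:

preview = coordinator.run(
    action="match_agents_to_task",
    requirements=["research", "analysis"],
)
for match in preview["matched_agents"]:
    print(
        match["agent_id"],
        round(match["match_score"], 2),
        match["collaboration_style"],
    )
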
+    def _create_default_agent_card(
+        self, agent_id: str, capabilities: List[str]
+    ) -> A2AAgentCard:
+        """Create a basic agent card from capability list."""
+        # Guess agent type from capabilities
+        if any("research" in cap.lower() for cap in capabilities):
+            return create_research_agent_card(agent_id, agent_id)
+        elif any(
+            "code" in cap.lower() or "implement" in cap.lower() for cap in capabilities
+        ):
+            return create_coding_agent_card(agent_id, agent_id)
+        elif any("test" in cap.lower() or "qa" in cap.lower() for cap in capabilities):
+            return create_qa_agent_card(agent_id, agent_id)
+        else:
+            # Generic card
+            return A2AAgentCard(
+                agent_id=agent_id,
+                agent_name=agent_id,
+                agent_type="generic",
+                version="1.0.0",
+                primary_capabilities=[
+                    Capability(
+                        name=cap,
+                        domain="general",
+                        level=CapabilityLevel.INTERMEDIATE,
+                        description=f"Capable of {cap}",
+                        keywords=[cap.lower()],
+                    )
+                    for cap in capabilities[:3]  # Limit to 3 primary
+                ],
+                description=f"Agent with capabilities: {', '.join(capabilities)}",
+            )
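
The keyword heuristics above mean even card-less register calls end up with a usable card: "research" skills map to the research card factory, "code"/"implement" to the coding one, "test"/"qa" to the QA one, and anything else to the generic card. A closing sketch (skill strings are illustrative):

coordinator.run(
    action="register",
    agent_id="helper-1",
    agent_info={"id": "helper-1", "skills": ["summarization", "translation"]},
)
# No research/code/test keywords match, so helper-1 receives the generic card,
# with its first three skills promoted to INTERMEDIATE-level capabilities.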