empathy-framework 3.11.0__py3-none-any.whl → 4.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,582 @@
+ """Orchestrated Release Preparation Workflow
+
+ Uses the meta-orchestration system to coordinate multiple validation agents
+ in parallel for comprehensive release readiness assessment.
+
+ This is the first production use case of the meta-orchestration system,
+ demonstrating parallel agent composition with quality gates.
+
+ Architecture:
+ - MetaOrchestrator analyzes task and selects agents
+ - ParallelStrategy runs validation agents simultaneously
+ - Quality gates enforce release standards
+ - Results aggregated into consolidated report
+
+ Agents:
+ - Security Auditor: Vulnerability scan and compliance check
+ - Test Coverage Analyzer: Gap analysis and coverage validation
+ - Code Quality Reviewer: Code review and best practices
+ - Documentation Writer: Documentation completeness check
+
+ Quality Gates:
+ - No critical security issues
+ - Test coverage ≥ 80%
+ - Code quality score ≥ 7
+ - Documentation coverage ≥ 100%
+
+ Example:
+     >>> workflow = OrchestratedReleasePrepWorkflow()
+     >>> result = await workflow.execute(path=".")
+     >>> print(result.approved)
+     True
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ import asyncio
+ import logging
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ from typing import Any
+
+ from ..orchestration.agent_templates import AgentTemplate, get_template
+ from ..orchestration.execution_strategies import ParallelStrategy, StrategyResult
+ from ..orchestration.meta_orchestrator import MetaOrchestrator
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class QualityGate:
+     """Quality gate threshold for release readiness.
+
+     Attributes:
+         name: Gate identifier (e.g., "security", "coverage")
+         threshold: Minimum acceptable value
+         actual: Actual measured value
+         passed: Whether gate passed
+         critical: Whether failure blocks release
+         message: Human-readable status message
+     """
+
+     name: str
+     threshold: float
+     actual: float = 0.0
+     passed: bool = False
+     critical: bool = True
+     message: str = ""
+
+     def __post_init__(self):
+         """Validate inputs and generate the status message."""
+         if not self.name:
+             raise ValueError("name must be non-empty")
+         if self.threshold < 0:
+             raise ValueError("threshold must be non-negative")
+
+         # Note: passed field is computed externally based on gate semantics
+         # (some gates use >=, others use <=)
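+         # For example, the security gate passes when actual <= threshold
+         # (at most N critical issues), while the coverage, quality, and
+         # documentation gates pass when actual >= threshold.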
+
+         # Generate message if not provided
+         if not self.message:
+             status = "✅ PASS" if self.passed else "❌ FAIL"
+             self.message = (
+                 f"{self.name}: {status} "
+                 f"(actual: {self.actual:.1f}, threshold: {self.threshold:.1f})"
+             )
+
+
+ @dataclass
+ class ReleaseReadinessReport:
+     """Consolidated release readiness assessment.
+
+     Attributes:
+         approved: Overall release approval status
+         confidence: Confidence level ("high", "medium", "low")
+         quality_gates: List of quality gate results
+         agent_results: Individual agent outputs
+         blockers: Critical issues blocking release
+         warnings: Non-critical issues to address
+         summary: Executive summary of readiness
+         timestamp: Report generation time
+         total_duration: Total execution time in seconds
+     """
+
+     approved: bool
+     confidence: str
+     quality_gates: list[QualityGate] = field(default_factory=list)
+     agent_results: dict[str, dict] = field(default_factory=dict)
+     blockers: list[str] = field(default_factory=list)
+     warnings: list[str] = field(default_factory=list)
+     summary: str = ""
+     timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
+     total_duration: float = 0.0
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert report to dictionary format.
+
+         Returns:
+             Dictionary representation suitable for JSON serialization
+         """
+         return {
+             "approved": self.approved,
+             "confidence": self.confidence,
+             "quality_gates": [
+                 {
+                     "name": gate.name,
+                     "threshold": gate.threshold,
+                     "actual": gate.actual,
+                     "passed": gate.passed,
+                     "critical": gate.critical,
+                     "message": gate.message,
+                 }
+                 for gate in self.quality_gates
+             ],
+             "agent_results": self.agent_results,
+             "blockers": self.blockers,
+             "warnings": self.warnings,
+             "summary": self.summary,
+             "timestamp": self.timestamp,
+             "total_duration": self.total_duration,
+         }
+
+     def format_console_output(self) -> str:
+         """Format report for console display.
+
+         Returns:
+             Human-readable formatted report
+         """
+         lines = []
+
+         # Header
+         lines.append("=" * 70)
+         lines.append("RELEASE READINESS REPORT (Meta-Orchestrated)")
+         lines.append("=" * 70)
+         lines.append("")
+
+         # Status
+         status_icon = "✅" if self.approved else "❌"
+         lines.append(
+             f"Status: {status_icon} {'READY FOR RELEASE' if self.approved else 'NOT READY'}"
+         )
+         lines.append(f"Confidence: {self.confidence.upper()}")
+         lines.append(f"Generated: {self.timestamp}")
+         lines.append(f"Duration: {self.total_duration:.2f}s")
+         lines.append("")
+
+         # Quality Gates
+         lines.append("-" * 70)
+         lines.append("QUALITY GATES")
+         lines.append("-" * 70)
+         for gate in self.quality_gates:
+             icon = "✅" if gate.passed else ("🔴" if gate.critical else "⚠️")
+             lines.append(f"{icon} {gate.message}")
+         lines.append("")
+
+         # Blockers
+         if self.blockers:
+             lines.append("-" * 70)
+             lines.append("🚫 RELEASE BLOCKERS")
+             lines.append("-" * 70)
+             for blocker in self.blockers:
+                 lines.append(f" • {blocker}")
+             lines.append("")
+
+         # Warnings
+         if self.warnings:
+             lines.append("-" * 70)
+             lines.append("⚠️ WARNINGS")
+             lines.append("-" * 70)
+             for warning in self.warnings:
+                 lines.append(f" • {warning}")
+             lines.append("")
+
+         # Summary
+         if self.summary:
+             lines.append("-" * 70)
+             lines.append("EXECUTIVE SUMMARY")
+             lines.append("-" * 70)
+             lines.append(self.summary)
+             lines.append("")
+
+         # Agent Results Summary
+         lines.append("-" * 70)
+         lines.append(f"AGENTS EXECUTED ({len(self.agent_results)})")
+         lines.append("-" * 70)
+         for agent_id, result in self.agent_results.items():
+             success = result.get("success", False)
+             icon = "✅" if success else "❌"
+             duration = result.get("duration", 0.0)
+             lines.append(f"{icon} {agent_id}: {duration:.2f}s")
+         lines.append("")
+
+         lines.append("=" * 70)
+
+         return "\n".join(lines)
+
+
+ class OrchestratedReleasePrepWorkflow:
+     """Release preparation workflow using meta-orchestration.
+
+     This workflow demonstrates the meta-orchestration system's capabilities
+     by coordinating multiple validation agents in parallel to assess release
+     readiness.
+
+     The workflow:
+     1. Uses MetaOrchestrator to analyze task and select agents
+     2. Executes agents in parallel using ParallelStrategy
+     3. Aggregates results and enforces quality gates
+     4. Produces consolidated release readiness report
+
+     Quality Gates:
+     - no_critical_security_issues: No critical vulnerabilities
+     - min_test_coverage: Test coverage ≥ 80%
+     - min_code_quality: Quality score ≥ 7
+     - complete_documentation: All public APIs documented
+
+     Example:
+         >>> workflow = OrchestratedReleasePrepWorkflow()
+         >>> report = await workflow.execute(path=".")
+         >>> if report.approved:
+         ...     print("Ready for release!")
+     """
+
+     # Default quality gate thresholds
+     DEFAULT_QUALITY_GATES = {
+         "min_coverage": 80.0,
+         "min_quality_score": 7.0,
+         "max_critical_issues": 0.0,
+         "min_doc_coverage": 100.0,
+     }
+
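+     # Thresholds can be overridden per instance, e.g.:
+     #     OrchestratedReleasePrepWorkflow(quality_gates={"min_coverage": 90.0})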
+     def __init__(
+         self,
+         quality_gates: dict[str, float] | None = None,
+         agent_ids: list[str] | None = None,
+     ):
+         """Initialize orchestrated release prep workflow.
+
+         Args:
+             quality_gates: Custom quality gate thresholds
+             agent_ids: Specific agent IDs to use (defaults to domain defaults)
+
+         Raises:
+             ValueError: If quality gates are invalid
+         """
+         self.quality_gates = {**self.DEFAULT_QUALITY_GATES}
+         if quality_gates:
+             self.quality_gates.update(quality_gates)
+
+         # Validate quality gates
+         for name, threshold in self.quality_gates.items():
+             if not isinstance(threshold, int | float):
+                 raise ValueError(f"Quality gate '{name}' must be numeric")
+             if threshold < 0:
+                 raise ValueError(f"Quality gate '{name}' must be non-negative")
+
+         self.orchestrator = MetaOrchestrator()
+         self.agent_ids = agent_ids
+
+         logger.info(f"OrchestratedReleasePrepWorkflow initialized with gates: {self.quality_gates}")
+
+     async def execute(
+         self, path: str = ".", context: dict[str, Any] | None = None
+     ) -> ReleaseReadinessReport:
+         """Execute release preparation workflow.
+
+         Args:
+             path: Path to codebase to analyze (default: ".")
+             context: Additional context for agents
+
+         Returns:
+             ReleaseReadinessReport with consolidated results
+
+         Raises:
+             ValueError: If path is invalid
+         """
+         if not path or not isinstance(path, str):
+             raise ValueError("path must be a non-empty string")
+
+         logger.info(f"Starting orchestrated release prep for: {path}")
+         start_time = asyncio.get_event_loop().time()
+
+         # Prepare context
+         full_context = {
+             "path": path,
+             "quality_gates": self.quality_gates,
+             **(context or {}),
+         }
+
+         # Step 1: Analyze task and compose agents
+         task = (
+             "Prepare for release: validate security, test coverage, code quality, and documentation"
+         )
+         execution_plan = self.orchestrator.analyze_and_compose(task, full_context)
+
+         logger.info(
+             f"Execution plan: {len(execution_plan.agents)} agents, "
+             f"strategy={execution_plan.strategy.value}"
+         )
+
+         # Override agents if specific IDs provided
+         if self.agent_ids:
+             agents = []
+             for agent_id in self.agent_ids:
+                 template = get_template(agent_id)
+                 if template:
+                     agents.append(template)
+                 else:
+                     logger.warning(f"Agent template not found: {agent_id}")
+
+             if not agents:
+                 raise ValueError(f"No valid agents found from: {self.agent_ids}")
+
+             execution_plan.agents = agents
+
+         # Step 2: Execute agents in parallel
+         strategy = ParallelStrategy()
+         strategy_result = await strategy.execute(execution_plan.agents, full_context)
+
+         # Step 3: Process results and evaluate quality gates
+         report = await self._create_report(strategy_result, execution_plan.agents, full_context)
+
+         # Set duration
+         end_time = asyncio.get_event_loop().time()
+         report.total_duration = end_time - start_time
+
+         logger.info(
+             f"Release prep completed: approved={report.approved}, "
+             f"duration={report.total_duration:.2f}s"
+         )
+
+         return report
+
+     async def _create_report(
+         self,
+         strategy_result: StrategyResult,
+         agents: list[AgentTemplate],
+         context: dict[str, Any],
+     ) -> ReleaseReadinessReport:
+         """Create consolidated release readiness report.
+
+         Args:
+             strategy_result: Results from parallel execution
+             agents: Agents that were executed
+             context: Execution context
+
+         Returns:
+             ReleaseReadinessReport with all findings
+         """
+         # Extract agent results
+         agent_results: dict[str, dict] = {}
+         for result in strategy_result.outputs:
+             agent_results[result.agent_id] = {
+                 "success": result.success,
+                 "output": result.output,
+                 "confidence": result.confidence,
+                 "duration": result.duration_seconds,
+                 "error": result.error,
+             }
+
+         # Evaluate quality gates
+         quality_gates = self._evaluate_quality_gates(agent_results)
+
+         # Identify blockers and warnings
+         blockers, warnings = self._identify_issues(quality_gates, agent_results)
+
+         # Determine approval
+         critical_failures = [g for g in quality_gates if g.critical and not g.passed]
+         approved = len(critical_failures) == 0 and len(blockers) == 0
+
+         # Determine confidence
+         if approved and len(warnings) == 0:
+             confidence = "high"
+         elif approved:
+             confidence = "medium"
+         else:
+             confidence = "low"
+
+         # Generate summary
+         summary = self._generate_summary(approved, quality_gates, agent_results)
+
+         return ReleaseReadinessReport(
+             approved=approved,
+             confidence=confidence,
+             quality_gates=quality_gates,
+             agent_results=agent_results,
+             blockers=blockers,
+             warnings=warnings,
+             summary=summary,
+             total_duration=strategy_result.total_duration,
+         )
+
+     def _evaluate_quality_gates(self, agent_results: dict[str, dict]) -> list[QualityGate]:
+         """Evaluate all quality gates based on agent results.
+
+         Args:
+             agent_results: Results from all agents
+
+         Returns:
+             List of QualityGate results
+         """
+         gates = []
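+         # NOTE: missing agent outputs fall back to empty dicts below, so
+         # numeric fields default to 0 / 0.0 when an agent produced no output.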
+
+         # Security gate: no critical issues
+         security_result = agent_results.get("security_auditor", {}).get("output", {})
+         critical_issues = security_result.get("critical_issues", 0)
+
+         gates.append(
+             QualityGate(
+                 name="Security",
+                 threshold=self.quality_gates["max_critical_issues"],
+                 actual=float(critical_issues),
+                 critical=True,
+                 passed=critical_issues <= self.quality_gates["max_critical_issues"],
+             )
+         )
+
+         # Coverage gate: minimum test coverage
+         coverage_result = agent_results.get("test_coverage_analyzer", {}).get("output", {})
+         coverage_percent = coverage_result.get("coverage_percent", 0.0)
+
+         gates.append(
+             QualityGate(
+                 name="Test Coverage",
+                 threshold=self.quality_gates["min_coverage"],
+                 actual=coverage_percent,
+                 passed=coverage_percent >= self.quality_gates["min_coverage"],
+                 critical=True,
+             )
+         )
+
+         # Quality gate: minimum code quality score
+         quality_result = agent_results.get("code_reviewer", {}).get("output", {})
+         quality_score = quality_result.get("quality_score", 0.0)
+
+         gates.append(
+             QualityGate(
+                 name="Code Quality",
+                 threshold=self.quality_gates["min_quality_score"],
+                 actual=quality_score,
+                 passed=quality_score >= self.quality_gates["min_quality_score"],
+                 critical=True,
+             )
+         )
+
+         # Documentation gate: completeness
+         docs_result = agent_results.get("documentation_writer", {}).get("output", {})
+         doc_coverage = docs_result.get("coverage_percent", 0.0)
+
+         gates.append(
+             QualityGate(
+                 name="Documentation",
+                 threshold=self.quality_gates["min_doc_coverage"],
+                 actual=doc_coverage,
+                 passed=doc_coverage >= self.quality_gates["min_doc_coverage"],
+                 critical=False,  # Non-critical - warning only
+             )
+         )
+
+         return gates
+
+     def _identify_issues(
+         self, quality_gates: list[QualityGate], agent_results: dict[str, dict]
+     ) -> tuple[list[str], list[str]]:
+         """Identify blockers and warnings from quality gates and agent results.
+
+         Args:
+             quality_gates: Evaluated quality gates
+             agent_results: Agent execution results
+
+         Returns:
+             Tuple of (blockers, warnings)
+         """
+         blockers = []
+         warnings = []
+
+         # Check quality gates
+         for gate in quality_gates:
+             if not gate.passed:
+                 if gate.critical:
+                     blockers.append(f"{gate.name} failed: {gate.message}")
+                 else:
+                     warnings.append(f"{gate.name} below threshold: {gate.message}")
+
+         # Check agent errors
+         for agent_id, result in agent_results.items():
+             if not result["success"]:
+                 error = result.get("error", "Unknown error")
+                 blockers.append(f"Agent {agent_id} failed: {error}")
+
+         return blockers, warnings
+
+     def _generate_summary(
+         self,
+         approved: bool,
+         quality_gates: list[QualityGate],
+         agent_results: dict[str, dict],
+     ) -> str:
+         """Generate executive summary of release readiness.
+
+         Args:
+             approved: Overall approval status
+             quality_gates: Quality gate results
+             agent_results: Agent execution results
+
+         Returns:
+             Executive summary text
+         """
+         lines = []
+
+         if approved:
+             lines.append("✅ RELEASE APPROVED")
+             lines.append("")
+             lines.append("All quality gates passed. The codebase is ready for release.")
+         else:
+             lines.append("❌ RELEASE NOT APPROVED")
+             lines.append("")
+             lines.append("Critical quality gates failed. Address blockers before release.")
+
+         lines.append("")
+         lines.append("Quality Gate Summary:")
+
+         passed_count = sum(1 for g in quality_gates if g.passed)
+         total_count = len(quality_gates)
+         lines.append(f" Passed: {passed_count}/{total_count}")
+
+         failed_gates = [g for g in quality_gates if not g.passed]
+         if failed_gates:
+             lines.append(" Failed:")
+             for gate in failed_gates:
+                 # Report actual vs. threshold; a fixed "<" would be wrong for
+                 # gates that fail by exceeding a maximum (e.g. the security gate)
+                 lines.append(f" • {gate.name}: actual {gate.actual:.1f} vs threshold {gate.threshold:.1f}")
+
+         lines.append("")
+         lines.append(f"Agents Executed: {len(agent_results)}")
+
+         successful_agents = sum(1 for r in agent_results.values() if r["success"])
+         lines.append(f" Successful: {successful_agents}/{len(agent_results)}")
+
+         return "\n".join(lines)
+
+
+ async def main():
+     """CLI entry point for orchestrated release preparation."""
+     import sys
+
+     workflow = OrchestratedReleasePrepWorkflow()
+
+     # Get path from args or use current directory
+     path = sys.argv[1] if len(sys.argv) > 1 else "."
+
+     # Execute workflow
+     report = await workflow.execute(path=path)
+
+     # Print formatted report
+     print(report.format_console_output())
+
+     # Exit with appropriate code
+     sys.exit(0 if report.approved else 1)
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
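
For reference, a minimal sketch of invoking the new workflow programmatically. This is a sketch under an assumption: the import path below is not confirmed by the diff, which shows only relative imports, so adjust it to the installed package layout.

    import asyncio

    # Assumed module path -- the diff shows only relative imports, so the
    # actual location within empathy_framework may differ.
    from empathy_framework.workflows.orchestrated_release_prep import (
        OrchestratedReleasePrepWorkflow,
    )

    async def check_release(path: str = ".") -> bool:
        # Keys mirror DEFAULT_QUALITY_GATES; here the coverage bar is raised.
        workflow = OrchestratedReleasePrepWorkflow(
            quality_gates={"min_coverage": 85.0},
        )
        report = await workflow.execute(path=path)
        print(report.format_console_output())
        return report.approved

    if __name__ == "__main__":
        raise SystemExit(0 if asyncio.run(check_release(".")) else 1)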