empathy-framework 4.7.1-py3-none-any.whl → 4.9.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/METADATA +65 -2
  2. {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/RECORD +69 -59
  3. {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/entry_points.txt +2 -1
  5. {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/top_level.txt +0 -1
  6. empathy_os/__init__.py +2 -0
  7. empathy_os/cli/__init__.py +128 -238
  8. empathy_os/cli/__main__.py +5 -33
  9. empathy_os/cli/commands/__init__.py +1 -8
  10. empathy_os/cli/commands/help.py +331 -0
  11. empathy_os/cli/commands/info.py +140 -0
  12. empathy_os/cli/commands/inspect.py +437 -0
  13. empathy_os/cli/commands/metrics.py +92 -0
  14. empathy_os/cli/commands/orchestrate.py +184 -0
  15. empathy_os/cli/commands/patterns.py +207 -0
  16. empathy_os/cli/commands/provider.py +93 -81
  17. empathy_os/cli/commands/setup.py +96 -0
  18. empathy_os/cli/commands/status.py +235 -0
  19. empathy_os/cli/commands/sync.py +166 -0
  20. empathy_os/cli/commands/tier.py +121 -0
  21. empathy_os/cli/commands/workflow.py +574 -0
  22. empathy_os/cli/parsers/__init__.py +62 -0
  23. empathy_os/cli/parsers/help.py +41 -0
  24. empathy_os/cli/parsers/info.py +26 -0
  25. empathy_os/cli/parsers/inspect.py +66 -0
  26. empathy_os/cli/parsers/metrics.py +42 -0
  27. empathy_os/cli/parsers/orchestrate.py +61 -0
  28. empathy_os/cli/parsers/patterns.py +54 -0
  29. empathy_os/cli/parsers/provider.py +40 -0
  30. empathy_os/cli/parsers/setup.py +42 -0
  31. empathy_os/cli/parsers/status.py +47 -0
  32. empathy_os/cli/parsers/sync.py +31 -0
  33. empathy_os/cli/parsers/tier.py +33 -0
  34. empathy_os/cli/parsers/workflow.py +77 -0
  35. empathy_os/cli/utils/__init__.py +1 -0
  36. empathy_os/cli/utils/data.py +242 -0
  37. empathy_os/cli/utils/helpers.py +68 -0
  38. empathy_os/{cli.py → cli_legacy.py} +0 -26
  39. empathy_os/cli_minimal.py +662 -0
  40. empathy_os/cli_router.py +384 -0
  41. empathy_os/cli_unified.py +13 -2
  42. empathy_os/memory/short_term.py +146 -414
  43. empathy_os/memory/types.py +441 -0
  44. empathy_os/memory/unified.py +61 -48
  45. empathy_os/models/fallback.py +1 -1
  46. empathy_os/models/provider_config.py +59 -344
  47. empathy_os/models/registry.py +27 -176
  48. empathy_os/monitoring/alerts.py +14 -20
  49. empathy_os/monitoring/alerts_cli.py +24 -7
  50. empathy_os/project_index/__init__.py +2 -0
  51. empathy_os/project_index/index.py +210 -5
  52. empathy_os/project_index/scanner.py +48 -16
  53. empathy_os/project_index/scanner_parallel.py +291 -0
  54. empathy_os/workflow_commands.py +9 -9
  55. empathy_os/workflows/__init__.py +31 -2
  56. empathy_os/workflows/base.py +295 -317
  57. empathy_os/workflows/bug_predict.py +10 -2
  58. empathy_os/workflows/builder.py +273 -0
  59. empathy_os/workflows/caching.py +253 -0
  60. empathy_os/workflows/code_review_pipeline.py +1 -0
  61. empathy_os/workflows/history.py +512 -0
  62. empathy_os/workflows/perf_audit.py +129 -23
  63. empathy_os/workflows/routing.py +163 -0
  64. empathy_os/workflows/secure_release.py +1 -0
  65. empathy_os/workflows/security_audit.py +1 -0
  66. empathy_os/workflows/security_audit_phase3.py +352 -0
  67. empathy_os/workflows/telemetry_mixin.py +269 -0
  68. empathy_os/workflows/test_gen.py +7 -7
  69. empathy_os/dashboard/__init__.py +0 -15
  70. empathy_os/dashboard/server.py +0 -941
  71. empathy_os/vscode_bridge 2.py +0 -173
  72. empathy_os/workflows/progressive/README 2.md +0 -454
  73. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  74. empathy_os/workflows/progressive/cli 2.py +0 -242
  75. empathy_os/workflows/progressive/core 2.py +0 -488
  76. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  77. empathy_os/workflows/progressive/reports 2.py +0 -528
  78. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  79. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  80. empathy_os/workflows/progressive/workflow 2.py +0 -628
  81. patterns/README.md +0 -119
  82. patterns/__init__.py +0 -95
  83. patterns/behavior.py +0 -298
  84. patterns/code_review_memory.json +0 -441
  85. patterns/core.py +0 -97
  86. patterns/debugging.json +0 -3763
  87. patterns/empathy.py +0 -268
  88. patterns/health_check_memory.json +0 -505
  89. patterns/input.py +0 -161
  90. patterns/memory_graph.json +0 -8
  91. patterns/refactoring_memory.json +0 -1113
  92. patterns/registry.py +0 -663
  93. patterns/security_memory.json +0 -8
  94. patterns/structural.py +0 -415
  95. patterns/validation.py +0 -194
  96. {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/licenses/LICENSE +0 -0
empathy_os/workflows/history.py (new file)
@@ -0,0 +1,512 @@
+"""SQLite-backed workflow history storage.
+
+Replaces JSON file-based history with structured, queryable storage.
+Provides concurrent-safe workflow execution history with fast queries.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from __future__ import annotations
+
+import sqlite3
+from datetime import datetime, timedelta
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from empathy_os.logging_config import get_logger
+
+if TYPE_CHECKING:
+    from .base import WorkflowResult
+
+logger = get_logger(__name__)
+
+
+class WorkflowHistoryStore:
+    """SQLite-backed workflow history with migrations.
+
+    Provides concurrent-safe storage with fast queries for workflow execution history.
+
+    Features:
+    - Concurrent-safe (SQLite ACID guarantees)
+    - Fast queries with indexes
+    - Unlimited history (no artificial limits)
+    - Flexible filtering by workflow, provider, date range
+    - Analytics-ready with aggregate queries
+
+    Example:
+        >>> store = WorkflowHistoryStore()
+        >>> store.record_run("run-123", "test-gen", "anthropic", result)
+        >>> stats = store.get_stats()
+        >>> recent = store.query_runs(limit=10)
+        >>> store.close()
+    """
+
+    SCHEMA_VERSION = 1
+    DEFAULT_DB = ".empathy/history.db"
+
+    def __init__(self, db_path: str = DEFAULT_DB):
+        """Initialize history store.
+
+        Args:
+            db_path: Path to SQLite database file (default: .empathy/history.db)
+        """
+        self.db_path = db_path
+        Path(db_path).parent.mkdir(parents=True, exist_ok=True)
+
+        # Enable thread-safe access
+        self.conn = sqlite3.connect(db_path, check_same_thread=False)
+        self.conn.row_factory = sqlite3.Row
+
+        self._migrate()
+
+    def _migrate(self) -> None:
+        """Create schema if needed.
+
+        Creates workflow_runs and workflow_stages tables with appropriate indexes.
+        Idempotent - safe to call multiple times.
+        """
+        # Main workflow runs table
+        self.conn.execute("""
+            CREATE TABLE IF NOT EXISTS workflow_runs (
+                run_id TEXT PRIMARY KEY,
+                workflow_name TEXT NOT NULL,
+                provider TEXT NOT NULL,
+                success INTEGER NOT NULL,
+                started_at TEXT NOT NULL,
+                completed_at TEXT NOT NULL,
+                duration_ms INTEGER NOT NULL,
+                total_cost REAL NOT NULL,
+                baseline_cost REAL NOT NULL,
+                savings REAL NOT NULL,
+                savings_percent REAL NOT NULL,
+                error TEXT,
+                error_type TEXT,
+                transient INTEGER DEFAULT 0,
+                xml_parsed INTEGER DEFAULT 0,
+                summary TEXT,
+                created_at TEXT DEFAULT CURRENT_TIMESTAMP
+            )
+        """)
+
+        # Workflow stages (1:many relationship)
+        self.conn.execute("""
+            CREATE TABLE IF NOT EXISTS workflow_stages (
+                stage_id INTEGER PRIMARY KEY AUTOINCREMENT,
+                run_id TEXT NOT NULL,
+                stage_name TEXT NOT NULL,
+                tier TEXT NOT NULL,
+                skipped INTEGER NOT NULL DEFAULT 0,
+                skip_reason TEXT,
+                cost REAL NOT NULL DEFAULT 0.0,
+                duration_ms INTEGER NOT NULL DEFAULT 0,
+                input_tokens INTEGER DEFAULT 0,
+                output_tokens INTEGER DEFAULT 0,
+                FOREIGN KEY (run_id) REFERENCES workflow_runs(run_id)
+            )
+        """)
+
+        # Indexes for common queries
+        self.conn.execute("""
+            CREATE INDEX IF NOT EXISTS idx_workflow_name
+            ON workflow_runs(workflow_name)
+        """)
+
+        self.conn.execute("""
+            CREATE INDEX IF NOT EXISTS idx_started_at
+            ON workflow_runs(started_at DESC)
+        """)
+
+        self.conn.execute("""
+            CREATE INDEX IF NOT EXISTS idx_provider
+            ON workflow_runs(provider)
+        """)
+
+        self.conn.execute("""
+            CREATE INDEX IF NOT EXISTS idx_success
+            ON workflow_runs(success)
+        """)
+
+        # Index for stage queries
+        self.conn.execute("""
+            CREATE INDEX IF NOT EXISTS idx_run_stages
+            ON workflow_stages(run_id)
+        """)
+
+        self.conn.commit()
+        logger.debug(f"History store initialized: {self.db_path}")
+
+    def record_run(
+        self,
+        run_id: str,
+        workflow_name: str,
+        provider: str,
+        result: WorkflowResult,
+    ) -> None:
+        """Record a workflow execution.
+
+        Args:
+            run_id: Unique identifier for this run
+            workflow_name: Name of the workflow
+            provider: Provider used (anthropic, openai, google)
+            result: WorkflowResult object with execution details
+
+        Raises:
+            ValueError: If run_id already exists (wraps sqlite3.IntegrityError)
+            sqlite3.OperationalError: If database is locked
+ """
158
+ cursor = self.conn.cursor()
159
+
160
+ try:
161
+ # Insert run record
162
+ cursor.execute(
163
+ """
164
+ INSERT INTO workflow_runs (
165
+ run_id, workflow_name, provider, success,
166
+ started_at, completed_at, duration_ms,
167
+ total_cost, baseline_cost, savings, savings_percent,
168
+ error, error_type, transient,
169
+ xml_parsed, summary
170
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
171
+ """,
172
+ (
173
+ run_id,
174
+ workflow_name,
175
+ provider,
176
+ 1 if result.success else 0,
177
+ result.started_at.isoformat(),
178
+ result.completed_at.isoformat(),
179
+ result.total_duration_ms,
180
+ result.cost_report.total_cost,
181
+ result.cost_report.baseline_cost,
182
+ result.cost_report.savings,
183
+ result.cost_report.savings_percent,
184
+ result.error,
185
+ result.error_type,
186
+ 1 if result.transient else 0,
187
+ 1
188
+ if isinstance(result.final_output, dict)
189
+ and result.final_output.get("xml_parsed")
190
+ else 0,
191
+ result.final_output.get("summary")
192
+ if isinstance(result.final_output, dict)
193
+ else None,
194
+ ),
195
+ )
196
+
197
+ # Insert stage records
198
+ for stage in result.stages:
199
+ cursor.execute(
200
+ """
201
+ INSERT INTO workflow_stages (
202
+ run_id, stage_name, tier, skipped, skip_reason,
203
+ cost, duration_ms, input_tokens, output_tokens
204
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
205
+ """,
206
+ (
207
+ run_id,
208
+ stage.name,
209
+ stage.tier.value,
210
+ 1 if stage.skipped else 0,
211
+ stage.skip_reason,
212
+ stage.cost,
213
+ stage.duration_ms,
214
+ stage.input_tokens,
215
+ stage.output_tokens,
216
+ ),
217
+ )
218
+
219
+ self.conn.commit()
220
+ logger.debug(f"Recorded workflow run: {run_id} ({workflow_name})")
221
+
222
+ except sqlite3.IntegrityError as e:
223
+ self.conn.rollback()
224
+ logger.warning(f"Run ID already exists: {run_id}")
225
+ raise ValueError(f"Duplicate run_id: {run_id}") from e
226
+ except sqlite3.OperationalError as e:
227
+ self.conn.rollback()
228
+ logger.error(f"Database error recording run: {e}")
229
+ raise
230
+
231
+ def query_runs(
232
+ self,
233
+ workflow_name: str | None = None,
234
+ provider: str | None = None,
235
+ since: datetime | None = None,
236
+ until: datetime | None = None,
237
+ success_only: bool = False,
238
+ limit: int = 100,
239
+ ) -> list[dict]:
240
+ """Query workflow runs with flexible filters.
241
+
242
+ Args:
243
+ workflow_name: Filter by workflow name (optional)
244
+ provider: Filter by provider (optional)
245
+ since: Filter runs after this datetime (optional)
246
+ until: Filter runs before this datetime (optional)
247
+ success_only: Only return successful runs (default: False)
248
+ limit: Maximum number of runs to return (default: 100)
249
+
250
+ Returns:
251
+ List of workflow run dictionaries with stages included
252
+
253
+ Example:
254
+ >>> # Get recent successful test-gen runs
255
+ >>> runs = store.query_runs(
256
+ ... workflow_name="test-gen",
257
+ ... success_only=True,
258
+ ... limit=10
259
+ ... )
260
+ """
261
+ query = "SELECT * FROM workflow_runs WHERE 1=1"
262
+ params: list[Any] = []
263
+
264
+ if workflow_name:
265
+ query += " AND workflow_name = ?"
266
+ params.append(workflow_name)
267
+
268
+ if provider:
269
+ query += " AND provider = ?"
270
+ params.append(provider)
271
+
272
+ if since:
273
+ query += " AND started_at >= ?"
274
+ params.append(since.isoformat())
275
+
276
+ if until:
277
+ query += " AND started_at <= ?"
278
+ params.append(until.isoformat())
279
+
280
+ if success_only:
281
+ query += " AND success = 1"
282
+
283
+ query += " ORDER BY started_at DESC LIMIT ?"
284
+ params.append(limit)
285
+
286
+ cursor = self.conn.cursor()
287
+ cursor.execute(query, params)
288
+
289
+ runs = []
290
+ for row in cursor.fetchall():
291
+ run = dict(row)
292
+
293
+ # Fetch stages for this run
294
+ cursor.execute(
295
+ """
296
+ SELECT * FROM workflow_stages
297
+ WHERE run_id = ?
298
+ ORDER BY stage_id
299
+ """,
300
+ (run["run_id"],),
301
+ )
302
+
303
+ run["stages"] = [dict(s) for s in cursor.fetchall()]
304
+ runs.append(run)
305
+
306
+ return runs
307
+
308
+ def get_stats(self) -> dict[str, Any]:
309
+ """Get aggregate statistics across all workflow runs.
310
+
311
+ Returns:
312
+ Dictionary with statistics including:
313
+ - total_runs: Total number of runs
314
+ - successful_runs: Number of successful runs
315
+ - by_workflow: Stats grouped by workflow name
316
+ - by_provider: Stats grouped by provider
317
+ - by_tier: Total cost grouped by tier
318
+ - recent_runs: Last 10 runs
319
+ - total_cost: Total cost across all runs
320
+ - total_savings: Total savings from tier optimization
321
+ - avg_savings_percent: Average savings percentage
322
+
323
+ Example:
324
+ >>> stats = store.get_stats()
325
+ >>> print(f"Total savings: ${stats['total_savings']:.2f}")
326
+ >>> print(f"Success rate: {stats['successful_runs'] / stats['total_runs']:.1%}")
327
+ """
328
+ cursor = self.conn.cursor()
329
+
330
+ # Total runs by workflow
331
+ cursor.execute("""
332
+ SELECT
333
+ workflow_name,
334
+ COUNT(*) as runs,
335
+ SUM(total_cost) as cost,
336
+ SUM(savings) as savings,
337
+ SUM(success) as successful
338
+ FROM workflow_runs
339
+ GROUP BY workflow_name
340
+ """)
341
+ by_workflow = {row["workflow_name"]: dict(row) for row in cursor.fetchall()}
342
+
343
+ # Total runs by provider
344
+ cursor.execute("""
345
+ SELECT
346
+ provider,
347
+ COUNT(*) as runs,
348
+ SUM(total_cost) as cost
349
+ FROM workflow_runs
350
+ GROUP BY provider
351
+ """)
352
+ by_provider = {row["provider"]: dict(row) for row in cursor.fetchall()}
353
+
354
+ # Total cost by tier
355
+ cursor.execute("""
356
+ SELECT
357
+ tier,
358
+ SUM(cost) as total_cost
359
+ FROM workflow_stages
360
+ WHERE skipped = 0
361
+ GROUP BY tier
362
+ """)
363
+ by_tier = {row["tier"]: row["total_cost"] for row in cursor.fetchall()}
364
+
365
+ # Recent runs (last 10)
366
+ cursor.execute("""
367
+ SELECT * FROM workflow_runs
368
+ ORDER BY started_at DESC
369
+ LIMIT 10
370
+ """)
371
+ recent_runs = [dict(row) for row in cursor.fetchall()]
372
+
373
+ # Totals
374
+ cursor.execute("""
375
+ SELECT
376
+ COUNT(*) as total_runs,
377
+ SUM(success) as successful_runs,
378
+ SUM(total_cost) as total_cost,
379
+ SUM(savings) as total_savings,
380
+ AVG(CASE WHEN success = 1 THEN savings_percent ELSE NULL END) as avg_savings_percent
381
+ FROM workflow_runs
382
+ """)
383
+ totals = dict(cursor.fetchone())
384
+
385
+ return {
386
+ "total_runs": totals["total_runs"] or 0,
387
+ "successful_runs": totals["successful_runs"] or 0,
388
+ "by_workflow": by_workflow,
389
+ "by_provider": by_provider,
390
+ "by_tier": by_tier,
391
+ "recent_runs": recent_runs,
392
+ "total_cost": totals["total_cost"] or 0.0,
393
+ "total_savings": totals["total_savings"] or 0.0,
394
+ "avg_savings_percent": totals["avg_savings_percent"] or 0.0,
395
+ }
396
+
397
+ def get_run_by_id(self, run_id: str) -> dict | None:
398
+ """Get a specific run by ID.
399
+
400
+ Args:
401
+ run_id: The run ID to fetch
402
+
403
+ Returns:
404
+ Run dictionary with stages, or None if not found
405
+ """
406
+ cursor = self.conn.cursor()
407
+ cursor.execute("SELECT * FROM workflow_runs WHERE run_id = ?", (run_id,))
408
+
409
+ row = cursor.fetchone()
410
+ if not row:
411
+ return None
412
+
413
+ run = dict(row)
414
+
415
+ # Fetch stages
416
+ cursor.execute(
417
+ """
418
+ SELECT * FROM workflow_stages
419
+ WHERE run_id = ?
420
+ ORDER BY stage_id
421
+ """,
422
+ (run_id,),
423
+ )
424
+ run["stages"] = [dict(s) for s in cursor.fetchall()]
425
+
426
+ return run
427
+
428
+ def delete_run(self, run_id: str) -> bool:
429
+ """Delete a workflow run and its stages.
430
+
431
+ Args:
432
+ run_id: The run ID to delete
433
+
434
+ Returns:
435
+ True if run was deleted, False if not found
436
+ """
437
+ cursor = self.conn.cursor()
438
+
439
+ # Delete stages first (foreign key)
440
+ cursor.execute("DELETE FROM workflow_stages WHERE run_id = ?", (run_id,))
441
+
442
+ # Delete run
443
+ cursor.execute("DELETE FROM workflow_runs WHERE run_id = ?", (run_id,))
444
+
445
+ deleted = cursor.rowcount > 0
446
+ self.conn.commit()
447
+
448
+ if deleted:
449
+ logger.debug(f"Deleted workflow run: {run_id}")
450
+
451
+ return deleted
452
+
453
+ def cleanup_old_runs(self, keep_days: int = 90) -> int:
454
+ """Delete runs older than specified days.
455
+
456
+ Args:
457
+ keep_days: Number of days to keep (default: 90)
458
+
459
+ Returns:
460
+ Number of runs deleted
461
+ """
+        # Cutoff in the same isoformat used when storing started_at,
+        # so the string comparison below matches stored timestamps
+        cutoff = (datetime.now() - timedelta(days=keep_days)).isoformat()
+
+        cursor = self.conn.cursor()
+
+        # Get run IDs to delete
+        cursor.execute(
+            "SELECT run_id FROM workflow_runs WHERE started_at < ?",
+            (cutoff,),
+        )
+
+        run_ids = [row["run_id"] for row in cursor.fetchall()]
+
+        if not run_ids:
+            return 0
+
+        # Delete stages for these runs
+        placeholders = ",".join("?" * len(run_ids))
+        cursor.execute(
+            f"DELETE FROM workflow_stages WHERE run_id IN ({placeholders})", run_ids
+        )
+
+        # Delete runs
+        cursor.execute(
+            f"DELETE FROM workflow_runs WHERE run_id IN ({placeholders})", run_ids
+        )
+
+        self.conn.commit()
+        logger.info(f"Cleaned up {len(run_ids)} runs older than {keep_days} days")
+
+        return len(run_ids)
+
+    def close(self) -> None:
+        """Close database connection.
+
+        Should be called when done with the store.
+        """
+        self.conn.close()
+        logger.debug("History store closed")
+
+    def __enter__(self):
+        """Context manager entry."""
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Context manager exit - closes connection."""
+        self.close()
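For orientation, here is a minimal usage sketch of the store added above. It is an illustration, not part of the package diff: it assumes the module is importable as empathy_os.workflows.history (per the file list), and it exercises only the query_runs, get_stats, and context-manager APIs shown in the diff.

from datetime import datetime, timedelta

from empathy_os.workflows.history import WorkflowHistoryStore

# __enter__/__exit__ make the store a context manager, so the
# SQLite connection is closed when the block exits
with WorkflowHistoryStore(db_path=".empathy/history.db") as store:
    # Runs from the last 7 days, most recent first; each dict
    # carries a "stages" list joined from workflow_stages
    recent = store.query_runs(since=datetime.now() - timedelta(days=7), limit=20)
    for run in recent:
        print(run["workflow_name"], bool(run["success"]), len(run["stages"]))

    # Aggregate totals across all recorded runs
    stats = store.get_stats()
    print(f"Total cost: ${stats['total_cost']:.4f}")
    print(f"Total savings: ${stats['total_savings']:.4f}")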
empathy_os/workflows/perf_audit.py
@@ -20,6 +20,7 @@ from pathlib import Path
 from typing import Any
 
 from .base import BaseWorkflow, ModelTier
+from .output import Finding, WorkflowReport, get_console
 from .step_config import WorkflowStepConfig
 
 # Define step configurations for executor-based execution
@@ -268,10 +269,10 @@ class PerformanceAuditWorkflow(BaseWorkflow):
         # Analyze each file
         analysis: list[dict] = []
         for file_path, file_findings in by_file.items():
-            # Calculate file complexity score
-            high_count = len([f for f in file_findings if f["impact"] == "high"])
-            medium_count = len([f for f in file_findings if f["impact"] == "medium"])
-            low_count = len([f for f in file_findings if f["impact"] == "low"])
+            # Calculate file complexity score (generator expressions for memory efficiency)
+            high_count = sum(1 for f in file_findings if f["impact"] == "high")
+            medium_count = sum(1 for f in file_findings if f["impact"] == "medium")
+            low_count = sum(1 for f in file_findings if f["impact"] == "low")
 
             complexity_score = high_count * 10 + medium_count * 5 + low_count * 1
 
@@ -483,6 +484,9 @@ Provide detailed optimization strategies."""
         # Add formatted report for human readability
         result["formatted_report"] = format_perf_audit_report(result, input_data)
 
+        # Add structured WorkflowReport for Rich rendering
+        result["workflow_report"] = create_perf_audit_workflow_report(result, input_data)
+
         return (result, input_tokens, output_tokens)
 
     def _get_optimization_action(self, concern: str) -> dict | None:
@@ -532,6 +536,92 @@ Provide detailed optimization strategies."""
         return actions.get(concern)
 
 
+def create_perf_audit_workflow_report(result: dict, input_data: dict) -> WorkflowReport:
+    """Create a WorkflowReport from performance audit results.
+
+    Args:
+        result: The optimize stage result
+        input_data: Input data from previous stages
+
+    Returns:
+        WorkflowReport instance for Rich or plain text rendering
+    """
+    perf_score = result.get("perf_score", 0)
+    perf_level = result.get("perf_level", "unknown")
+
+    # Determine report level
+    if perf_score >= 85:
+        level = "success"
+    elif perf_score >= 50:
+        level = "warning"
+    else:
+        level = "error"
+
+    # Build summary
+    files_scanned = input_data.get("files_scanned", 0)
+    finding_count = input_data.get("finding_count", 0)
+    by_impact = input_data.get("by_impact", {})
+
+    summary = (
+        f"Scanned {files_scanned} files, found {finding_count} issues. "
+        f"High: {by_impact.get('high', 0)}, Medium: {by_impact.get('medium', 0)}, "
+        f"Low: {by_impact.get('low', 0)}"
+    )
+
+    report = WorkflowReport(
+        title="Performance Audit Report",
+        summary=summary,
+        score=perf_score,
+        level=level,
+        metadata={
+            "perf_level": perf_level,
+            "files_scanned": files_scanned,
+            "finding_count": finding_count,
+        },
+    )
+
+    # Add top issues section
+    top_issues = result.get("top_issues", [])
+    if top_issues:
+        issues_content = {
+            issue.get("type", "unknown").replace("_", " ").title(): f"{issue.get('count', 0)} occurrences"
+            for issue in top_issues
+        }
+        report.add_section("Top Performance Issues", issues_content)
+
+    # Add hotspots section
+    hotspot_result = input_data.get("hotspot_result", {})
+    hotspots = hotspot_result.get("hotspots", [])
+    if hotspots:
+        hotspot_content = {
+            "Critical Hotspots": hotspot_result.get("critical_count", 0),
+            "Moderate Hotspots": hotspot_result.get("moderate_count", 0),
+        }
+        report.add_section("Hotspot Summary", hotspot_content)
+
+    # Add findings section
+    findings = input_data.get("findings", [])
+    high_impact = [f for f in findings if f.get("impact") == "high"]
+    if high_impact:
+        finding_objs = [
+            Finding(
+                severity="high",
+                file=f.get("file", "unknown"),
+                line=f.get("line"),
+                message=f.get("description", ""),
+            )
+            for f in high_impact[:10]
+        ]
+        report.add_section("High Impact Findings", finding_objs, style="error")
+
+    # Add recommendations section
+    optimization_plan = result.get("optimization_plan", "")
+    if optimization_plan:
+        report.add_section("Optimization Recommendations", optimization_plan)
+
+    return report
+
+
 def format_perf_audit_report(result: dict, input_data: dict) -> str:
     """Format performance audit output as a human-readable report.
 
@@ -660,26 +750,42 @@ def main():
        workflow = PerformanceAuditWorkflow()
        result = await workflow.execute(path=".", file_types=[".py"])
 
-        print("\nPerformance Audit Results")
-        print("=" * 50)
-        print(f"Provider: {result.provider}")
-        print(f"Success: {result.success}")
-
        output = result.final_output
-        print(f"Performance Level: {output.get('perf_level', 'N/A')}")
-        print(f"Performance Score: {output.get('perf_score', 0)}/100")
-        print(f"Recommendations: {output.get('recommendation_count', 0)}")
-
-        if output.get("top_issues"):
-            print("\nTop Issues:")
-            for issue in output["top_issues"]:
-                print(f"  - {issue['type']}: {issue['count']} occurrences")
-
-        print("\nCost Report:")
-        print(f"  Total Cost: ${result.cost_report.total_cost:.4f}")
-        savings = result.cost_report.savings
-        pct = result.cost_report.savings_percent
-        print(f"  Savings: ${savings:.4f} ({pct:.1f}%)")
+
+        # Try Rich output first
+        console = get_console()
+        workflow_report = output.get("workflow_report")
+
+        if console and workflow_report:
+            # Render with Rich
+            workflow_report.render(console, use_rich=True)
+            console.print()
+            console.print(f"[dim]Provider: {result.provider}[/dim]")
+            console.print(f"[dim]Cost: ${result.cost_report.total_cost:.4f}[/dim]")
+            savings = result.cost_report.savings
+            pct = result.cost_report.savings_percent
+            console.print(f"[dim]Savings: ${savings:.4f} ({pct:.1f}%)[/dim]")
+        else:
+            # Fallback to plain text
+            print("\nPerformance Audit Results")
+            print("=" * 50)
+            print(f"Provider: {result.provider}")
+            print(f"Success: {result.success}")
+
+            print(f"Performance Level: {output.get('perf_level', 'N/A')}")
+            print(f"Performance Score: {output.get('perf_score', 0)}/100")
+            print(f"Recommendations: {output.get('recommendation_count', 0)}")
+
+            if output.get("top_issues"):
+                print("\nTop Issues:")
+                for issue in output["top_issues"]:
+                    print(f"  - {issue['type']}: {issue['count']} occurrences")
+
+            print("\nCost Report:")
+            print(f"  Total Cost: ${result.cost_report.total_cost:.4f}")
+            savings = result.cost_report.savings
+            pct = result.cost_report.savings_percent
+            print(f"  Savings: ${savings:.4f} ({pct:.1f}%)")
 
    asyncio.run(run())
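As a quick illustration of the scoring logic these hunks introduce, here is a standalone sketch. The weights and thresholds are copied from the diff above; the findings list and perf_score value are invented for the example.

# Example findings for one file (hypothetical data)
file_findings = [
    {"impact": "high"},
    {"impact": "medium"},
    {"impact": "medium"},
    {"impact": "low"},
]

# 4.9.0 counts with generator expressions instead of building
# intermediate lists, as in the second hunk
high_count = sum(1 for f in file_findings if f["impact"] == "high")
medium_count = sum(1 for f in file_findings if f["impact"] == "medium")
low_count = sum(1 for f in file_findings if f["impact"] == "low")

# Weighted file complexity: high=10, medium=5, low=1
complexity_score = high_count * 10 + medium_count * 5 + low_count * 1
print(complexity_score)  # 1*10 + 2*5 + 1*1 = 21

# Report level buckets used by create_perf_audit_workflow_report
def report_level(perf_score: int) -> str:
    if perf_score >= 85:
        return "success"
    if perf_score >= 50:
        return "warning"
    return "error"

print(report_level(72))  # -> "warning"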