tapps-agents 3.5.41__py3-none-any.whl → 3.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. tapps_agents/__init__.py +2 -2
  2. tapps_agents/agents/reviewer/scoring.py +1566 -1566
  3. tapps_agents/agents/reviewer/tools/__init__.py +41 -41
  4. tapps_agents/cli/commands/health.py +665 -665
  5. tapps_agents/cli/commands/top_level.py +3586 -3586
  6. tapps_agents/core/artifact_context_builder.py +293 -0
  7. tapps_agents/core/config.py +33 -0
  8. tapps_agents/health/orchestrator.py +271 -271
  9. tapps_agents/resources/__init__.py +5 -0
  10. tapps_agents/resources/claude/__init__.py +1 -0
  11. tapps_agents/resources/claude/commands/README.md +156 -0
  12. tapps_agents/resources/claude/commands/__init__.py +1 -0
  13. tapps_agents/resources/claude/commands/build-fix.md +22 -0
  14. tapps_agents/resources/claude/commands/build.md +77 -0
  15. tapps_agents/resources/claude/commands/debug.md +53 -0
  16. tapps_agents/resources/claude/commands/design.md +68 -0
  17. tapps_agents/resources/claude/commands/docs.md +53 -0
  18. tapps_agents/resources/claude/commands/e2e.md +22 -0
  19. tapps_agents/resources/claude/commands/fix.md +54 -0
  20. tapps_agents/resources/claude/commands/implement.md +53 -0
  21. tapps_agents/resources/claude/commands/improve.md +53 -0
  22. tapps_agents/resources/claude/commands/library-docs.md +64 -0
  23. tapps_agents/resources/claude/commands/lint.md +52 -0
  24. tapps_agents/resources/claude/commands/plan.md +65 -0
  25. tapps_agents/resources/claude/commands/refactor-clean.md +21 -0
  26. tapps_agents/resources/claude/commands/refactor.md +55 -0
  27. tapps_agents/resources/claude/commands/review.md +67 -0
  28. tapps_agents/resources/claude/commands/score.md +60 -0
  29. tapps_agents/resources/claude/commands/security-review.md +22 -0
  30. tapps_agents/resources/claude/commands/security-scan.md +54 -0
  31. tapps_agents/resources/claude/commands/tdd.md +24 -0
  32. tapps_agents/resources/claude/commands/test-coverage.md +21 -0
  33. tapps_agents/resources/claude/commands/test.md +54 -0
  34. tapps_agents/resources/claude/commands/update-codemaps.md +20 -0
  35. tapps_agents/resources/claude/commands/update-docs.md +21 -0
  36. tapps_agents/resources/claude/skills/__init__.py +1 -0
  37. tapps_agents/resources/claude/skills/analyst/SKILL.md +272 -0
  38. tapps_agents/resources/claude/skills/analyst/__init__.py +1 -0
  39. tapps_agents/resources/claude/skills/architect/SKILL.md +282 -0
  40. tapps_agents/resources/claude/skills/architect/__init__.py +1 -0
  41. tapps_agents/resources/claude/skills/backend-patterns/SKILL.md +30 -0
  42. tapps_agents/resources/claude/skills/backend-patterns/__init__.py +1 -0
  43. tapps_agents/resources/claude/skills/coding-standards/SKILL.md +29 -0
  44. tapps_agents/resources/claude/skills/coding-standards/__init__.py +1 -0
  45. tapps_agents/resources/claude/skills/debugger/SKILL.md +203 -0
  46. tapps_agents/resources/claude/skills/debugger/__init__.py +1 -0
  47. tapps_agents/resources/claude/skills/designer/SKILL.md +243 -0
  48. tapps_agents/resources/claude/skills/designer/__init__.py +1 -0
  49. tapps_agents/resources/claude/skills/documenter/SKILL.md +252 -0
  50. tapps_agents/resources/claude/skills/documenter/__init__.py +1 -0
  51. tapps_agents/resources/claude/skills/enhancer/SKILL.md +307 -0
  52. tapps_agents/resources/claude/skills/enhancer/__init__.py +1 -0
  53. tapps_agents/resources/claude/skills/evaluator/SKILL.md +204 -0
  54. tapps_agents/resources/claude/skills/evaluator/__init__.py +1 -0
  55. tapps_agents/resources/claude/skills/frontend-patterns/SKILL.md +29 -0
  56. tapps_agents/resources/claude/skills/frontend-patterns/__init__.py +1 -0
  57. tapps_agents/resources/claude/skills/implementer/SKILL.md +188 -0
  58. tapps_agents/resources/claude/skills/implementer/__init__.py +1 -0
  59. tapps_agents/resources/claude/skills/improver/SKILL.md +218 -0
  60. tapps_agents/resources/claude/skills/improver/__init__.py +1 -0
  61. tapps_agents/resources/claude/skills/ops/SKILL.md +281 -0
  62. tapps_agents/resources/claude/skills/ops/__init__.py +1 -0
  63. tapps_agents/resources/claude/skills/orchestrator/SKILL.md +390 -0
  64. tapps_agents/resources/claude/skills/orchestrator/__init__.py +1 -0
  65. tapps_agents/resources/claude/skills/planner/SKILL.md +254 -0
  66. tapps_agents/resources/claude/skills/planner/__init__.py +1 -0
  67. tapps_agents/resources/claude/skills/reviewer/SKILL.md +434 -0
  68. tapps_agents/resources/claude/skills/reviewer/__init__.py +1 -0
  69. tapps_agents/resources/claude/skills/security-review/SKILL.md +31 -0
  70. tapps_agents/resources/claude/skills/security-review/__init__.py +1 -0
  71. tapps_agents/resources/claude/skills/simple-mode/SKILL.md +695 -0
  72. tapps_agents/resources/claude/skills/simple-mode/__init__.py +1 -0
  73. tapps_agents/resources/claude/skills/tester/SKILL.md +219 -0
  74. tapps_agents/resources/claude/skills/tester/__init__.py +1 -0
  75. tapps_agents/resources/cursor/.cursorignore +35 -0
  76. tapps_agents/resources/cursor/__init__.py +1 -0
  77. tapps_agents/resources/cursor/commands/__init__.py +1 -0
  78. tapps_agents/resources/cursor/commands/build-fix.md +11 -0
  79. tapps_agents/resources/cursor/commands/build.md +11 -0
  80. tapps_agents/resources/cursor/commands/e2e.md +11 -0
  81. tapps_agents/resources/cursor/commands/fix.md +11 -0
  82. tapps_agents/resources/cursor/commands/refactor-clean.md +11 -0
  83. tapps_agents/resources/cursor/commands/review.md +11 -0
  84. tapps_agents/resources/cursor/commands/security-review.md +11 -0
  85. tapps_agents/resources/cursor/commands/tdd.md +11 -0
  86. tapps_agents/resources/cursor/commands/test-coverage.md +11 -0
  87. tapps_agents/resources/cursor/commands/test.md +11 -0
  88. tapps_agents/resources/cursor/commands/update-codemaps.md +10 -0
  89. tapps_agents/resources/cursor/commands/update-docs.md +11 -0
  90. tapps_agents/resources/cursor/rules/__init__.py +1 -0
  91. tapps_agents/resources/cursor/rules/agent-capabilities.mdc +687 -0
  92. tapps_agents/resources/cursor/rules/coding-style.mdc +31 -0
  93. tapps_agents/resources/cursor/rules/command-reference.mdc +2081 -0
  94. tapps_agents/resources/cursor/rules/cursor-mode-usage.mdc +125 -0
  95. tapps_agents/resources/cursor/rules/git-workflow.mdc +29 -0
  96. tapps_agents/resources/cursor/rules/performance.mdc +29 -0
  97. tapps_agents/resources/cursor/rules/project-context.mdc +163 -0
  98. tapps_agents/resources/cursor/rules/project-profiling.mdc +197 -0
  99. tapps_agents/resources/cursor/rules/quick-reference.mdc +630 -0
  100. tapps_agents/resources/cursor/rules/security.mdc +32 -0
  101. tapps_agents/resources/cursor/rules/simple-mode.mdc +500 -0
  102. tapps_agents/resources/cursor/rules/testing.mdc +31 -0
  103. tapps_agents/resources/cursor/rules/when-to-use.mdc +156 -0
  104. tapps_agents/resources/cursor/rules/workflow-presets.mdc +179 -0
  105. tapps_agents/resources/customizations/__init__.py +1 -0
  106. tapps_agents/resources/customizations/example-custom.yaml +83 -0
  107. tapps_agents/resources/hooks/__init__.py +1 -0
  108. tapps_agents/resources/hooks/templates/README.md +5 -0
  109. tapps_agents/resources/hooks/templates/__init__.py +1 -0
  110. tapps_agents/resources/hooks/templates/add-project-context.yaml +8 -0
  111. tapps_agents/resources/hooks/templates/auto-format-js.yaml +10 -0
  112. tapps_agents/resources/hooks/templates/auto-format-python.yaml +10 -0
  113. tapps_agents/resources/hooks/templates/git-commit-check.yaml +7 -0
  114. tapps_agents/resources/hooks/templates/notify-on-complete.yaml +8 -0
  115. tapps_agents/resources/hooks/templates/quality-gate.yaml +8 -0
  116. tapps_agents/resources/hooks/templates/security-scan-on-edit.yaml +10 -0
  117. tapps_agents/resources/hooks/templates/session-end-log.yaml +7 -0
  118. tapps_agents/resources/hooks/templates/show-beads-ready.yaml +8 -0
  119. tapps_agents/resources/hooks/templates/test-on-edit.yaml +10 -0
  120. tapps_agents/resources/hooks/templates/update-docs-on-complete.yaml +8 -0
  121. tapps_agents/resources/hooks/templates/user-prompt-log.yaml +7 -0
  122. tapps_agents/resources/scripts/__init__.py +1 -0
  123. tapps_agents/resources/scripts/set_bd_path.ps1 +51 -0
  124. tapps_agents/resources/workflows/__init__.py +1 -0
  125. tapps_agents/resources/workflows/presets/__init__.py +1 -0
  126. tapps_agents/resources/workflows/presets/brownfield-analysis.yaml +235 -0
  127. tapps_agents/resources/workflows/presets/fix.yaml +78 -0
  128. tapps_agents/resources/workflows/presets/full-sdlc.yaml +122 -0
  129. tapps_agents/resources/workflows/presets/quality.yaml +82 -0
  130. tapps_agents/resources/workflows/presets/rapid-dev.yaml +84 -0
  131. tapps_agents/simple_mode/orchestrators/base.py +185 -185
  132. tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2700 -2667
  133. tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +723 -723
  134. tapps_agents/workflow/cursor_executor.py +2337 -2337
  135. tapps_agents/workflow/message_formatter.py +188 -188
  136. {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.1.dist-info}/METADATA +6 -6
  137. {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.1.dist-info}/RECORD +141 -18
  138. {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.1.dist-info}/WHEEL +0 -0
  139. {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.1.dist-info}/entry_points.txt +0 -0
  140. {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.1.dist-info}/licenses/LICENSE +0 -0
  141. {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.1.dist-info}/top_level.txt +0 -0
@@ -1,665 +1,665 @@
1
- """
2
- Health command handlers.
3
- """
4
-
5
- from __future__ import annotations
6
-
7
- import json
8
- import logging
9
- import sys
10
- from collections import defaultdict
11
- from datetime import UTC, datetime, timedelta
12
- from pathlib import Path
13
-
14
- from ...health.checks.automation import AutomationHealthCheck
15
- from ...health.checks.environment import EnvironmentHealthCheck
16
- from ...health.checks.execution import ExecutionHealthCheck
17
- from ...health.checks.context7_cache import Context7CacheHealthCheck
18
- from ...health.checks.knowledge_base import KnowledgeBaseHealthCheck
19
- from ...health.checks.outcomes import OutcomeHealthCheck
20
- from ...health.collector import HealthMetricsCollector
21
- from ...health.dashboard import HealthDashboard
22
- from ...health.orchestrator import HealthOrchestrator
23
- from ...health.registry import HealthCheckRegistry
24
- from ..feedback import get_feedback, ProgressTracker
25
- from .common import format_json_output
26
-
27
-
28
- def _usage_data_from_execution_metrics(project_root: Path) -> dict | None:
29
- """
30
- Build usage-like data from execution metrics when analytics is empty.
31
-
32
- Aggregates .tapps-agents/metrics/executions_*.jsonl by today (steps/workflows),
33
- by skill (agents), and by workflow_id (workflows). Returns same shape as
34
- AnalyticsDashboard.get_dashboard_data() for system/agents/workflows.
35
- """
36
- try:
37
- from ...workflow.execution_metrics import ExecutionMetricsCollector
38
-
39
- collector = ExecutionMetricsCollector(project_root=project_root)
40
- metrics = collector.get_metrics(limit=5000)
41
- if not metrics:
42
- return None
43
-
44
- now = datetime.now(UTC)
45
- today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
46
- thirty_days_ago = now - timedelta(days=30)
47
-
48
- # Filter to last 30 days
49
- def parse_ts(ts: str) -> datetime:
50
- return datetime.fromisoformat(ts.replace("Z", "+00:00"))
51
-
52
- recent = [m for m in metrics if parse_ts(m.started_at) >= thirty_days_ago]
53
- if not recent:
54
- return None
55
-
56
- today_metrics = [m for m in recent if parse_ts(m.started_at) >= today_start]
57
- workflow_ids_today_success = {
58
- m.workflow_id for m in today_metrics if m.status == "success"
59
- }
60
- workflow_ids_today_failed = {
61
- m.workflow_id for m in today_metrics if m.status != "success"
62
- }
63
- completed_today = len(workflow_ids_today_success)
64
- failed_today = len(workflow_ids_today_failed)
65
- avg_duration = (
66
- sum(m.duration_ms for m in recent) / len(recent) / 1000.0
67
- if recent
68
- else 0.0
69
- )
70
-
71
- # Agents: by skill or command
72
- agent_counts: dict[str, list] = defaultdict(list)
73
- for m in recent:
74
- key = m.skill or m.command or "unknown"
75
- agent_counts[key].append(m)
76
-
77
- agents_list = []
78
- for name, ms in agent_counts.items():
79
- total = len(ms)
80
- success = sum(1 for m in ms if m.status == "success")
81
- agents_list.append(
82
- {
83
- "agent_id": name,
84
- "agent_name": name,
85
- "total_executions": total,
86
- "successful_executions": success,
87
- "failed_executions": total - success,
88
- "success_rate": success / total if total else 0.0,
89
- "average_duration": sum(m.duration_ms for m in ms) / total / 1000.0
90
- if total
91
- else 0.0,
92
- }
93
- )
94
-
95
- # Workflows: by workflow_id
96
- wf_counts: dict[str, list] = defaultdict(list)
97
- for m in recent:
98
- wf_counts[m.workflow_id].append(m)
99
-
100
- workflows_list = []
101
- for wf_id, ms in wf_counts.items():
102
- total = len(ms)
103
- success = sum(1 for m in ms if m.status == "success")
104
- workflows_list.append(
105
- {
106
- "workflow_id": wf_id,
107
- "workflow_name": wf_id,
108
- "total_executions": total,
109
- "successful_executions": success,
110
- "failed_executions": total - success,
111
- "success_rate": success / total if total else 0.0,
112
- "average_duration": sum(m.duration_ms for m in ms) / total / 1000.0
113
- if total
114
- else 0.0,
115
- }
116
- )
117
-
118
- # System: try cpu/mem/disk from ResourceMonitor
119
- cpu_usage = memory_usage = disk_usage = 0.0
120
- try:
121
- from ...core.resource_monitor import ResourceMonitor
122
-
123
- mon = ResourceMonitor()
124
- res = mon.get_current_metrics()
125
- cpu_usage = getattr(res, "cpu_percent", 0.0) or 0.0
126
- memory_usage = getattr(res, "memory_percent", 0.0) or 0.0
127
- disk_usage = getattr(res, "disk_percent", 0.0) or 0.0
128
- except Exception:
129
- pass
130
-
131
- return {
132
- "timestamp": now.isoformat(),
133
- "system": {
134
- "timestamp": now.isoformat(),
135
- "total_agents": len(agents_list),
136
- "active_workflows": 0,
137
- "completed_workflows_today": completed_today,
138
- "failed_workflows_today": failed_today,
139
- "average_workflow_duration": avg_duration,
140
- "cpu_usage": cpu_usage,
141
- "memory_usage": memory_usage,
142
- "disk_usage": disk_usage,
143
- },
144
- "agents": agents_list,
145
- "workflows": workflows_list,
146
- }
147
- except Exception:
148
- return None
149
-
150
-
151
- def handle_health_check_command(
152
- check_name: str | None = None,
153
- output_format: str = "text",
154
- save: bool = True,
155
- project_root: Path | None = None,
156
- ) -> None:
157
- """
158
- Handle health check command.
159
-
160
- Args:
161
- check_name: Optional specific check to run
162
- output_format: Output format (json or text)
163
- save: Whether to save results to metrics storage
164
- project_root: Project root directory
165
- """
166
- project_root = project_root or Path.cwd()
167
-
168
- # Initialize registry and register all checks
169
- registry = HealthCheckRegistry()
170
- registry.register(EnvironmentHealthCheck(project_root=project_root))
171
- registry.register(AutomationHealthCheck(project_root=project_root))
172
- registry.register(ExecutionHealthCheck(project_root=project_root))
173
- registry.register(Context7CacheHealthCheck(project_root=project_root))
174
- registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
175
- registry.register(OutcomeHealthCheck(project_root=project_root))
176
-
177
- # Initialize orchestrator
178
- metrics_collector = HealthMetricsCollector(project_root=project_root)
179
- orchestrator = HealthOrchestrator(
180
- registry=registry, metrics_collector=metrics_collector, project_root=project_root
181
- )
182
-
183
- # Run checks
184
- feedback = get_feedback()
185
- feedback.format_type = output_format
186
- operation_desc = f"Running health check: {check_name}" if check_name else "Running all health checks"
187
- feedback.start_operation("Health Check", operation_desc)
188
-
189
- if check_name:
190
- check_names = [check_name]
191
- feedback.running(f"Initializing check: {check_name}...", step=1, total_steps=3)
192
- else:
193
- check_names = None
194
- feedback.running("Discovering health checks...", step=1, total_steps=3)
195
-
196
- feedback.running("Executing health checks...", step=2, total_steps=3)
197
- results = orchestrator.run_all_checks(check_names=check_names, save_metrics=save)
198
- feedback.running("Collecting results...", step=3, total_steps=3)
199
- feedback.clear_progress()
200
-
201
- # Build summary
202
- summary = {}
203
- if results:
204
- healthy_count = sum(1 for r in results.values() if r and r.status == "healthy")
205
- total_count = len([r for r in results.values() if r])
206
- summary["checks_run"] = total_count
207
- summary["healthy"] = healthy_count
208
- summary["degraded"] = sum(1 for r in results.values() if r and r.status == "degraded")
209
- summary["unhealthy"] = sum(1 for r in results.values() if r and r.status == "unhealthy")
210
-
211
- # Format output
212
- if output_format == "json":
213
- output = {
214
- "checks": {
215
- name: {
216
- "status": result.status,
217
- "score": result.score,
218
- "message": result.message,
219
- "details": result.details,
220
- "remediation": (
221
- result.remediation
222
- if isinstance(result.remediation, list)
223
- else [result.remediation]
224
- if result.remediation
225
- else None
226
- ),
227
- }
228
- for name, result in results.items()
229
- if result
230
- }
231
- }
232
- # Merge summary into output
233
- if summary:
234
- output = {**output, "summary": summary}
235
- feedback.output_result(output, message="Health checks completed")
236
- else:
237
- # Text output
238
- feedback.success("Health checks completed")
239
- warnings = []
240
- for name, result in sorted(results.items()):
241
- if not result:
242
- continue
243
-
244
- status_symbol = {
245
- "healthy": "[OK]",
246
- "degraded": "[WARN]",
247
- "unhealthy": "[FAIL]",
248
- }.get(result.status, "[?]")
249
-
250
- print(f"\n[{status_symbol}] {name.upper()}: {result.status} ({result.score:.1f}/100)")
251
- print(f" {result.message}")
252
-
253
- if result.status != "healthy":
254
- warnings.append(f"{name}: {result.message}")
255
-
256
- if result.details:
257
- # Show key metrics
258
- key_metrics = []
259
- for key in [
260
- "total_executions",
261
- "success_rate",
262
- "hit_rate",
263
- "total_files",
264
- "average_score",
265
- ]:
266
- if key in result.details:
267
- value = result.details[key]
268
- if isinstance(value, float):
269
- if key == "success_rate" or key == "hit_rate":
270
- key_metrics.append(f"{key}: {value:.1f}%")
271
- else:
272
- key_metrics.append(f"{key}: {value:.1f}")
273
- else:
274
- key_metrics.append(f"{key}: {value}")
275
-
276
- if key_metrics:
277
- print(f" Metrics: {' | '.join(key_metrics)}")
278
-
279
- if result.remediation:
280
- if isinstance(result.remediation, list):
281
- if len(result.remediation) > 0:
282
- print(f" Remediation: {result.remediation[0]}")
283
- elif isinstance(result.remediation, str):
284
- print(f" Remediation: {result.remediation}")
285
-
286
- if warnings:
287
- for warning_msg in warnings:
288
- feedback.warning(warning_msg)
289
-
290
-
291
- def handle_health_dashboard_command(
292
- output_format: str = "text", project_root: Path | None = None
293
- ) -> None:
294
- """
295
- Handle health dashboard command.
296
-
297
- Args:
298
- output_format: Output format (json or text)
299
- project_root: Project root directory
300
- """
301
- project_root = project_root or Path.cwd()
302
-
303
- # Initialize registry and register all checks
304
- registry = HealthCheckRegistry()
305
- registry.register(EnvironmentHealthCheck(project_root=project_root))
306
- registry.register(AutomationHealthCheck(project_root=project_root))
307
- registry.register(ExecutionHealthCheck(project_root=project_root))
308
- registry.register(Context7CacheHealthCheck(project_root=project_root))
309
- registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
310
- registry.register(OutcomeHealthCheck(project_root=project_root))
311
-
312
- # Initialize dashboard
313
- metrics_collector = HealthMetricsCollector(project_root=project_root)
314
- orchestrator = HealthOrchestrator(
315
- registry=registry, metrics_collector=metrics_collector, project_root=project_root
316
- )
317
- dashboard = HealthDashboard(orchestrator=orchestrator)
318
-
319
- # Render dashboard
320
- feedback = get_feedback()
321
- feedback.format_type = output_format
322
- feedback.start_operation("Health Dashboard", "Generating health dashboard visualization")
323
- feedback.running("Collecting health metrics...", step=1, total_steps=3)
324
- feedback.running("Generating dashboard...", step=2, total_steps=3)
325
- feedback.running("Rendering dashboard output...", step=3, total_steps=3)
326
-
327
- if output_format == "json":
328
- output = dashboard.render_json()
329
- feedback.clear_progress()
330
- feedback.output_result(output, message="Health dashboard generated")
331
- else:
332
- output = dashboard.render_text()
333
- feedback.clear_progress()
334
- feedback.success("Health dashboard generated")
335
- print(output)
336
-
337
-
338
- def handle_health_metrics_command(
339
- check_name: str | None = None,
340
- status: str | None = None,
341
- days: int = 30,
342
- output_format: str = "text",
343
- project_root: Path | None = None,
344
- ) -> None:
345
- """
346
- Handle health metrics command.
347
-
348
- Args:
349
- check_name: Optional check name to filter
350
- status: Optional status to filter
351
- days: Number of days to look back
352
- output_format: Output format (json or text)
353
- project_root: Project root directory
354
- """
355
- project_root = project_root or Path.cwd()
356
- collector = HealthMetricsCollector(project_root=project_root)
357
-
358
- # Get metrics
359
- feedback = get_feedback()
360
- feedback.format_type = output_format
361
- operation_desc = f"Collecting metrics{f' for {check_name}' if check_name else ''}"
362
- feedback.start_operation("Health Metrics", operation_desc)
363
- feedback.running("Querying metrics database...", step=1, total_steps=3)
364
-
365
- metrics = collector.get_metrics(check_name=check_name, status=status, days=days, limit=1000)
366
- feedback.running("Calculating summary statistics...", step=2, total_steps=3)
367
- summary = collector.get_summary(days=days)
368
- feedback.running("Formatting results...", step=3, total_steps=3)
369
- feedback.clear_progress()
370
-
371
- if output_format == "json":
372
- output = {
373
- "summary": summary,
374
- "metrics": [m.to_dict() for m in metrics],
375
- }
376
- feedback.output_result(output, message="Health metrics retrieved")
377
- else:
378
- # Text output
379
- feedback.success("Health metrics retrieved")
380
- print(f"\nHealth Metrics Summary (last {days} days)")
381
- print("=" * 70)
382
- print(f"Total checks: {summary['total_checks']}")
383
- print(f"Average score: {summary['average_score']:.1f}/100")
384
- print(f"\nBy status:")
385
- for status_name, count in summary["by_status"].items():
386
- print(f" {status_name}: {count}")
387
-
388
- if summary["by_check"]:
389
- print(f"\nBy check:")
390
- for check_name, check_data in summary["by_check"].items():
391
- print(f" {check_name}:")
392
- print(f" Count: {check_data['count']}")
393
- print(f" Average score: {check_data['average_score']:.1f}/100")
394
- print(f" Latest status: {check_data['latest_status']}")
395
- print(f" Latest score: {check_data['latest_score']:.1f}/100")
396
-
397
- if metrics:
398
- print(f"\nRecent metrics (showing up to 10):")
399
- for metric in metrics[:10]:
400
- print(f" {metric.check_name}: {metric.status} ({metric.score:.1f}/100) - {metric.timestamp}")
401
-
402
-
403
- def handle_health_trends_command(
404
- check_name: str,
405
- days: int = 7,
406
- output_format: str = "text",
407
- project_root: Path | None = None,
408
- ) -> None:
409
- """
410
- Handle health trends command.
411
-
412
- Args:
413
- check_name: Check name to analyze trends for
414
- days: Number of days to analyze
415
- output_format: Output format (json or text)
416
- project_root: Project root directory
417
- """
418
- project_root = project_root or Path.cwd()
419
- collector = HealthMetricsCollector(project_root=project_root)
420
-
421
- # Get trends
422
- feedback = get_feedback()
423
- feedback.format_type = output_format
424
- feedback.start_operation("Health Trends", f"Analyzing health trends for {check_name}")
425
- feedback.running("Loading historical data...", step=1, total_steps=3)
426
-
427
- trends = collector.get_trends(check_name=check_name, days=days)
428
- feedback.running("Calculating trends...", step=2, total_steps=3)
429
- feedback.running("Generating trend report...", step=3, total_steps=3)
430
- feedback.clear_progress()
431
-
432
- if output_format == "json":
433
- output = {
434
- "check_name": check_name,
435
- "days": days,
436
- "trends": trends,
437
- }
438
- feedback.output_result(output, message="Health trends analyzed")
439
- else:
440
- # Text output
441
- feedback.success("Health trends analyzed")
442
- print(f"\nHealth Trends for '{check_name}' (last {days} days)")
443
- print("=" * 70)
444
- print(f"Direction: {trends['direction']}")
445
- print(f"Score change: {trends['score_change']:+.1f} points")
446
-
447
- if trends["status_changes"]:
448
- print(f"\nStatus changes:")
449
- for status, change in trends["status_changes"].items():
450
- if change != 0:
451
- print(f" {status}: {change:+d}")
452
-
453
-
454
- def handle_health_usage_command(args: object) -> None:
455
- """
456
- Handle health usage subcommand (formerly analytics).
457
- Dispatches to dashboard, agents, workflows, trends, or system using AnalyticsDashboard.
458
- """
459
- from ...core.analytics_dashboard import AnalyticsDashboard
460
-
461
- dashboard = AnalyticsDashboard()
462
- sub = getattr(args, "usage_subcommand", "dashboard")
463
- if sub == "show":
464
- sub = "dashboard"
465
- fmt = getattr(args, "format", "text")
466
-
467
- if sub == "dashboard":
468
- data = dashboard.get_dashboard_data()
469
- if fmt == "json":
470
- format_json_output(data)
471
- else:
472
- print("\n" + "=" * 60)
473
- print("Usage / Analytics Dashboard")
474
- print("=" * 60)
475
- print(f"\nSystem Status (as of {data['timestamp']}):")
476
- sys_data = data["system"]
477
- print(f" Total Agents: {sys_data['total_agents']}")
478
- print(f" Active Workflows: {sys_data['active_workflows']}")
479
- print(f" Completed Today: {sys_data['completed_workflows_today']}")
480
- print(f" Failed Today: {sys_data['failed_workflows_today']}")
481
- print(f" Avg Workflow Duration: {sys_data['average_workflow_duration']:.2f}s")
482
- print(f" CPU Usage: {sys_data['cpu_usage']:.1f}%")
483
- print(f" Memory Usage: {sys_data['memory_usage']:.1f}%")
484
- print(f" Disk Usage: {sys_data['disk_usage']:.1f}%")
485
- print("\nAgent Performance (Top 10):")
486
- for agent in sorted(data["agents"], key=lambda x: x["total_executions"], reverse=True)[:10]:
487
- print(f" {agent['agent_name']}: {agent['total_executions']} executions, "
488
- f"{agent['success_rate']*100:.1f}% success, {agent['average_duration']:.2f}s avg")
489
- print("\nWorkflow Performance:")
490
- for wf in sorted(data["workflows"], key=lambda x: x["total_executions"], reverse=True)[:10]:
491
- print(f" {wf['workflow_name']}: {wf['total_executions']} executions, "
492
- f"{wf['success_rate']*100:.1f}% success")
493
- elif sub == "agents":
494
- metrics = dashboard.get_agent_performance(agent_id=getattr(args, "agent_id", None))
495
- if fmt == "json":
496
- format_json_output(metrics)
497
- else:
498
- for agent in metrics:
499
- print(f"{agent['agent_name']}: {agent['total_executions']} executions, "
500
- f"{agent['success_rate']*100:.1f}% success")
501
- elif sub == "workflows":
502
- metrics = dashboard.get_workflow_performance(workflow_id=getattr(args, "workflow_id", None))
503
- if fmt == "json":
504
- format_json_output(metrics)
505
- else:
506
- for wf in metrics:
507
- print(f"{wf['workflow_name']}: {wf['total_executions']} executions, "
508
- f"{wf['success_rate']*100:.1f}% success")
509
- elif sub == "trends":
510
- metric_type = getattr(args, "metric_type", "agent_duration")
511
- days = getattr(args, "days", 30)
512
- trends = dashboard.get_trends(metric_type, days=days)
513
- if fmt == "json":
514
- format_json_output(trends)
515
- else:
516
- for t in trends:
517
- print(f"{t['metric_name']}: {len(t['values'])} data points")
518
- elif sub == "system":
519
- status = dashboard.get_system_status()
520
- if fmt == "json":
521
- format_json_output(status)
522
- else:
523
- print(f"System Status (as of {status['timestamp']}):")
524
- print(f" Total Agents: {status['total_agents']}")
525
- print(f" Active Workflows: {status['active_workflows']}")
526
- print(f" Completed Today: {status['completed_workflows_today']}")
527
- print(f" Failed Today: {status['failed_workflows_today']}")
528
-
529
-
530
- def handle_health_overview_command(
531
- output_format: str = "text",
532
- project_root: Path | None = None,
533
- ) -> None:
534
- """
535
- Single 1000-foot view: health checks + usage rolled up for all subsystems.
536
-
537
- Renders one easy-to-read report: overall health, each health check one line,
538
- then usage at a glance (system, top agents, top workflows).
539
- """
540
- from ...core.analytics_dashboard import AnalyticsDashboard
541
-
542
- project_root = project_root or Path.cwd()
543
-
544
- # 1. Health checks
545
- registry = HealthCheckRegistry()
546
- registry.register(EnvironmentHealthCheck(project_root=project_root))
547
- registry.register(AutomationHealthCheck(project_root=project_root))
548
- registry.register(ExecutionHealthCheck(project_root=project_root))
549
- registry.register(Context7CacheHealthCheck(project_root=project_root))
550
- registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
551
- registry.register(OutcomeHealthCheck(project_root=project_root))
552
- metrics_collector = HealthMetricsCollector(project_root=project_root)
553
- orchestrator = HealthOrchestrator(
554
- registry=registry,
555
- metrics_collector=metrics_collector,
556
- project_root=project_root,
557
- )
558
- health_results = orchestrator.run_all_checks(save_metrics=True)
559
- overall = orchestrator.get_overall_health(health_results)
560
-
561
- # 2. Usage (best-effort; prefer analytics, fallback to execution metrics — HM-001-S1)
562
- _log = logging.getLogger(__name__)
563
- usage_data = None
564
- try:
565
- usage_dashboard = AnalyticsDashboard()
566
- usage_data = usage_dashboard.get_dashboard_data()
567
- except Exception:
568
- pass
569
- # If analytics has no agent/workflow data, derive from execution metrics
570
- fallback_used = False
571
- if usage_data:
572
- agents = usage_data.get("agents") or []
573
- workflows = usage_data.get("workflows") or []
574
- total_runs = sum(a.get("total_executions", 0) for a in agents) + sum(
575
- w.get("total_executions", 0) for w in workflows
576
- )
577
- if total_runs == 0:
578
- fallback = _usage_data_from_execution_metrics(project_root)
579
- if fallback:
580
- fallback_used = True
581
- usage_data = fallback
582
- else:
583
- fallback = _usage_data_from_execution_metrics(project_root)
584
- if fallback:
585
- fallback_used = True
586
- usage_data = fallback
587
- if fallback_used and usage_data:
588
- n_agents = len(usage_data.get("agents") or [])
589
- n_workflows = len(usage_data.get("workflows") or [])
590
- _log.info(
591
- "Health overview: using execution metrics fallback (%s agents, %s workflows)",
592
- n_agents, n_workflows,
593
- )
594
-
595
- # 3. Build output
596
- feedback = get_feedback()
597
- feedback.format_type = output_format
598
-
599
- if output_format == "json":
600
- out = {
601
- "overview": {
602
- "overall_health": overall,
603
- "health_checks": {
604
- name: {
605
- "status": r.status,
606
- "score": r.score,
607
- "message": r.message,
608
- }
609
- for name, r in health_results.items()
610
- if r
611
- },
612
- },
613
- "usage": usage_data,
614
- }
615
- format_json_output(out)
616
- return
617
-
618
- # Text: 1000-foot, great-looking, easy to read
619
- width = 72
620
- lines = []
621
- lines.append("")
622
- lines.append("=" * width)
623
- lines.append(" TAPPS-AGENTS | HEALTH + USAGE | 1000-FOOT VIEW")
624
- lines.append("=" * width)
625
- lines.append("")
626
-
627
- # Overall health
628
- status_sym = {"healthy": "[OK] ", "degraded": "[WARN]", "unhealthy": "[FAIL]", "unknown": "[?] "}
629
- sym = status_sym.get(overall["status"], "[?] ")
630
- lines.append(f" {sym} Overall: {overall['status'].upper()} ({overall['score']:.1f}/100)")
631
- lines.append("")
632
-
633
- # Subsystems (health checks) - one line each
634
- lines.append(" SUBSYSTEMS (health)")
635
- lines.append(" " + "-" * (width - 2))
636
- for name, result in sorted(health_results.items()):
637
- if not result:
638
- continue
639
- s = status_sym.get(result.status, "[?] ")
640
- label = name.replace("_", " ").upper()
641
- lines.append(f" {s} {label}: {result.score:.1f}/100 | {result.message[:50]}{'...' if len(result.message) > 50 else ''}")
642
- lines.append("")
643
-
644
- # Usage at a glance
645
- lines.append(" USAGE (agents & workflows)")
646
- lines.append(" " + "-" * (width - 2))
647
- if usage_data:
648
- sys_data = usage_data.get("system", {})
649
- lines.append(f" Today: completed {sys_data.get('completed_workflows_today', 0)} workflows, failed {sys_data.get('failed_workflows_today', 0)} | active: {sys_data.get('active_workflows', 0)}")
650
- lines.append(f" Avg workflow duration: {sys_data.get('average_workflow_duration', 0):.1f}s | CPU: {sys_data.get('cpu_usage', 0):.0f}% Mem: {sys_data.get('memory_usage', 0):.0f}% Disk: {sys_data.get('disk_usage', 0):.0f}%")
651
- agents = sorted(usage_data.get("agents", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
652
- if agents:
653
- lines.append(" Top agents (30d): " + " | ".join(f"{a.get('agent_name', '')}: {a.get('total_executions', 0)} runs ({a.get('success_rate', 0)*100:.0f}% ok)" for a in agents))
654
- workflows = sorted(usage_data.get("workflows", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
655
- if workflows:
656
- lines.append(" Top workflows (30d): " + " | ".join(f"{w.get('workflow_name', '')}: {w.get('total_executions', 0)} ({w.get('success_rate', 0)*100:.0f}% ok)" for w in workflows))
657
- else:
658
- lines.append(" (No usage data yet. Run agents/workflows to populate.)")
659
- lines.append("")
660
- lines.append("=" * width)
661
- lines.append("")
662
-
663
- feedback.clear_progress()
664
- print("\n".join(lines))
665
-
1
+ """
2
+ Health command handlers.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import json
8
+ import logging
9
+ import sys
10
+ from collections import defaultdict
11
+ from datetime import UTC, datetime, timedelta
12
+ from pathlib import Path
13
+
14
+ from ...health.checks.automation import AutomationHealthCheck
15
+ from ...health.checks.environment import EnvironmentHealthCheck
16
+ from ...health.checks.execution import ExecutionHealthCheck
17
+ from ...health.checks.context7_cache import Context7CacheHealthCheck
18
+ from ...health.checks.knowledge_base import KnowledgeBaseHealthCheck
19
+ from ...health.checks.outcomes import OutcomeHealthCheck
20
+ from ...health.collector import HealthMetricsCollector
21
+ from ...health.dashboard import HealthDashboard
22
+ from ...health.orchestrator import HealthOrchestrator
23
+ from ...health.registry import HealthCheckRegistry
24
+ from ..feedback import get_feedback, ProgressTracker
25
+ from .common import format_json_output
26
+
27
+
28
def _perf_rows(groups: dict[str, list], id_key: str, name_key: str) -> list[dict]:
    """
    Build AnalyticsDashboard-shaped performance rows from grouped metrics.

    Args:
        groups: Mapping of group name -> list of execution-metric records
            (each record exposes ``status`` and ``duration_ms``).
        id_key: Dict key for the group's id (e.g. "agent_id").
        name_key: Dict key for the group's display name (e.g. "agent_name").

    Returns:
        One dict per group with totals, success rate, and average duration
        in seconds (duration_ms / 1000).
    """
    rows: list[dict] = []
    for group_name, ms in groups.items():
        total = len(ms)
        success = sum(1 for m in ms if m.status == "success")
        rows.append(
            {
                id_key: group_name,
                name_key: group_name,
                "total_executions": total,
                "successful_executions": success,
                "failed_executions": total - success,
                "success_rate": success / total if total else 0.0,
                "average_duration": (
                    sum(m.duration_ms for m in ms) / total / 1000.0 if total else 0.0
                ),
            }
        )
    return rows


def _usage_data_from_execution_metrics(project_root: Path) -> dict | None:
    """
    Build usage-like data from execution metrics when analytics is empty.

    Aggregates .tapps-agents/metrics/executions_*.jsonl by today (steps/workflows),
    by skill (agents), and by workflow_id (workflows). Returns same shape as
    AnalyticsDashboard.get_dashboard_data() for system/agents/workflows.

    Returns None (never raises) when metrics are unavailable or empty.
    """
    try:
        from ...workflow.execution_metrics import ExecutionMetricsCollector

        collector = ExecutionMetricsCollector(project_root=project_root)
        metrics = collector.get_metrics(limit=5000)
        if not metrics:
            return None

        now = datetime.now(UTC)
        today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
        thirty_days_ago = now - timedelta(days=30)

        def parse_ts(ts: str) -> datetime:
            # Timestamps may carry a trailing "Z"; fromisoformat needs an offset.
            return datetime.fromisoformat(ts.replace("Z", "+00:00"))

        # Keep only the last 30 days of records.
        recent = [m for m in metrics if parse_ts(m.started_at) >= thirty_days_ago]
        if not recent:
            return None

        # Today's workflow counts. NOTE(review): a workflow with both
        # successful and failed steps today lands in both buckets — confirm
        # whether "failed" should take precedence.
        today_metrics = [m for m in recent if parse_ts(m.started_at) >= today_start]
        completed_today = len(
            {m.workflow_id for m in today_metrics if m.status == "success"}
        )
        failed_today = len(
            {m.workflow_id for m in today_metrics if m.status != "success"}
        )
        # `recent` is non-empty here, so no division guard is needed.
        avg_duration = sum(m.duration_ms for m in recent) / len(recent) / 1000.0

        # Agents: grouped by skill (falling back to command, then "unknown").
        agent_groups: dict[str, list] = defaultdict(list)
        for m in recent:
            agent_groups[m.skill or m.command or "unknown"].append(m)

        # Workflows: grouped by workflow_id.
        wf_groups: dict[str, list] = defaultdict(list)
        for m in recent:
            wf_groups[m.workflow_id].append(m)

        agents_list = _perf_rows(agent_groups, "agent_id", "agent_name")
        workflows_list = _perf_rows(wf_groups, "workflow_id", "workflow_name")

        # System: best-effort cpu/mem/disk from ResourceMonitor; zeros on failure.
        cpu_usage = memory_usage = disk_usage = 0.0
        try:
            from ...core.resource_monitor import ResourceMonitor

            res = ResourceMonitor().get_current_metrics()
            cpu_usage = getattr(res, "cpu_percent", 0.0) or 0.0
            memory_usage = getattr(res, "memory_percent", 0.0) or 0.0
            disk_usage = getattr(res, "disk_percent", 0.0) or 0.0
        except Exception:
            pass  # resource stats are optional

        return {
            "timestamp": now.isoformat(),
            "system": {
                "timestamp": now.isoformat(),
                "total_agents": len(agents_list),
                "active_workflows": 0,
                "completed_workflows_today": completed_today,
                "failed_workflows_today": failed_today,
                "average_workflow_duration": avg_duration,
                "cpu_usage": cpu_usage,
                "memory_usage": memory_usage,
                "disk_usage": disk_usage,
            },
            "agents": agents_list,
            "workflows": workflows_list,
        }
    except Exception:
        # Deliberate best-effort: any failure means "no usage data".
        return None
149
+
150
+
151
def handle_health_check_command(
    check_name: str | None = None,
    output_format: str = "text",
    save: bool = True,
    project_root: Path | None = None,
) -> None:
    """
    Handle health check command.

    Registers all known health checks, runs one (or all) of them through the
    orchestrator, and prints results in the requested format.

    Args:
        check_name: Optional specific check to run (all checks when None)
        output_format: Output format (json or text)
        save: Whether to save results to metrics storage
        project_root: Project root directory (defaults to cwd)
    """
    project_root = project_root or Path.cwd()

    # Initialize registry and register all checks
    registry = HealthCheckRegistry()
    registry.register(EnvironmentHealthCheck(project_root=project_root))
    registry.register(AutomationHealthCheck(project_root=project_root))
    registry.register(ExecutionHealthCheck(project_root=project_root))
    registry.register(Context7CacheHealthCheck(project_root=project_root))
    registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
    registry.register(OutcomeHealthCheck(project_root=project_root))

    # Initialize orchestrator
    metrics_collector = HealthMetricsCollector(project_root=project_root)
    orchestrator = HealthOrchestrator(
        registry=registry, metrics_collector=metrics_collector, project_root=project_root
    )

    # Run checks with 3-step progress feedback
    feedback = get_feedback()
    feedback.format_type = output_format
    operation_desc = (
        f"Running health check: {check_name}" if check_name else "Running all health checks"
    )
    feedback.start_operation("Health Check", operation_desc)

    if check_name:
        check_names = [check_name]
        feedback.running(f"Initializing check: {check_name}...", step=1, total_steps=3)
    else:
        check_names = None  # None means "run every registered check"
        feedback.running("Discovering health checks...", step=1, total_steps=3)

    feedback.running("Executing health checks...", step=2, total_steps=3)
    results = orchestrator.run_all_checks(check_names=check_names, save_metrics=save)
    feedback.running("Collecting results...", step=3, total_steps=3)
    feedback.clear_progress()

    # Build summary counts by status (None results are skipped)
    summary: dict[str, int] = {}
    if results:
        valid = [r for r in results.values() if r]
        summary["checks_run"] = len(valid)
        summary["healthy"] = sum(1 for r in valid if r.status == "healthy")
        summary["degraded"] = sum(1 for r in valid if r.status == "degraded")
        summary["unhealthy"] = sum(1 for r in valid if r.status == "unhealthy")

    if output_format == "json":

        def _remediation(result) -> list | None:
            # Normalize remediation to a list (or None when absent).
            if isinstance(result.remediation, list):
                return result.remediation
            return [result.remediation] if result.remediation else None

        output = {
            "checks": {
                name: {
                    "status": result.status,
                    "score": result.score,
                    "message": result.message,
                    "details": result.details,
                    "remediation": _remediation(result),
                }
                for name, result in results.items()
                if result
            }
        }
        # Merge summary into output
        if summary:
            output = {**output, "summary": summary}
        feedback.output_result(output, message="Health checks completed")
    else:
        # Text output
        feedback.success("Health checks completed")
        warnings = []
        for name, result in sorted(results.items()):
            if not result:
                continue

            status_symbol = {
                "healthy": "[OK]",
                "degraded": "[WARN]",
                "unhealthy": "[FAIL]",
            }.get(result.status, "[?]")

            # BUG FIX: status_symbol already contains brackets; the original
            # wrapped it in a second pair and printed e.g. "[[OK]]".
            print(f"\n{status_symbol} {name.upper()}: {result.status} ({result.score:.1f}/100)")
            print(f"  {result.message}")

            if result.status != "healthy":
                warnings.append(f"{name}: {result.message}")

            if result.details:
                # Show key metrics when present in the check's details
                key_metrics = []
                for key in (
                    "total_executions",
                    "success_rate",
                    "hit_rate",
                    "total_files",
                    "average_score",
                ):
                    if key not in result.details:
                        continue
                    value = result.details[key]
                    if isinstance(value, float):
                        # Rates are rendered as percentages
                        suffix = "%" if key in ("success_rate", "hit_rate") else ""
                        key_metrics.append(f"{key}: {value:.1f}{suffix}")
                    else:
                        key_metrics.append(f"{key}: {value}")

                if key_metrics:
                    print(f"  Metrics: {' | '.join(key_metrics)}")

            if result.remediation:
                # Show only the first remediation step in text mode
                if isinstance(result.remediation, list):
                    if result.remediation:
                        print(f"  Remediation: {result.remediation[0]}")
                elif isinstance(result.remediation, str):
                    print(f"  Remediation: {result.remediation}")

        if warnings:
            for warning_msg in warnings:
                feedback.warning(warning_msg)
289
+
290
+
291
def handle_health_dashboard_command(
    output_format: str = "text", project_root: Path | None = None
) -> None:
    """
    Handle health dashboard command.

    Args:
        output_format: Output format (json or text)
        project_root: Project root directory (defaults to cwd)
    """
    root = project_root or Path.cwd()

    # Register every known health check.
    check_registry = HealthCheckRegistry()
    for check_cls in (
        EnvironmentHealthCheck,
        AutomationHealthCheck,
        ExecutionHealthCheck,
        Context7CacheHealthCheck,
        KnowledgeBaseHealthCheck,
        OutcomeHealthCheck,
    ):
        check_registry.register(check_cls(project_root=root))

    # Wire the dashboard to an orchestrator backed by a metrics collector.
    dash = HealthDashboard(
        orchestrator=HealthOrchestrator(
            registry=check_registry,
            metrics_collector=HealthMetricsCollector(project_root=root),
            project_root=root,
        )
    )

    # Render with 3-step progress feedback.
    fb = get_feedback()
    fb.format_type = output_format
    fb.start_operation("Health Dashboard", "Generating health dashboard visualization")
    fb.running("Collecting health metrics...", step=1, total_steps=3)
    fb.running("Generating dashboard...", step=2, total_steps=3)
    fb.running("Rendering dashboard output...", step=3, total_steps=3)

    as_json = output_format == "json"
    rendered = dash.render_json() if as_json else dash.render_text()
    fb.clear_progress()
    if as_json:
        fb.output_result(rendered, message="Health dashboard generated")
    else:
        fb.success("Health dashboard generated")
        print(rendered)
336
+
337
+
338
def handle_health_metrics_command(
    check_name: str | None = None,
    status: str | None = None,
    days: int = 30,
    output_format: str = "text",
    project_root: Path | None = None,
) -> None:
    """
    Handle health metrics command.

    Args:
        check_name: Optional check name to filter
        status: Optional status to filter
        days: Number of days to look back
        output_format: Output format (json or text)
        project_root: Project root directory (defaults to cwd)
    """
    project_root = project_root or Path.cwd()
    collector = HealthMetricsCollector(project_root=project_root)

    # Query metrics with 3-step progress feedback
    feedback = get_feedback()
    feedback.format_type = output_format
    operation_desc = f"Collecting metrics{f' for {check_name}' if check_name else ''}"
    feedback.start_operation("Health Metrics", operation_desc)
    feedback.running("Querying metrics database...", step=1, total_steps=3)

    metrics = collector.get_metrics(check_name=check_name, status=status, days=days, limit=1000)
    feedback.running("Calculating summary statistics...", step=2, total_steps=3)
    summary = collector.get_summary(days=days)
    feedback.running("Formatting results...", step=3, total_steps=3)
    feedback.clear_progress()

    if output_format == "json":
        output = {
            "summary": summary,
            "metrics": [m.to_dict() for m in metrics],
        }
        feedback.output_result(output, message="Health metrics retrieved")
    else:
        # Text output
        feedback.success("Health metrics retrieved")
        print(f"\nHealth Metrics Summary (last {days} days)")
        print("=" * 70)
        print(f"Total checks: {summary['total_checks']}")
        print(f"Average score: {summary['average_score']:.1f}/100")
        # FIX: plain literals instead of placeholder-free f-strings (F541);
        # printed text is unchanged.
        print("\nBy status:")
        for status_name, count in summary["by_status"].items():
            print(f"  {status_name}: {count}")

        if summary["by_check"]:
            print("\nBy check:")
            # FIX: the loop variable previously shadowed (and clobbered)
            # the `check_name` parameter.
            for name, check_data in summary["by_check"].items():
                print(f"  {name}:")
                print(f"    Count: {check_data['count']}")
                print(f"    Average score: {check_data['average_score']:.1f}/100")
                print(f"    Latest status: {check_data['latest_status']}")
                print(f"    Latest score: {check_data['latest_score']:.1f}/100")

        if metrics:
            print("\nRecent metrics (showing up to 10):")
            for metric in metrics[:10]:
                print(
                    f"  {metric.check_name}: {metric.status} "
                    f"({metric.score:.1f}/100) - {metric.timestamp}"
                )
401
+
402
+
403
def handle_health_trends_command(
    check_name: str,
    days: int = 7,
    output_format: str = "text",
    project_root: Path | None = None,
) -> None:
    """
    Handle health trends command.

    Args:
        check_name: Check name to analyze trends for
        days: Number of days to analyze
        output_format: Output format (json or text)
        project_root: Project root directory (defaults to cwd)
    """
    project_root = project_root or Path.cwd()
    collector = HealthMetricsCollector(project_root=project_root)

    # Compute trends with 3-step progress feedback
    feedback = get_feedback()
    feedback.format_type = output_format
    feedback.start_operation("Health Trends", f"Analyzing health trends for {check_name}")
    feedback.running("Loading historical data...", step=1, total_steps=3)

    trends = collector.get_trends(check_name=check_name, days=days)
    feedback.running("Calculating trends...", step=2, total_steps=3)
    feedback.running("Generating trend report...", step=3, total_steps=3)
    feedback.clear_progress()

    if output_format == "json":
        output = {
            "check_name": check_name,
            "days": days,
            "trends": trends,
        }
        feedback.output_result(output, message="Health trends analyzed")
    else:
        # Text output
        feedback.success("Health trends analyzed")
        print(f"\nHealth Trends for '{check_name}' (last {days} days)")
        print("=" * 70)
        print(f"Direction: {trends['direction']}")
        print(f"Score change: {trends['score_change']:+.1f} points")

        if trends["status_changes"]:
            # FIX: plain literal instead of a placeholder-free f-string (F541).
            print("\nStatus changes:")
            # Only report statuses whose count actually moved.
            for status, delta in trends["status_changes"].items():
                if delta != 0:
                    print(f"  {status}: {delta:+d}")
452
+
453
+
454
def handle_health_usage_command(args: object) -> None:
    """
    Handle health usage subcommand (formerly analytics).

    Dispatches to dashboard, agents, workflows, trends, or system views, all
    backed by AnalyticsDashboard.
    """
    from ...core.analytics_dashboard import AnalyticsDashboard

    dash = AnalyticsDashboard()
    action = getattr(args, "usage_subcommand", "dashboard")
    if action == "show":  # "show" is an alias for the dashboard view
        action = "dashboard"
    as_json = getattr(args, "format", "text") == "json"

    if action == "dashboard":
        data = dash.get_dashboard_data()
        if as_json:
            format_json_output(data)
            return
        banner = "=" * 60
        print("\n" + banner)
        print("Usage / Analytics Dashboard")
        print(banner)
        print(f"\nSystem Status (as of {data['timestamp']}):")
        sys_data = data["system"]
        print(f"  Total Agents: {sys_data['total_agents']}")
        print(f"  Active Workflows: {sys_data['active_workflows']}")
        print(f"  Completed Today: {sys_data['completed_workflows_today']}")
        print(f"  Failed Today: {sys_data['failed_workflows_today']}")
        print(f"  Avg Workflow Duration: {sys_data['average_workflow_duration']:.2f}s")
        print(f"  CPU Usage: {sys_data['cpu_usage']:.1f}%")
        print(f"  Memory Usage: {sys_data['memory_usage']:.1f}%")
        print(f"  Disk Usage: {sys_data['disk_usage']:.1f}%")
        print("\nAgent Performance (Top 10):")
        busiest_agents = sorted(
            data["agents"], key=lambda x: x["total_executions"], reverse=True
        )
        for agent in busiest_agents[:10]:
            print(
                f"  {agent['agent_name']}: {agent['total_executions']} executions, "
                f"{agent['success_rate']*100:.1f}% success, {agent['average_duration']:.2f}s avg"
            )
        print("\nWorkflow Performance:")
        busiest_wfs = sorted(
            data["workflows"], key=lambda x: x["total_executions"], reverse=True
        )
        for wf in busiest_wfs[:10]:
            print(
                f"  {wf['workflow_name']}: {wf['total_executions']} executions, "
                f"{wf['success_rate']*100:.1f}% success"
            )
        return

    if action == "agents":
        rows = dash.get_agent_performance(agent_id=getattr(args, "agent_id", None))
        if as_json:
            format_json_output(rows)
            return
        for agent in rows:
            print(
                f"{agent['agent_name']}: {agent['total_executions']} executions, "
                f"{agent['success_rate']*100:.1f}% success"
            )
        return

    if action == "workflows":
        rows = dash.get_workflow_performance(workflow_id=getattr(args, "workflow_id", None))
        if as_json:
            format_json_output(rows)
            return
        for wf in rows:
            print(
                f"{wf['workflow_name']}: {wf['total_executions']} executions, "
                f"{wf['success_rate']*100:.1f}% success"
            )
        return

    if action == "trends":
        trends = dash.get_trends(
            getattr(args, "metric_type", "agent_duration"),
            days=getattr(args, "days", 30),
        )
        if as_json:
            format_json_output(trends)
            return
        for t in trends:
            print(f"{t['metric_name']}: {len(t['values'])} data points")
        return

    if action == "system":
        status = dash.get_system_status()
        if as_json:
            format_json_output(status)
            return
        print(f"System Status (as of {status['timestamp']}):")
        print(f"  Total Agents: {status['total_agents']}")
        print(f"  Active Workflows: {status['active_workflows']}")
        print(f"  Completed Today: {status['completed_workflows_today']}")
        print(f"  Failed Today: {status['failed_workflows_today']}")
528
+
529
+
530
def handle_health_overview_command(
    output_format: str = "text",
    project_root: Path | None = None,
) -> None:
    """
    Single 1000-foot view: health checks + usage rolled up for all subsystems.

    Renders one easy-to-read report: overall health, each health check one line,
    then usage at a glance (system, top agents, top workflows).

    Args:
        output_format: Output format (json or text)
        project_root: Project root directory (defaults to cwd)
    """
    from ...core.analytics_dashboard import AnalyticsDashboard

    project_root = project_root or Path.cwd()

    # 1. Health checks
    registry = HealthCheckRegistry()
    registry.register(EnvironmentHealthCheck(project_root=project_root))
    registry.register(AutomationHealthCheck(project_root=project_root))
    registry.register(ExecutionHealthCheck(project_root=project_root))
    registry.register(Context7CacheHealthCheck(project_root=project_root))
    registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
    registry.register(OutcomeHealthCheck(project_root=project_root))
    metrics_collector = HealthMetricsCollector(project_root=project_root)
    orchestrator = HealthOrchestrator(
        registry=registry,
        metrics_collector=metrics_collector,
        project_root=project_root,
    )
    health_results = orchestrator.run_all_checks(save_metrics=True)
    overall = orchestrator.get_overall_health(health_results)

    # 2. Usage (best-effort; prefer analytics, fallback to execution metrics — HM-001-S1)
    _log = logging.getLogger(__name__)
    usage_data = None
    try:
        usage_data = AnalyticsDashboard().get_dashboard_data()
    except Exception:
        pass  # best-effort: analytics store may be absent or unreadable

    # Total runs reported by analytics; zero means "no real usage data".
    total_runs = 0
    if usage_data:
        total_runs = sum(
            a.get("total_executions", 0) for a in (usage_data.get("agents") or [])
        ) + sum(
            w.get("total_executions", 0) for w in (usage_data.get("workflows") or [])
        )

    # REFACTOR: the original duplicated this fallback in both branches of
    # `if usage_data:`; one condition covers "missing" and "empty" alike.
    fallback_used = False
    if not usage_data or total_runs == 0:
        fallback = _usage_data_from_execution_metrics(project_root)
        if fallback:
            fallback_used = True
            usage_data = fallback
    if fallback_used and usage_data:
        _log.info(
            "Health overview: using execution metrics fallback (%s agents, %s workflows)",
            len(usage_data.get("agents") or []),
            len(usage_data.get("workflows") or []),
        )

    # 3. Build output
    feedback = get_feedback()
    feedback.format_type = output_format

    if output_format == "json":
        out = {
            "overview": {
                "overall_health": overall,
                "health_checks": {
                    name: {
                        "status": r.status,
                        "score": r.score,
                        "message": r.message,
                    }
                    for name, r in health_results.items()
                    if r
                },
            },
            "usage": usage_data,
        }
        format_json_output(out)
        return

    # Text: 1000-foot, great-looking, easy to read
    width = 72
    lines = []
    lines.append("")
    lines.append("=" * width)
    lines.append(" TAPPS-AGENTS | HEALTH + USAGE | 1000-FOOT VIEW")
    lines.append("=" * width)
    lines.append("")

    # Overall health
    status_sym = {"healthy": "[OK] ", "degraded": "[WARN]", "unhealthy": "[FAIL]", "unknown": "[?] "}
    sym = status_sym.get(overall["status"], "[?] ")
    lines.append(f" {sym} Overall: {overall['status'].upper()} ({overall['score']:.1f}/100)")
    lines.append("")

    # Subsystems (health checks) - one line each
    lines.append(" SUBSYSTEMS (health)")
    lines.append(" " + "-" * (width - 2))
    for name, result in sorted(health_results.items()):
        if not result:
            continue
        s = status_sym.get(result.status, "[?] ")
        label = name.replace("_", " ").upper()
        # Truncate long messages so each subsystem stays on one line.
        lines.append(f" {s} {label}: {result.score:.1f}/100 | {result.message[:50]}{'...' if len(result.message) > 50 else ''}")
    lines.append("")

    # Usage at a glance
    lines.append(" USAGE (agents & workflows)")
    lines.append(" " + "-" * (width - 2))
    if usage_data:
        sys_data = usage_data.get("system", {})
        lines.append(f" Today: completed {sys_data.get('completed_workflows_today', 0)} workflows, failed {sys_data.get('failed_workflows_today', 0)} | active: {sys_data.get('active_workflows', 0)}")
        lines.append(f" Avg workflow duration: {sys_data.get('average_workflow_duration', 0):.1f}s | CPU: {sys_data.get('cpu_usage', 0):.0f}% Mem: {sys_data.get('memory_usage', 0):.0f}% Disk: {sys_data.get('disk_usage', 0):.0f}%")
        agents = sorted(usage_data.get("agents", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
        if agents:
            lines.append(" Top agents (30d): " + " | ".join(f"{a.get('agent_name', '')}: {a.get('total_executions', 0)} runs ({a.get('success_rate', 0)*100:.0f}% ok)" for a in agents))
        workflows = sorted(usage_data.get("workflows", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
        if workflows:
            lines.append(" Top workflows (30d): " + " | ".join(f"{w.get('workflow_name', '')}: {w.get('total_executions', 0)} ({w.get('success_rate', 0)*100:.0f}% ok)" for w in workflows))
    else:
        lines.append("  (No usage data yet. Run agents/workflows to populate.)")
    lines.append("")
    lines.append("=" * width)
    lines.append("")

    feedback.clear_progress()
    print("\n".join(lines))
665
+