empathy-framework 5.3.0-py3-none-any.whl → 5.4.0-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (458)
  1. empathy_framework-5.4.0.dist-info/METADATA +47 -0
  2. empathy_framework-5.4.0.dist-info/RECORD +8 -0
  3. {empathy_framework-5.3.0.dist-info → empathy_framework-5.4.0.dist-info}/top_level.txt +0 -1
  4. empathy_healthcare_plugin/__init__.py +12 -11
  5. empathy_llm_toolkit/__init__.py +12 -26
  6. empathy_os/__init__.py +12 -356
  7. empathy_software_plugin/__init__.py +12 -11
  8. empathy_framework-5.3.0.dist-info/METADATA +0 -1026
  9. empathy_framework-5.3.0.dist-info/RECORD +0 -456
  10. empathy_framework-5.3.0.dist-info/entry_points.txt +0 -26
  11. empathy_framework-5.3.0.dist-info/licenses/LICENSE +0 -201
  12. empathy_framework-5.3.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -101
  13. empathy_healthcare_plugin/monitors/__init__.py +0 -9
  14. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +0 -315
  15. empathy_healthcare_plugin/monitors/monitoring/__init__.py +0 -44
  16. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +0 -300
  17. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +0 -214
  18. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +0 -306
  19. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +0 -389
  20. empathy_healthcare_plugin/protocols/cardiac.json +0 -93
  21. empathy_healthcare_plugin/protocols/post_operative.json +0 -92
  22. empathy_healthcare_plugin/protocols/respiratory.json +0 -92
  23. empathy_healthcare_plugin/protocols/sepsis.json +0 -141
  24. empathy_llm_toolkit/README.md +0 -553
  25. empathy_llm_toolkit/agent_factory/__init__.py +0 -53
  26. empathy_llm_toolkit/agent_factory/adapters/__init__.py +0 -85
  27. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +0 -312
  28. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +0 -483
  29. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +0 -298
  30. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +0 -362
  31. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +0 -333
  32. empathy_llm_toolkit/agent_factory/adapters/native.py +0 -228
  33. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +0 -423
  34. empathy_llm_toolkit/agent_factory/base.py +0 -305
  35. empathy_llm_toolkit/agent_factory/crews/__init__.py +0 -67
  36. empathy_llm_toolkit/agent_factory/crews/code_review.py +0 -1113
  37. empathy_llm_toolkit/agent_factory/crews/health_check.py +0 -1262
  38. empathy_llm_toolkit/agent_factory/crews/refactoring.py +0 -1128
  39. empathy_llm_toolkit/agent_factory/crews/security_audit.py +0 -1018
  40. empathy_llm_toolkit/agent_factory/decorators.py +0 -287
  41. empathy_llm_toolkit/agent_factory/factory.py +0 -558
  42. empathy_llm_toolkit/agent_factory/framework.py +0 -193
  43. empathy_llm_toolkit/agent_factory/memory_integration.py +0 -328
  44. empathy_llm_toolkit/agent_factory/resilient.py +0 -320
  45. empathy_llm_toolkit/agents_md/__init__.py +0 -22
  46. empathy_llm_toolkit/agents_md/loader.py +0 -218
  47. empathy_llm_toolkit/agents_md/parser.py +0 -271
  48. empathy_llm_toolkit/agents_md/registry.py +0 -307
  49. empathy_llm_toolkit/claude_memory.py +0 -466
  50. empathy_llm_toolkit/cli/__init__.py +0 -8
  51. empathy_llm_toolkit/cli/sync_claude.py +0 -487
  52. empathy_llm_toolkit/code_health.py +0 -1313
  53. empathy_llm_toolkit/commands/__init__.py +0 -51
  54. empathy_llm_toolkit/commands/context.py +0 -375
  55. empathy_llm_toolkit/commands/loader.py +0 -301
  56. empathy_llm_toolkit/commands/models.py +0 -231
  57. empathy_llm_toolkit/commands/parser.py +0 -371
  58. empathy_llm_toolkit/commands/registry.py +0 -429
  59. empathy_llm_toolkit/config/__init__.py +0 -29
  60. empathy_llm_toolkit/config/unified.py +0 -291
  61. empathy_llm_toolkit/context/__init__.py +0 -22
  62. empathy_llm_toolkit/context/compaction.py +0 -455
  63. empathy_llm_toolkit/context/manager.py +0 -434
  64. empathy_llm_toolkit/contextual_patterns.py +0 -361
  65. empathy_llm_toolkit/core.py +0 -907
  66. empathy_llm_toolkit/git_pattern_extractor.py +0 -435
  67. empathy_llm_toolkit/hooks/__init__.py +0 -24
  68. empathy_llm_toolkit/hooks/config.py +0 -306
  69. empathy_llm_toolkit/hooks/executor.py +0 -289
  70. empathy_llm_toolkit/hooks/registry.py +0 -302
  71. empathy_llm_toolkit/hooks/scripts/__init__.py +0 -39
  72. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +0 -201
  73. empathy_llm_toolkit/hooks/scripts/first_time_init.py +0 -285
  74. empathy_llm_toolkit/hooks/scripts/pre_compact.py +0 -207
  75. empathy_llm_toolkit/hooks/scripts/session_end.py +0 -183
  76. empathy_llm_toolkit/hooks/scripts/session_start.py +0 -163
  77. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +0 -225
  78. empathy_llm_toolkit/learning/__init__.py +0 -30
  79. empathy_llm_toolkit/learning/evaluator.py +0 -438
  80. empathy_llm_toolkit/learning/extractor.py +0 -514
  81. empathy_llm_toolkit/learning/storage.py +0 -560
  82. empathy_llm_toolkit/levels.py +0 -227
  83. empathy_llm_toolkit/pattern_confidence.py +0 -414
  84. empathy_llm_toolkit/pattern_resolver.py +0 -272
  85. empathy_llm_toolkit/pattern_summary.py +0 -350
  86. empathy_llm_toolkit/providers.py +0 -967
  87. empathy_llm_toolkit/routing/__init__.py +0 -32
  88. empathy_llm_toolkit/routing/model_router.py +0 -362
  89. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +0 -413
  90. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +0 -384
  91. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +0 -271
  92. empathy_llm_toolkit/security/QUICK_REFERENCE.md +0 -316
  93. empathy_llm_toolkit/security/README.md +0 -262
  94. empathy_llm_toolkit/security/__init__.py +0 -62
  95. empathy_llm_toolkit/security/audit_logger.py +0 -929
  96. empathy_llm_toolkit/security/audit_logger_example.py +0 -152
  97. empathy_llm_toolkit/security/pii_scrubber.py +0 -640
  98. empathy_llm_toolkit/security/secrets_detector.py +0 -678
  99. empathy_llm_toolkit/security/secrets_detector_example.py +0 -304
  100. empathy_llm_toolkit/security/secure_memdocs.py +0 -1192
  101. empathy_llm_toolkit/security/secure_memdocs_example.py +0 -278
  102. empathy_llm_toolkit/session_status.py +0 -745
  103. empathy_llm_toolkit/state.py +0 -246
  104. empathy_llm_toolkit/utils/__init__.py +0 -5
  105. empathy_llm_toolkit/utils/tokens.py +0 -349
  106. empathy_os/adaptive/__init__.py +0 -13
  107. empathy_os/adaptive/task_complexity.py +0 -127
  108. empathy_os/agent_monitoring.py +0 -414
  109. empathy_os/cache/__init__.py +0 -117
  110. empathy_os/cache/base.py +0 -166
  111. empathy_os/cache/dependency_manager.py +0 -256
  112. empathy_os/cache/hash_only.py +0 -251
  113. empathy_os/cache/hybrid.py +0 -457
  114. empathy_os/cache/storage.py +0 -285
  115. empathy_os/cache_monitor.py +0 -356
  116. empathy_os/cache_stats.py +0 -298
  117. empathy_os/cli/__init__.py +0 -152
  118. empathy_os/cli/__main__.py +0 -12
  119. empathy_os/cli/commands/__init__.py +0 -1
  120. empathy_os/cli/commands/batch.py +0 -264
  121. empathy_os/cli/commands/cache.py +0 -248
  122. empathy_os/cli/commands/help.py +0 -331
  123. empathy_os/cli/commands/info.py +0 -140
  124. empathy_os/cli/commands/inspect.py +0 -436
  125. empathy_os/cli/commands/inspection.py +0 -57
  126. empathy_os/cli/commands/memory.py +0 -48
  127. empathy_os/cli/commands/metrics.py +0 -92
  128. empathy_os/cli/commands/orchestrate.py +0 -184
  129. empathy_os/cli/commands/patterns.py +0 -207
  130. empathy_os/cli/commands/profiling.py +0 -202
  131. empathy_os/cli/commands/provider.py +0 -98
  132. empathy_os/cli/commands/routing.py +0 -285
  133. empathy_os/cli/commands/setup.py +0 -96
  134. empathy_os/cli/commands/status.py +0 -235
  135. empathy_os/cli/commands/sync.py +0 -166
  136. empathy_os/cli/commands/tier.py +0 -121
  137. empathy_os/cli/commands/utilities.py +0 -114
  138. empathy_os/cli/commands/workflow.py +0 -579
  139. empathy_os/cli/core.py +0 -32
  140. empathy_os/cli/parsers/__init__.py +0 -68
  141. empathy_os/cli/parsers/batch.py +0 -118
  142. empathy_os/cli/parsers/cache.py +0 -65
  143. empathy_os/cli/parsers/help.py +0 -41
  144. empathy_os/cli/parsers/info.py +0 -26
  145. empathy_os/cli/parsers/inspect.py +0 -66
  146. empathy_os/cli/parsers/metrics.py +0 -42
  147. empathy_os/cli/parsers/orchestrate.py +0 -61
  148. empathy_os/cli/parsers/patterns.py +0 -54
  149. empathy_os/cli/parsers/provider.py +0 -40
  150. empathy_os/cli/parsers/routing.py +0 -110
  151. empathy_os/cli/parsers/setup.py +0 -42
  152. empathy_os/cli/parsers/status.py +0 -47
  153. empathy_os/cli/parsers/sync.py +0 -31
  154. empathy_os/cli/parsers/tier.py +0 -33
  155. empathy_os/cli/parsers/workflow.py +0 -77
  156. empathy_os/cli/utils/__init__.py +0 -1
  157. empathy_os/cli/utils/data.py +0 -242
  158. empathy_os/cli/utils/helpers.py +0 -68
  159. empathy_os/cli_legacy.py +0 -3957
  160. empathy_os/cli_minimal.py +0 -1159
  161. empathy_os/cli_router.py +0 -437
  162. empathy_os/cli_unified.py +0 -814
  163. empathy_os/config/__init__.py +0 -66
  164. empathy_os/config/xml_config.py +0 -286
  165. empathy_os/config.py +0 -545
  166. empathy_os/coordination.py +0 -870
  167. empathy_os/core.py +0 -1511
  168. empathy_os/core_modules/__init__.py +0 -15
  169. empathy_os/cost_tracker.py +0 -626
  170. empathy_os/dashboard/__init__.py +0 -41
  171. empathy_os/dashboard/app.py +0 -512
  172. empathy_os/dashboard/simple_server.py +0 -435
  173. empathy_os/dashboard/standalone_server.py +0 -547
  174. empathy_os/discovery.py +0 -306
  175. empathy_os/emergence.py +0 -306
  176. empathy_os/exceptions.py +0 -123
  177. empathy_os/feedback_loops.py +0 -373
  178. empathy_os/hot_reload/README.md +0 -473
  179. empathy_os/hot_reload/__init__.py +0 -62
  180. empathy_os/hot_reload/config.py +0 -83
  181. empathy_os/hot_reload/integration.py +0 -229
  182. empathy_os/hot_reload/reloader.py +0 -298
  183. empathy_os/hot_reload/watcher.py +0 -183
  184. empathy_os/hot_reload/websocket.py +0 -177
  185. empathy_os/levels.py +0 -577
  186. empathy_os/leverage_points.py +0 -441
  187. empathy_os/logging_config.py +0 -261
  188. empathy_os/mcp/__init__.py +0 -10
  189. empathy_os/mcp/server.py +0 -506
  190. empathy_os/memory/__init__.py +0 -237
  191. empathy_os/memory/claude_memory.py +0 -469
  192. empathy_os/memory/config.py +0 -224
  193. empathy_os/memory/control_panel.py +0 -1290
  194. empathy_os/memory/control_panel_support.py +0 -145
  195. empathy_os/memory/cross_session.py +0 -845
  196. empathy_os/memory/edges.py +0 -179
  197. empathy_os/memory/encryption.py +0 -159
  198. empathy_os/memory/file_session.py +0 -770
  199. empathy_os/memory/graph.py +0 -570
  200. empathy_os/memory/long_term.py +0 -913
  201. empathy_os/memory/long_term_types.py +0 -99
  202. empathy_os/memory/mixins/__init__.py +0 -25
  203. empathy_os/memory/mixins/backend_init_mixin.py +0 -249
  204. empathy_os/memory/mixins/capabilities_mixin.py +0 -208
  205. empathy_os/memory/mixins/handoff_mixin.py +0 -208
  206. empathy_os/memory/mixins/lifecycle_mixin.py +0 -49
  207. empathy_os/memory/mixins/long_term_mixin.py +0 -352
  208. empathy_os/memory/mixins/promotion_mixin.py +0 -109
  209. empathy_os/memory/mixins/short_term_mixin.py +0 -182
  210. empathy_os/memory/nodes.py +0 -179
  211. empathy_os/memory/redis_bootstrap.py +0 -540
  212. empathy_os/memory/security/__init__.py +0 -31
  213. empathy_os/memory/security/audit_logger.py +0 -932
  214. empathy_os/memory/security/pii_scrubber.py +0 -640
  215. empathy_os/memory/security/secrets_detector.py +0 -678
  216. empathy_os/memory/short_term.py +0 -2192
  217. empathy_os/memory/simple_storage.py +0 -302
  218. empathy_os/memory/storage/__init__.py +0 -15
  219. empathy_os/memory/storage_backend.py +0 -167
  220. empathy_os/memory/summary_index.py +0 -583
  221. empathy_os/memory/types.py +0 -446
  222. empathy_os/memory/unified.py +0 -182
  223. empathy_os/meta_workflows/__init__.py +0 -74
  224. empathy_os/meta_workflows/agent_creator.py +0 -248
  225. empathy_os/meta_workflows/builtin_templates.py +0 -567
  226. empathy_os/meta_workflows/cli_commands/__init__.py +0 -56
  227. empathy_os/meta_workflows/cli_commands/agent_commands.py +0 -321
  228. empathy_os/meta_workflows/cli_commands/analytics_commands.py +0 -442
  229. empathy_os/meta_workflows/cli_commands/config_commands.py +0 -232
  230. empathy_os/meta_workflows/cli_commands/memory_commands.py +0 -182
  231. empathy_os/meta_workflows/cli_commands/template_commands.py +0 -354
  232. empathy_os/meta_workflows/cli_commands/workflow_commands.py +0 -382
  233. empathy_os/meta_workflows/cli_meta_workflows.py +0 -59
  234. empathy_os/meta_workflows/form_engine.py +0 -292
  235. empathy_os/meta_workflows/intent_detector.py +0 -409
  236. empathy_os/meta_workflows/models.py +0 -569
  237. empathy_os/meta_workflows/pattern_learner.py +0 -738
  238. empathy_os/meta_workflows/plan_generator.py +0 -384
  239. empathy_os/meta_workflows/session_context.py +0 -397
  240. empathy_os/meta_workflows/template_registry.py +0 -229
  241. empathy_os/meta_workflows/workflow.py +0 -984
  242. empathy_os/metrics/__init__.py +0 -12
  243. empathy_os/metrics/collector.py +0 -31
  244. empathy_os/metrics/prompt_metrics.py +0 -194
  245. empathy_os/models/__init__.py +0 -172
  246. empathy_os/models/__main__.py +0 -13
  247. empathy_os/models/adaptive_routing.py +0 -437
  248. empathy_os/models/auth_cli.py +0 -444
  249. empathy_os/models/auth_strategy.py +0 -450
  250. empathy_os/models/cli.py +0 -655
  251. empathy_os/models/empathy_executor.py +0 -354
  252. empathy_os/models/executor.py +0 -257
  253. empathy_os/models/fallback.py +0 -762
  254. empathy_os/models/provider_config.py +0 -282
  255. empathy_os/models/registry.py +0 -472
  256. empathy_os/models/tasks.py +0 -359
  257. empathy_os/models/telemetry/__init__.py +0 -71
  258. empathy_os/models/telemetry/analytics.py +0 -594
  259. empathy_os/models/telemetry/backend.py +0 -196
  260. empathy_os/models/telemetry/data_models.py +0 -431
  261. empathy_os/models/telemetry/storage.py +0 -489
  262. empathy_os/models/token_estimator.py +0 -420
  263. empathy_os/models/validation.py +0 -280
  264. empathy_os/monitoring/__init__.py +0 -52
  265. empathy_os/monitoring/alerts.py +0 -946
  266. empathy_os/monitoring/alerts_cli.py +0 -448
  267. empathy_os/monitoring/multi_backend.py +0 -271
  268. empathy_os/monitoring/otel_backend.py +0 -362
  269. empathy_os/optimization/__init__.py +0 -19
  270. empathy_os/optimization/context_optimizer.py +0 -272
  271. empathy_os/orchestration/__init__.py +0 -67
  272. empathy_os/orchestration/agent_templates.py +0 -707
  273. empathy_os/orchestration/config_store.py +0 -499
  274. empathy_os/orchestration/execution_strategies.py +0 -2111
  275. empathy_os/orchestration/meta_orchestrator.py +0 -1168
  276. empathy_os/orchestration/pattern_learner.py +0 -696
  277. empathy_os/orchestration/real_tools.py +0 -931
  278. empathy_os/pattern_cache.py +0 -187
  279. empathy_os/pattern_library.py +0 -542
  280. empathy_os/patterns/debugging/all_patterns.json +0 -81
  281. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +0 -77
  282. empathy_os/patterns/refactoring_memory.json +0 -89
  283. empathy_os/persistence.py +0 -564
  284. empathy_os/platform_utils.py +0 -265
  285. empathy_os/plugins/__init__.py +0 -28
  286. empathy_os/plugins/base.py +0 -361
  287. empathy_os/plugins/registry.py +0 -268
  288. empathy_os/project_index/__init__.py +0 -32
  289. empathy_os/project_index/cli.py +0 -335
  290. empathy_os/project_index/index.py +0 -667
  291. empathy_os/project_index/models.py +0 -504
  292. empathy_os/project_index/reports.py +0 -474
  293. empathy_os/project_index/scanner.py +0 -777
  294. empathy_os/project_index/scanner_parallel.py +0 -291
  295. empathy_os/prompts/__init__.py +0 -61
  296. empathy_os/prompts/config.py +0 -77
  297. empathy_os/prompts/context.py +0 -177
  298. empathy_os/prompts/parser.py +0 -285
  299. empathy_os/prompts/registry.py +0 -313
  300. empathy_os/prompts/templates.py +0 -208
  301. empathy_os/redis_config.py +0 -302
  302. empathy_os/redis_memory.py +0 -799
  303. empathy_os/resilience/__init__.py +0 -56
  304. empathy_os/resilience/circuit_breaker.py +0 -256
  305. empathy_os/resilience/fallback.py +0 -179
  306. empathy_os/resilience/health.py +0 -300
  307. empathy_os/resilience/retry.py +0 -209
  308. empathy_os/resilience/timeout.py +0 -135
  309. empathy_os/routing/__init__.py +0 -43
  310. empathy_os/routing/chain_executor.py +0 -433
  311. empathy_os/routing/classifier.py +0 -217
  312. empathy_os/routing/smart_router.py +0 -234
  313. empathy_os/routing/workflow_registry.py +0 -343
  314. empathy_os/scaffolding/README.md +0 -589
  315. empathy_os/scaffolding/__init__.py +0 -35
  316. empathy_os/scaffolding/__main__.py +0 -14
  317. empathy_os/scaffolding/cli.py +0 -240
  318. empathy_os/socratic/__init__.py +0 -256
  319. empathy_os/socratic/ab_testing.py +0 -958
  320. empathy_os/socratic/blueprint.py +0 -533
  321. empathy_os/socratic/cli.py +0 -703
  322. empathy_os/socratic/collaboration.py +0 -1114
  323. empathy_os/socratic/domain_templates.py +0 -924
  324. empathy_os/socratic/embeddings.py +0 -738
  325. empathy_os/socratic/engine.py +0 -794
  326. empathy_os/socratic/explainer.py +0 -682
  327. empathy_os/socratic/feedback.py +0 -772
  328. empathy_os/socratic/forms.py +0 -629
  329. empathy_os/socratic/generator.py +0 -732
  330. empathy_os/socratic/llm_analyzer.py +0 -637
  331. empathy_os/socratic/mcp_server.py +0 -702
  332. empathy_os/socratic/session.py +0 -312
  333. empathy_os/socratic/storage.py +0 -667
  334. empathy_os/socratic/success.py +0 -730
  335. empathy_os/socratic/visual_editor.py +0 -860
  336. empathy_os/socratic/web_ui.py +0 -958
  337. empathy_os/telemetry/__init__.py +0 -39
  338. empathy_os/telemetry/agent_coordination.py +0 -475
  339. empathy_os/telemetry/agent_tracking.py +0 -367
  340. empathy_os/telemetry/approval_gates.py +0 -545
  341. empathy_os/telemetry/cli.py +0 -1231
  342. empathy_os/telemetry/commands/__init__.py +0 -14
  343. empathy_os/telemetry/commands/dashboard_commands.py +0 -696
  344. empathy_os/telemetry/event_streaming.py +0 -409
  345. empathy_os/telemetry/feedback_loop.py +0 -567
  346. empathy_os/telemetry/usage_tracker.py +0 -591
  347. empathy_os/templates.py +0 -754
  348. empathy_os/test_generator/__init__.py +0 -38
  349. empathy_os/test_generator/__main__.py +0 -14
  350. empathy_os/test_generator/cli.py +0 -234
  351. empathy_os/test_generator/generator.py +0 -355
  352. empathy_os/test_generator/risk_analyzer.py +0 -216
  353. empathy_os/tier_recommender.py +0 -384
  354. empathy_os/tools.py +0 -183
  355. empathy_os/trust/__init__.py +0 -28
  356. empathy_os/trust/circuit_breaker.py +0 -579
  357. empathy_os/trust_building.py +0 -527
  358. empathy_os/validation/__init__.py +0 -19
  359. empathy_os/validation/xml_validator.py +0 -281
  360. empathy_os/vscode_bridge.py +0 -173
  361. empathy_os/workflow_commands.py +0 -780
  362. empathy_os/workflow_patterns/__init__.py +0 -33
  363. empathy_os/workflow_patterns/behavior.py +0 -249
  364. empathy_os/workflow_patterns/core.py +0 -76
  365. empathy_os/workflow_patterns/output.py +0 -99
  366. empathy_os/workflow_patterns/registry.py +0 -255
  367. empathy_os/workflow_patterns/structural.py +0 -288
  368. empathy_os/workflows/__init__.py +0 -539
  369. empathy_os/workflows/autonomous_test_gen.py +0 -1268
  370. empathy_os/workflows/base.py +0 -2667
  371. empathy_os/workflows/batch_processing.py +0 -342
  372. empathy_os/workflows/bug_predict.py +0 -1084
  373. empathy_os/workflows/builder.py +0 -273
  374. empathy_os/workflows/caching.py +0 -253
  375. empathy_os/workflows/code_review.py +0 -1048
  376. empathy_os/workflows/code_review_adapters.py +0 -312
  377. empathy_os/workflows/code_review_pipeline.py +0 -722
  378. empathy_os/workflows/config.py +0 -645
  379. empathy_os/workflows/dependency_check.py +0 -644
  380. empathy_os/workflows/document_gen/__init__.py +0 -25
  381. empathy_os/workflows/document_gen/config.py +0 -30
  382. empathy_os/workflows/document_gen/report_formatter.py +0 -162
  383. empathy_os/workflows/document_gen/workflow.py +0 -1426
  384. empathy_os/workflows/document_manager.py +0 -216
  385. empathy_os/workflows/document_manager_README.md +0 -134
  386. empathy_os/workflows/documentation_orchestrator.py +0 -1205
  387. empathy_os/workflows/history.py +0 -510
  388. empathy_os/workflows/keyboard_shortcuts/__init__.py +0 -39
  389. empathy_os/workflows/keyboard_shortcuts/generators.py +0 -391
  390. empathy_os/workflows/keyboard_shortcuts/parsers.py +0 -416
  391. empathy_os/workflows/keyboard_shortcuts/prompts.py +0 -295
  392. empathy_os/workflows/keyboard_shortcuts/schema.py +0 -193
  393. empathy_os/workflows/keyboard_shortcuts/workflow.py +0 -509
  394. empathy_os/workflows/llm_base.py +0 -363
  395. empathy_os/workflows/manage_docs.py +0 -87
  396. empathy_os/workflows/manage_docs_README.md +0 -134
  397. empathy_os/workflows/manage_documentation.py +0 -821
  398. empathy_os/workflows/new_sample_workflow1.py +0 -149
  399. empathy_os/workflows/new_sample_workflow1_README.md +0 -150
  400. empathy_os/workflows/orchestrated_health_check.py +0 -849
  401. empathy_os/workflows/orchestrated_release_prep.py +0 -600
  402. empathy_os/workflows/output.py +0 -413
  403. empathy_os/workflows/perf_audit.py +0 -863
  404. empathy_os/workflows/pr_review.py +0 -762
  405. empathy_os/workflows/progress.py +0 -785
  406. empathy_os/workflows/progress_server.py +0 -322
  407. empathy_os/workflows/progressive/README 2.md +0 -454
  408. empathy_os/workflows/progressive/README.md +0 -454
  409. empathy_os/workflows/progressive/__init__.py +0 -82
  410. empathy_os/workflows/progressive/cli.py +0 -219
  411. empathy_os/workflows/progressive/core.py +0 -488
  412. empathy_os/workflows/progressive/orchestrator.py +0 -723
  413. empathy_os/workflows/progressive/reports.py +0 -520
  414. empathy_os/workflows/progressive/telemetry.py +0 -274
  415. empathy_os/workflows/progressive/test_gen.py +0 -495
  416. empathy_os/workflows/progressive/workflow.py +0 -589
  417. empathy_os/workflows/refactor_plan.py +0 -694
  418. empathy_os/workflows/release_prep.py +0 -895
  419. empathy_os/workflows/release_prep_crew.py +0 -969
  420. empathy_os/workflows/research_synthesis.py +0 -404
  421. empathy_os/workflows/routing.py +0 -168
  422. empathy_os/workflows/secure_release.py +0 -593
  423. empathy_os/workflows/security_adapters.py +0 -297
  424. empathy_os/workflows/security_audit.py +0 -1329
  425. empathy_os/workflows/security_audit_phase3.py +0 -355
  426. empathy_os/workflows/seo_optimization.py +0 -633
  427. empathy_os/workflows/step_config.py +0 -234
  428. empathy_os/workflows/telemetry_mixin.py +0 -269
  429. empathy_os/workflows/test5.py +0 -125
  430. empathy_os/workflows/test5_README.md +0 -158
  431. empathy_os/workflows/test_coverage_boost_crew.py +0 -849
  432. empathy_os/workflows/test_gen/__init__.py +0 -52
  433. empathy_os/workflows/test_gen/ast_analyzer.py +0 -249
  434. empathy_os/workflows/test_gen/config.py +0 -88
  435. empathy_os/workflows/test_gen/data_models.py +0 -38
  436. empathy_os/workflows/test_gen/report_formatter.py +0 -289
  437. empathy_os/workflows/test_gen/test_templates.py +0 -381
  438. empathy_os/workflows/test_gen/workflow.py +0 -655
  439. empathy_os/workflows/test_gen.py +0 -54
  440. empathy_os/workflows/test_gen_behavioral.py +0 -477
  441. empathy_os/workflows/test_gen_parallel.py +0 -341
  442. empathy_os/workflows/test_lifecycle.py +0 -526
  443. empathy_os/workflows/test_maintenance.py +0 -627
  444. empathy_os/workflows/test_maintenance_cli.py +0 -590
  445. empathy_os/workflows/test_maintenance_crew.py +0 -840
  446. empathy_os/workflows/test_runner.py +0 -622
  447. empathy_os/workflows/tier_tracking.py +0 -531
  448. empathy_os/workflows/xml_enhanced_crew.py +0 -285
  449. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +0 -57
  450. empathy_software_plugin/cli/__init__.py +0 -120
  451. empathy_software_plugin/cli/inspect.py +0 -362
  452. empathy_software_plugin/cli.py +0 -574
  453. empathy_software_plugin/plugin.py +0 -188
  454. workflow_scaffolding/__init__.py +0 -11
  455. workflow_scaffolding/__main__.py +0 -12
  456. workflow_scaffolding/cli.py +0 -206
  457. workflow_scaffolding/generator.py +0 -265
  458. {empathy_framework-5.3.0.dist-info → empathy_framework-5.4.0.dist-info}/WHEEL +0 -0
empathy_os/telemetry/cli.py
@@ -1,1231 +0,0 @@
- """CLI commands for telemetry tracking.
-
- Provides commands to view, analyze, and manage local usage telemetry data.
-
- Copyright 2025 Smart-AI-Memory
- Licensed under Fair Source License 0.9
- """
-
- import csv
- import json
- import sys
- from datetime import datetime
- from typing import Any
-
- try:
-     from rich.console import Console
-     from rich.panel import Panel
-     from rich.table import Table
-     from rich.text import Text
-
-     RICH_AVAILABLE = True
- except ImportError:
-     RICH_AVAILABLE = False
-     Console = None  # type: ignore
-
- from empathy_os.config import _validate_file_path
-
- from .usage_tracker import UsageTracker
-
- # _validate_file_path is now imported from empathy_os.config
- # This eliminates the duplicate definition that previously existed here (lines 30-69)
-
-
- def cmd_telemetry_show(args: Any) -> int:
-     """Show recent telemetry entries.
-
-     Args:
-         args: Parsed command-line arguments
-
-     Returns:
-         Exit code (0 for success)
-
-     """
-     tracker = UsageTracker.get_instance()
-     limit = getattr(args, "limit", 20)
-     days = getattr(args, "days", None)
-
-     entries = tracker.get_recent_entries(limit=limit, days=days)
-
-     if not entries:
-         print("No telemetry data found.")
-         print(f"Data location: {tracker.telemetry_dir}")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-         table = Table(title="Recent LLM Calls", show_header=True, header_style="bold magenta")
-         table.add_column("Time", style="cyan", width=19)
-         table.add_column("Workflow", style="green")
-         table.add_column("Stage", style="blue")
-         table.add_column("Tier", style="yellow")
-         table.add_column("Cost", style="red", justify="right")
-         table.add_column("Tokens", justify="right")
-         table.add_column("Cache", style="green")
-         table.add_column("Duration", justify="right")
-
-         total_cost = 0.0
-         total_duration = 0
-
-         for entry in entries:
-             ts = entry.get("ts", "")
-             # Format timestamp
-             try:
-                 dt = datetime.fromisoformat(ts.rstrip("Z"))
-                 ts_display = dt.strftime("%Y-%m-%d %H:%M:%S")
-             except (ValueError, AttributeError):
-                 ts_display = ts[:19] if len(ts) >= 19 else ts
-
-             workflow = entry.get("workflow", "unknown")
-             stage = entry.get("stage", "-")
-             tier = entry.get("tier", "unknown")
-             cost = entry.get("cost", 0.0)
-             tokens = entry.get("tokens", {})
-             cache = entry.get("cache", {})
-             duration_ms = entry.get("duration_ms", 0)
-
-             tokens_str = f"{tokens.get('input', 0)}/{tokens.get('output', 0)}"
-             cache_str = "HIT" if cache.get("hit") else "MISS"
-             if cache.get("hit"):
-                 cache_type = cache.get("type", "")
-                 if cache_type:
-                     cache_str += f" ({cache_type})"
-
-             table.add_row(
-                 ts_display,
-                 workflow[:20],
-                 stage[:15] if stage else "-",
-                 tier,
-                 f"${cost:.4f}",
-                 tokens_str,
-                 cache_str,
-                 f"{duration_ms}ms",
-             )
-
-             total_cost += cost
-             total_duration += duration_ms
-
-         console.print(table)
-         console.print()
-         console.print(f"[bold]Total Cost:[/bold] ${total_cost:.4f}")
-         console.print(f"[bold]Avg Duration:[/bold] {total_duration // len(entries)}ms")
-         console.print(f"\n[dim]Data location: {tracker.telemetry_dir}[/dim]")
-     else:
-         # Fallback to plain text
-         print(
-             f"\n{'Time':<19} {'Workflow':<20} {'Stage':<15} {'Tier':<10} {'Cost':>10} {'Cache':<10} {'Duration':>10}"
-         )
-         print("-" * 120)
-         total_cost = 0.0
-         for entry in entries:
-             ts = entry.get("ts", "")[:19]
-             workflow = entry.get("workflow", "unknown")[:20]
-             stage = entry.get("stage", "-")[:15]
-             tier = entry.get("tier", "unknown")
-             cost = entry.get("cost", 0.0)
-             cache = entry.get("cache", {})
-             duration_ms = entry.get("duration_ms", 0)
-
-             cache_str = "HIT" if cache.get("hit") else "MISS"
-             print(
-                 f"{ts:<19} {workflow:<20} {stage:<15} {tier:<10} ${cost:>9.4f} {cache_str:<10} {duration_ms:>9}ms"
-             )
-             total_cost += cost
-
-         print("-" * 120)
-         print(f"Total Cost: ${total_cost:.4f}")
-         print(f"\nData location: {tracker.telemetry_dir}")
-
-     return 0
-
-
- def cmd_telemetry_savings(args: Any) -> int:
-     """Calculate and display cost savings.
-
-     Args:
-         args: Parsed command-line arguments
-
-     Returns:
-         Exit code (0 for success)
-
-     """
-     tracker = UsageTracker.get_instance()
-     days = getattr(args, "days", 30)
-
-     savings = tracker.calculate_savings(days=days)
-
-     if savings["total_calls"] == 0:
-         print("No telemetry data found for the specified period.")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Create savings report
-         title = Text("Cost Savings Analysis", style="bold magenta")
-         content_lines = []
-
-         content_lines.append(f"Period: Last {days} days")
-         content_lines.append("")
-         content_lines.append("Usage Pattern:")
-         for tier, pct in sorted(savings["tier_distribution"].items()):
-             content_lines.append(f" {tier:8}: {pct:5.1f}%")
-         content_lines.append("")
-         content_lines.append("Cost Comparison:")
-         content_lines.append(f" Baseline (all PREMIUM): ${savings['baseline_cost']:.2f}")
-         content_lines.append(f" Actual (tier routing): ${savings['actual_cost']:.2f}")
-         content_lines.append("")
-         savings_color = "green" if savings["savings"] > 0 else "red"
-         content_lines.append(
-             f"[bold {savings_color}]YOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)[/bold {savings_color}]"
-         )
-         content_lines.append("")
-         content_lines.append(f"Cache savings: ${savings['cache_savings']:.2f}")
-         content_lines.append(f"Total calls: {savings['total_calls']}")
-
-         panel = Panel(
-             "\n".join(content_lines),
-             title=title,
-             border_style="cyan",
-         )
-         console.print(panel)
-     else:
-         # Fallback to plain text
-         print("\n" + "=" * 60)
-         print("COST SAVINGS ANALYSIS")
-         print("=" * 60)
-         print(f"Period: Last {days} days\n")
-         print("Usage Pattern:")
-         for tier, pct in sorted(savings["tier_distribution"].items()):
-             print(f" {tier:8}: {pct:5.1f}%")
-         print("\nCost Comparison:")
-         print(f" Baseline (all PREMIUM): ${savings['baseline_cost']:.2f}")
-         print(f" Actual (tier routing): ${savings['actual_cost']:.2f}")
-         print(f"\nYOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)")
-         print(f"\nCache savings: ${savings['cache_savings']:.2f}")
-         print(f"Total calls: {savings['total_calls']}")
-         print("=" * 60)
-
-     return 0
-
-
- def cmd_telemetry_cache_stats(args: Any) -> int:
-     """Show prompt caching performance statistics.
-
-     Displays cache hit rates, cost savings, and workflow-level stats.
-
-     Args:
-         args: Parsed command-line arguments
-
-     Returns:
-         Exit code (0 for success)
-     """
-     tracker = UsageTracker.get_instance()
-     days = getattr(args, "days", 7)
-
-     stats = tracker.get_cache_stats(days=days)
-
-     if stats["total_requests"] == 0:
-         print("No telemetry data found for cache analysis.")
-         print(f"Data location: {tracker.telemetry_dir}")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Main stats table
-         table = Table(
-             title=f"Prompt Caching Stats (Last {days} Days)",
-             show_header=True,
-             header_style="bold magenta",
-         )
-         table.add_column("Metric", style="cyan")
-         table.add_column("Value", style="green", justify="right")
-
-         # Cache hit rate
-         hit_rate_color = "green" if stats["hit_rate"] > 0.5 else "yellow"
-         table.add_row(
-             "Cache Hit Rate",
-             f"[{hit_rate_color}]{stats['hit_rate']:.1%}[/{hit_rate_color}]",
-         )
-
-         # Tokens
-         table.add_row("Cache Reads", f"{stats['total_reads']:,} tokens")
-         table.add_row("Cache Writes", f"{stats['total_writes']:,} tokens")
-
-         # Cost savings
-         savings_color = "green" if stats["savings"] > 0 else "dim"
-         table.add_row(
-             "Estimated Savings",
-             f"[bold {savings_color}]${stats['savings']:.2f}[/bold {savings_color}]",
-         )
-
-         # Requests
-         table.add_row("Requests with Cache Hits", f"{stats['hit_count']:,}")
-         table.add_row("Total Requests", f"{stats['total_requests']:,}")
-
-         console.print(table)
-
-         # Per-workflow breakdown
-         if stats["by_workflow"]:
-             console.print("\n")
-             wf_table = Table(
-                 title="Cache Performance by Workflow",
-                 show_header=True,
-                 header_style="bold magenta",
-             )
-             wf_table.add_column("Workflow", style="cyan")
-             wf_table.add_column("Hit Rate", justify="right")
-             wf_table.add_column("Reads", justify="right")
-             wf_table.add_column("Writes", justify="right")
-
-             # Sort by hit rate descending
-             sorted_workflows = sorted(
-                 stats["by_workflow"].items(),
-                 key=lambda x: x[1].get("hit_rate", 0),
-                 reverse=True,
-             )
-
-             for workflow, wf_stats in sorted_workflows[:10]:  # Top 10
-                 hit_rate = wf_stats.get("hit_rate", 0.0)
-                 hit_rate_color = "green" if hit_rate > 0.5 else "yellow"
-                 wf_table.add_row(
-                     workflow,
-                     f"[{hit_rate_color}]{hit_rate:.1%}[/{hit_rate_color}]",
-                     f"{wf_stats['reads']:,}",
-                     f"{wf_stats['writes']:,}",
-                 )
-
-             console.print(wf_table)
-
-         # Recommendations
-         if stats["hit_rate"] < 0.3:
-             console.print("\n")
-             console.print(
-                 Panel(
-                     "[yellow]⚠ Cache hit rate is low (<30%)[/yellow]\n\n"
-                     "Recommendations:\n"
-                     " • Increase reuse of system prompts across requests\n"
-                     " • Group similar requests together (5-min cache TTL)\n"
-                     " • Consider using workflow batching\n"
-                     " • Structure prompts with static content first",
-                     title="Optimization Tips",
-                     border_style="yellow",
-                 )
-             )
-     else:
-         # Fallback to plain text
-         print("\n" + "=" * 60)
-         print(f"PROMPT CACHING STATS (LAST {days} DAYS)")
-         print("=" * 60)
-         print(f"Cache Hit Rate: {stats['hit_rate']:.1%}")
-         print(f"Cache Reads: {stats['total_reads']:,} tokens")
-         print(f"Cache Writes: {stats['total_writes']:,} tokens")
-         print(f"Estimated Savings: ${stats['savings']:.2f}")
-         print(f"Requests with Cache Hits: {stats['hit_count']:,}")
-         print(f"Total Requests: {stats['total_requests']:,}")
-         print("=" * 60)
-
-         if stats["hit_rate"] < 0.3:
-             print("\n⚠ Cache hit rate is low (<30%)")
-             print("Recommendations:")
-             print(" • Increase reuse of system prompts across requests")
-             print(" • Group similar requests together (5-min cache TTL)")
-             print(" • Consider using workflow batching")
-
-     return 0
-
-
- def cmd_telemetry_compare(args: Any) -> int:
-     """Compare telemetry across two time periods.
-
-     Args:
-         args: Parsed command-line arguments
-
-     Returns:
-         Exit code (0 for success)
-
-     """
-     tracker = UsageTracker.get_instance()
-     period1_days = getattr(args, "period1", 7)
-     period2_days = getattr(args, "period2", 30)
-
-     # Get stats for both periods
-     stats1 = tracker.get_stats(days=period1_days)
-     stats2 = tracker.get_stats(days=period2_days)
-
-     if stats1["total_calls"] == 0 or stats2["total_calls"] == 0:
-         print("Insufficient telemetry data for comparison.")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-         table = Table(title="Telemetry Comparison", show_header=True, header_style="bold magenta")
-         table.add_column("Metric", style="cyan")
-         table.add_column(f"Last {period1_days} days", justify="right", style="green")
-         table.add_column(f"Last {period2_days} days", justify="right", style="yellow")
-         table.add_column("Change", justify="right", style="blue")
-
-         # Total calls
-         calls_change = (
-             ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100)
-             if stats2["total_calls"] > 0
-             else 0
-         )
-         table.add_row(
-             "Total Calls",
-             str(stats1["total_calls"]),
-             str(stats2["total_calls"]),
-             f"{calls_change:+.1f}%",
-         )
-
-         # Total cost
-         cost_change = (
-             ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100)
-             if stats2["total_cost"] > 0
-             else 0
-         )
-         table.add_row(
-             "Total Cost",
-             f"${stats1['total_cost']:.2f}",
-             f"${stats2['total_cost']:.2f}",
-             f"{cost_change:+.1f}%",
-         )
-
-         # Avg cost per call
-         avg1 = stats1["total_cost"] / stats1["total_calls"] if stats1["total_calls"] > 0 else 0
-         avg2 = stats2["total_cost"] / stats2["total_calls"] if stats2["total_calls"] > 0 else 0
-         avg_change = ((avg1 - avg2) / avg2 * 100) if avg2 > 0 else 0
-         table.add_row(
-             "Avg Cost/Call",
-             f"${avg1:.4f}",
-             f"${avg2:.4f}",
-             f"{avg_change:+.1f}%",
-         )
-
-         # Cache hit rate
-         cache_change = stats1["cache_hit_rate"] - stats2["cache_hit_rate"]
-         table.add_row(
-             "Cache Hit Rate",
-             f"{stats1['cache_hit_rate']:.1f}%",
-             f"{stats2['cache_hit_rate']:.1f}%",
-             f"{cache_change:+.1f}pp",
-         )
-
-         console.print(table)
-     else:
-         # Fallback to plain text
-         print("\n" + "=" * 80)
-         print("TELEMETRY COMPARISON")
-         print("=" * 80)
-         print(
-             f"{'Metric':<20} {'Last ' + str(period1_days) + ' days':>20} {'Last ' + str(period2_days) + ' days':>20} {'Change':>15}"
-         )
-         print("-" * 80)
-
-         calls_change = (
-             ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100)
-             if stats2["total_calls"] > 0
-             else 0
-         )
-         print(
-             f"{'Total Calls':<20} {stats1['total_calls']:>20} {stats2['total_calls']:>20} {calls_change:>14.1f}%"
-         )
-
-         cost_change = (
-             ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100)
-             if stats2["total_cost"] > 0
-             else 0
-         )
-         print(
-             f"{'Total Cost':<20} ${stats1['total_cost']:>19.2f} ${stats2['total_cost']:>19.2f} {cost_change:>14.1f}%"
-         )
-
-         avg1 = stats1["total_cost"] / stats1["total_calls"] if stats1["total_calls"] > 0 else 0
-         avg2 = stats2["total_cost"] / stats2["total_calls"] if stats2["total_calls"] > 0 else 0
-         avg_change = ((avg1 - avg2) / avg2 * 100) if avg2 > 0 else 0
-         print(f"{'Avg Cost/Call':<20} ${avg1:>19.4f} ${avg2:>19.4f} {avg_change:>14.1f}%")
-
-         cache_change = stats1["cache_hit_rate"] - stats2["cache_hit_rate"]
-         print(
-             f"{'Cache Hit Rate':<20} {stats1['cache_hit_rate']:>19.1f}% {stats2['cache_hit_rate']:>19.1f}% {cache_change:>14.1f}pp"
-         )
-
-         print("=" * 80)
-
-     return 0
-
-
- def cmd_telemetry_reset(args: Any) -> int:
-     """Reset/clear all telemetry data.
-
-     Args:
-         args: Parsed command-line arguments
-
-     Returns:
-         Exit code (0 for success)
-
-     """
-     tracker = UsageTracker.get_instance()
-     confirm = getattr(args, "confirm", False)
-
-     if not confirm:
-         print("WARNING: This will permanently delete all telemetry data.")
-         print(f"Location: {tracker.telemetry_dir}")
-         print("\nUse --confirm to proceed.")
-         return 1
-
-     count = tracker.reset()
-     print(f"Deleted {count} telemetry entries.")
-     print("New tracking starts now.")
-     return 0
-
-
- def cmd_telemetry_export(args: Any) -> int:
-     """Export telemetry data to JSON or CSV.
-
-     Args:
-         args: Parsed command-line arguments
-
-     Returns:
-         Exit code (0 for success)
-
-     """
-     tracker = UsageTracker.get_instance()
-     format_type = getattr(args, "format", "json")
-     output_file = getattr(args, "output", None)
-     days = getattr(args, "days", None)
-
-     entries = tracker.export_to_dict(days=days)
-
-     if not entries:
-         print("No telemetry data to export.")
-         return 0
-
-     if format_type == "json":
-         # Export as JSON
-         if output_file:
-             validated_path = _validate_file_path(output_file)
-             with open(validated_path, "w", encoding="utf-8") as f:
-                 json.dump(entries, f, indent=2)
-             print(f"Exported {len(entries)} entries to {validated_path}")
-         else:
-             print(json.dumps(entries, indent=2))
-     elif format_type == "csv":
-         # Export as CSV
-         if not entries:
-             print("No data to export.")
-             return 0
-
-         # Get all possible fields
-         fieldnames = [
-             "ts",
-             "workflow",
-             "stage",
-             "tier",
-             "model",
-             "provider",
-             "cost",
-             "tokens_input",
-             "tokens_output",
-             "cache_hit",
-             "cache_type",
-             "duration_ms",
-         ]
-
-         if output_file:
-             validated_path = _validate_file_path(output_file)
-             with open(validated_path, "w", newline="", encoding="utf-8") as f:
-                 writer = csv.DictWriter(f, fieldnames=fieldnames)
-                 writer.writeheader()
-                 for entry in entries:
-                     row = {
-                         "ts": entry.get("ts", ""),
-                         "workflow": entry.get("workflow", ""),
-                         "stage": entry.get("stage", ""),
-                         "tier": entry.get("tier", ""),
-                         "model": entry.get("model", ""),
-                         "provider": entry.get("provider", ""),
-                         "cost": entry.get("cost", 0.0),
-                         "tokens_input": entry.get("tokens", {}).get("input", 0),
-                         "tokens_output": entry.get("tokens", {}).get("output", 0),
-                         "cache_hit": entry.get("cache", {}).get("hit", False),
-                         "cache_type": entry.get("cache", {}).get("type", ""),
-                         "duration_ms": entry.get("duration_ms", 0),
-                     }
-                     writer.writerow(row)
-             print(f"Exported {len(entries)} entries to {validated_path}")
-         else:
-             # Print to stdout
-             writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames)
-             writer.writeheader()
-             for entry in entries:
-                 row = {
-                     "ts": entry.get("ts", ""),
-                     "workflow": entry.get("workflow", ""),
-                     "stage": entry.get("stage", ""),
-                     "tier": entry.get("tier", ""),
-                     "model": entry.get("model", ""),
-                     "provider": entry.get("provider", ""),
-                     "cost": entry.get("cost", 0.0),
-                     "tokens_input": entry.get("tokens", {}).get("input", 0),
-                     "tokens_output": entry.get("tokens", {}).get("output", 0),
-                     "cache_hit": entry.get("cache", {}).get("hit", False),
-                     "cache_type": entry.get("cache", {}).get("type", ""),
-                     "duration_ms": entry.get("duration_ms", 0),
-                 }
-                 writer.writerow(row)
-     else:
-         print(f"Unknown format: {format_type}")
-         print("Supported formats: json, csv")
-         return 1
-
-     return 0
-
-
- # ==============================================================================
- # Dashboard Commands
- # ==============================================================================
- # cmd_telemetry_dashboard and cmd_file_test_dashboard have been moved to:
- # src/empathy_os/telemetry/commands/dashboard_commands.py
- # They are imported at the top of this file for backward compatibility.
- # ==============================================================================
-
-
- # ==============================================================================
- # Tier 1 Automation Monitoring CLI Commands
- # ==============================================================================
-
-
-
- # ==============================================================================
- # Dashboard Commands (Extracted to Separate Module)
- # ==============================================================================
- # cmd_telemetry_dashboard and cmd_file_test_dashboard moved to:
- # src/empathy_os/telemetry/commands/dashboard_commands.py
- # Imported at top of file for backward compatibility.
- # ==============================================================================
-
- def cmd_tier1_status(args: Any) -> int:
-     """Show comprehensive Tier 1 automation status.
-
-     Args:
-         args: Parsed command-line arguments (hours)
-
-     Returns:
-         Exit code (0 for success)
-     """
-     from datetime import timedelta
-
-     from empathy_os.models.telemetry import TelemetryAnalytics, get_telemetry_store
-
-     try:
-         store = get_telemetry_store()
-         analytics = TelemetryAnalytics(store)
-
-         hours = getattr(args, "hours", 24)
-         since = datetime.utcnow() - timedelta(hours=hours)
-
-         summary = analytics.tier1_summary(since=since)
-     except Exception as e:
-         print(f"Error retrieving Tier 1 status: {e}")
-         return 1
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Task Routing Panel
-         routing = summary["task_routing"]
-         routing_text = Text()
-         routing_text.append(f"Total Tasks: {routing['total_tasks']}\n")
-         routing_text.append(f"Success Rate: {routing['accuracy_rate']:.1%}\n", style="green bold")
-         routing_text.append(f"Avg Confidence: {routing['avg_confidence']:.2f}\n")
-
-         # Test Execution Panel
-         tests = summary["test_execution"]
-         tests_text = Text()
-         tests_text.append(f"Total Runs: {tests['total_executions']}\n")
-         tests_text.append(f"Success Rate: {tests['success_rate']:.1%}\n", style="green bold")
-         tests_text.append(f"Avg Duration: {tests['avg_duration_seconds']:.1f}s\n")
-         tests_text.append(f"Total Failures: {tests['total_failures']}\n")
-
-         # Coverage Panel
-         coverage = summary["coverage"]
-         coverage_text = Text()
-         coverage_text.append(f"Current: {coverage['current_coverage']:.1f}%\n", style="cyan bold")
-         coverage_text.append(f"Change: {coverage['change']:+.1f}%\n")
-         coverage_text.append(f"Trend: {coverage['trend']}\n")
-         coverage_text.append(f"Critical Gaps: {coverage['critical_gaps_count']}\n")
-
-         # Agent Performance Panel
-         agent = summary["agent_performance"]
-         agent_text = Text()
-         agent_text.append(f"Active Agents: {len(agent['by_agent'])}\n")
-         agent_text.append(f"Automation Rate: {agent['automation_rate']:.1%}\n", style="green bold")
-         agent_text.append(f"Human Review Rate: {agent['human_review_rate']:.1%}\n")
-
-         # Display all panels
-         console.print(f"\n[bold]Tier 1 Automation Status[/bold] (last {hours} hours)\n")
-         console.print(Panel(routing_text, title="Task Routing", border_style="blue"))
-         console.print(Panel(tests_text, title="Test Execution", border_style="green"))
-         console.print(Panel(coverage_text, title="Coverage", border_style="cyan"))
-         console.print(Panel(agent_text, title="Agent Performance", border_style="magenta"))
-     else:
-         # Plain text fallback
-         routing = summary["task_routing"]
-         tests = summary["test_execution"]
-         coverage = summary["coverage"]
-         agent = summary["agent_performance"]
-
-         print(f"\nTier 1 Automation Status (last {hours} hours)")
-         print("=" * 50)
-         print("\nTask Routing:")
-         print(f" Total Tasks: {routing['total_tasks']}")
-         print(f" Success Rate: {routing['accuracy_rate']:.1%}")
-         print(f" Avg Confidence: {routing['avg_confidence']:.2f}")
-
-         print("\nTest Execution:")
-         print(f" Total Runs: {tests['total_executions']}")
-         print(f" Success Rate: {tests['success_rate']:.1%}")
-         print(f" Avg Duration: {tests['avg_duration_seconds']:.1f}s")
-
-         print("\nCoverage:")
-         print(f" Current: {coverage['current_coverage']:.1f}%")
-         print(f" Trend: {coverage['trend']}")
-
-         print("\nAgent Performance:")
-         print(f" Active Agents: {len(agent['by_agent'])}")
-         print(f" Automation Rate: {agent['automation_rate']:.1%}")
-
-     return 0
-
-
- def cmd_task_routing_report(args: Any) -> int:
-     """Show detailed task routing report.
-
-     Args:
-         args: Parsed command-line arguments (hours)
-
-     Returns:
-         Exit code (0 for success)
-     """
-     from datetime import timedelta
-
-     from empathy_os.models.telemetry import TelemetryAnalytics, get_telemetry_store
-
-     try:
-         store = get_telemetry_store()
-         analytics = TelemetryAnalytics(store)
-
-         hours = getattr(args, "hours", 24)
-         since = datetime.utcnow() - timedelta(hours=hours)
-
-         stats = analytics.task_routing_accuracy(since=since)
-     except Exception as e:
-         print(f"Error retrieving task routing report: {e}")
-         return 1
-
-     if not stats["total_tasks"]:
-         print(f"No task routing data found in the last {hours} hours.")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Summary table
-         table = Table(title=f"Task Routing Report (last {hours} hours)")
-         table.add_column("Metric", style="cyan")
-         table.add_column("Value", style="green", justify="right")
-
-         table.add_row("Total Tasks", str(stats["total_tasks"]))
-         table.add_row("Successful", str(stats["successful_routing"]))
-         table.add_row("Accuracy Rate", f"{stats['accuracy_rate']:.1%}")
-         table.add_row("Avg Confidence", f"{stats['avg_confidence']:.2f}")
-
-         console.print(table)
-
-         # By task type table
-         if stats["by_task_type"]:
-             type_table = Table(title="Breakdown by Task Type")
-             type_table.add_column("Task Type", style="cyan")
-             type_table.add_column("Total", justify="right")
-             type_table.add_column("Success", justify="right")
-             type_table.add_column("Rate", justify="right", style="green")
-
-             for task_type, data in stats["by_task_type"].items():
-                 type_table.add_row(
-                     task_type, str(data["total"]), str(data["success"]), f"{data['rate']:.1%}"
-                 )
-
-             console.print(type_table)
-     else:
-         # Plain text fallback
-         print(f"\nTask Routing Report (last {hours} hours)")
-         print("=" * 50)
-         print(f"Total Tasks: {stats['total_tasks']}")
-         print(f"Successful: {stats['successful_routing']}")
-         print(f"Accuracy Rate: {stats['accuracy_rate']:.1%}")
-         print(f"Avg Confidence: {stats['avg_confidence']:.2f}")
-
-         if stats["by_task_type"]:
-             print("\nBy Task Type:")
-             for task_type, data in stats["by_task_type"].items():
-                 print(f" {task_type}: {data['success']}/{data['total']} ({data['rate']:.1%})")
-
-     return 0
-
-
- def cmd_test_status(args: Any) -> int:
-     """Show test execution status.
-
-     Args:
-         args: Parsed command-line arguments (hours)
-
-     Returns:
-         Exit code (0 for success)
-     """
-     from datetime import timedelta
-
-     from empathy_os.models.telemetry import TelemetryAnalytics, get_telemetry_store
-
-     try:
-         store = get_telemetry_store()
-         analytics = TelemetryAnalytics(store)
-
-         hours = getattr(args, "hours", 24)
-         since = datetime.utcnow() - timedelta(hours=hours)
-
-         stats = analytics.test_execution_trends(since=since)
-         coverage = analytics.coverage_progress(since=since)
-     except Exception as e:
-         print(f"Error retrieving test status: {e}")
-         return 1
-
-     if not stats["total_executions"]:
-         print(f"No test execution data found in the last {hours} hours.")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Test execution table
-         table = Table(title=f"Test Execution Status (last {hours} hours)")
-         table.add_column("Metric", style="cyan")
-         table.add_column("Value", style="green", justify="right")
-
-         table.add_row("Total Runs", str(stats["total_executions"]))
-         table.add_row("Success Rate", f"{stats['success_rate']:.1%}")
-         table.add_row("Avg Duration", f"{stats['avg_duration_seconds']:.1f}s")
-         table.add_row("Total Tests Run", str(stats["total_tests_run"]))
-         table.add_row("Total Failures", str(stats["total_failures"]))
-         table.add_row("Current Coverage", f"{coverage['current_coverage']:.1f}%")
-         table.add_row("Coverage Trend", coverage["trend"])
-
-         console.print(table)
-
-         # Most failing tests
-         if stats["most_failing_tests"]:
-             fail_table = Table(title="Most Frequently Failing Tests")
-             fail_table.add_column("Test Name", style="cyan")
-             fail_table.add_column("Failures", justify="right", style="red")
-
-             for test in stats["most_failing_tests"][:10]:
-                 fail_table.add_row(test["name"], str(test["failures"]))
-
-             console.print(fail_table)
-     else:
-         # Plain text fallback
-         print(f"\nTest Execution Status (last {hours} hours)")
-         print("=" * 50)
-         print(f"Total Runs: {stats['total_executions']}")
-         print(f"Success Rate: {stats['success_rate']:.1%}")
-         print(f"Avg Duration: {stats['avg_duration_seconds']:.1f}s")
-         print(f"Total Tests Run: {stats['total_tests_run']}")
-         print(f"Total Failures: {stats['total_failures']}")
-         print(f"Current Coverage: {coverage['current_coverage']:.1f}%")
-
-         if stats["most_failing_tests"]:
-             print("\nMost Frequently Failing Tests:")
-             for test in stats["most_failing_tests"][:10]:
-                 print(f" {test['name']}: {test['failures']} failures")
-
-     return 0
-
-
- def cmd_agent_performance(args: Any) -> int:
-     """Show agent performance metrics.
-
-     Args:
-         args: Parsed command-line arguments (hours)
-
-     Returns:
-         Exit code (0 for success)
-     """
-     from datetime import timedelta
-
-     from empathy_os.models.telemetry import TelemetryAnalytics, get_telemetry_store
-
-     try:
-         store = get_telemetry_store()
-         analytics = TelemetryAnalytics(store)
-
-         hours = getattr(args, "hours", 168)  # Default 7 days for agent performance
-         since = datetime.utcnow() - timedelta(hours=hours)
-
-         stats = analytics.agent_performance(since=since)
-     except Exception as e:
-         print(f"Error retrieving agent performance: {e}")
-         return 1
-
-     if not stats["by_agent"]:
-         print(f"No agent assignment data found in the last {hours} hours.")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Agent performance table
-         table = Table(title=f"Agent Performance (last {hours} hours)")
-         table.add_column("Agent", style="cyan")
-         table.add_column("Assignments", justify="right")
-         table.add_column("Completed", justify="right")
-         table.add_column("Success Rate", justify="right", style="green")
-         table.add_column("Avg Duration", justify="right")
-
-         for agent, data in stats["by_agent"].items():
-             table.add_row(
-                 agent,
-                 str(data["assignments"]),
-                 str(data["completed"]),
-                 f"{data['success_rate']:.1%}",
-                 f"{data['avg_duration_hours']:.2f}h",
-             )
-
-         console.print(table)
-
-         # Summary panel
-         summary_text = Text()
-         summary_text.append(
-             f"Automation Rate: {stats['automation_rate']:.1%}\n", style="green bold"
-         )
-         summary_text.append(f"Human Review Rate: {stats['human_review_rate']:.1%}\n")
-
-         console.print(Panel(summary_text, title="Summary", border_style="blue"))
-     else:
-         # Plain text fallback
-         print(f"\nAgent Performance (last {hours} hours)")
-         print("=" * 50)
-
-         for agent, data in stats["by_agent"].items():
-             print(f"\n{agent}:")
-             print(f" Assignments: {data['assignments']}")
-             print(f" Completed: {data['completed']}")
-             print(f" Success Rate: {data['success_rate']:.1%}")
-             print(f" Avg Duration: {data['avg_duration_hours']:.2f}h")
-
-         print(f"\nAutomation Rate: {stats['automation_rate']:.1%}")
-         print(f"Human Review Rate: {stats['human_review_rate']:.1%}")
-
-     return 0
-
-
- def cmd_sonnet_opus_analysis(args: Any) -> int:
-     """Show Sonnet 4.5 → Opus 4.5 fallback analysis and cost savings.
-
-     Args:
-         args: Parsed command-line arguments (days)
-
-     Returns:
-         Exit code (0 for success)
-     """
-     from datetime import timedelta
-
-     from empathy_os.models.telemetry import TelemetryAnalytics, get_telemetry_store
-
-     store = get_telemetry_store()
-     analytics = TelemetryAnalytics(store)
-
-     days = getattr(args, "days", 30)
-     since = datetime.utcnow() - timedelta(days=days)
-
-     stats = analytics.sonnet_opus_fallback_analysis(since=since)
-
-     if stats["total_calls"] == 0:
-         print(f"No Sonnet/Opus calls found in the last {days} days.")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Fallback Performance Panel
-         perf_text = Text()
-         perf_text.append(f"Total Anthropic Calls: {stats['total_calls']}\n")
-         perf_text.append(f"Sonnet 4.5 Attempts: {stats['sonnet_attempts']}\n")
-         perf_text.append(
-             f"Sonnet Success Rate: {stats['success_rate_sonnet']:.1f}%\n",
-             style="green bold",
-         )
-         perf_text.append(f"Opus Fallbacks: {stats['opus_fallbacks']}\n")
-         perf_text.append(
-             f"Fallback Rate: {stats['fallback_rate']:.1f}%\n",
-             style="yellow bold" if stats["fallback_rate"] > 10 else "green",
-         )
-
-         console.print(
-             Panel(
-                 perf_text,
-                 title=f"Sonnet 4.5 → Opus 4.5 Fallback Performance (last {days} days)",
-                 border_style="cyan",
-             )
-         )
-
-         # Cost Savings Panel
-         savings_text = Text()
-         savings_text.append(f"Actual Cost: ${stats['actual_cost']:.2f}\n")
-         savings_text.append(f"Always-Opus Cost: ${stats['always_opus_cost']:.2f}\n")
-         savings_text.append(
-             f"Savings: ${stats['savings']:.2f} ({stats['savings_percent']:.1f}%)\n",
-             style="green bold",
-         )
-         savings_text.append("\n")
-         savings_text.append(f"Avg Cost/Call (actual): ${stats['avg_cost_per_call']:.4f}\n")
-         savings_text.append(f"Avg Cost/Call (all Opus): ${stats['avg_opus_cost_per_call']:.4f}\n")
-
-         console.print(Panel(savings_text, title="Cost Savings Analysis", border_style="green"))
-
-         # Recommendation
-         if stats["fallback_rate"] < 5:
-             rec_text = Text()
-             rec_text.append("✅ Excellent Performance!\n", style="green bold")
-             rec_text.append(
-                 f"Sonnet 4.5 handles {100 - stats['fallback_rate']:.1f}% of tasks successfully.\n"
-             )
-             rec_text.append(
-                 f"You're saving ${stats['savings']:.2f} compared to always using Opus.\n"
-             )
-             console.print(Panel(rec_text, title="Recommendation", border_style="green"))
-         elif stats["fallback_rate"] < 15:
-             rec_text = Text()
-             rec_text.append("⚠️ Moderate Fallback Rate\n", style="yellow bold")
-             rec_text.append(f"{stats['fallback_rate']:.1f}% of tasks need Opus fallback.\n")
-             rec_text.append("Consider analyzing which tasks fail on Sonnet.\n")
-             console.print(Panel(rec_text, title="Recommendation", border_style="yellow"))
-         else:
-             rec_text = Text()
-             rec_text.append("❌ High Fallback Rate\n", style="red bold")
-             rec_text.append(f"{stats['fallback_rate']:.1f}% of tasks need Opus fallback.\n")
-             rec_text.append(
-                 "Consider using Opus directly for complex tasks to avoid retry overhead.\n"
-             )
-             console.print(Panel(rec_text, title="Recommendation", border_style="red"))
-     else:
-         # Plain text fallback
-         print(f"\nSonnet 4.5 → Opus 4.5 Fallback Analysis (last {days} days)")
-         print("=" * 60)
-         print("\nFallback Performance:")
-         print(f"  Total Anthropic Calls: {stats['total_calls']}")
-         print(f"  Sonnet 4.5 Attempts: {stats['sonnet_attempts']}")
-         print(f"  Sonnet Success Rate: {stats['success_rate_sonnet']:.1f}%")
-         print(f"  Opus Fallbacks: {stats['opus_fallbacks']}")
-         print(f"  Fallback Rate: {stats['fallback_rate']:.1f}%")
-         print("\nCost Savings:")
-         print(f"  Actual Cost: ${stats['actual_cost']:.2f}")
-         print(f"  Always-Opus Cost: ${stats['always_opus_cost']:.2f}")
-         print(f"  Savings: ${stats['savings']:.2f} ({stats['savings_percent']:.1f}%)")
-         print(f"  Avg Cost/Call (actual): ${stats['avg_cost_per_call']:.4f}")
-         print(f"  Avg Cost/Call (all Opus): ${stats['avg_opus_cost_per_call']:.4f}")
-
-         if stats["fallback_rate"] < 5:
-             print(f"\n✅ Excellent! Sonnet handles {100 - stats['fallback_rate']:.1f}% of tasks.")
-         elif stats["fallback_rate"] < 15:
-             print(f"\n⚠️ Moderate fallback rate ({stats['fallback_rate']:.1f}%).")
-         else:
-             print(f"\n❌ High fallback rate ({stats['fallback_rate']:.1f}%).")
-
-     return 0
-
-
- def cmd_file_test_status(args: Any) -> int:
-     """Show per-file test status.
-
-     Displays the test status for individual files, including:
-     - Last test result (passed/failed/error/no_tests)
-     - When tests were last run
-     - Whether tests are stale (source modified since last test)
-
-     Args:
-         args: Parsed command-line arguments
-             - file: Optional specific file to check
-             - failed: Show only failed tests
-             - stale: Show only stale tests
-             - limit: Maximum files to show
-
-     Returns:
-         Exit code (0 for success)
-     """
-     from empathy_os.models.telemetry import get_telemetry_store
-
-     try:
-         store = get_telemetry_store()
-
-         file_path = getattr(args, "file", None)
-         failed_only = getattr(args, "failed", False)
-         stale_only = getattr(args, "stale", False)
-         limit = getattr(args, "limit", 50)
-
-         if file_path:
-             # Show status for a specific file
-             record = store.get_latest_file_test(file_path)
-             if record is None:
-                 print(f"No test record found for: {file_path}")
-                 return 0
-             records = [record]
-         else:
-             # Get all file test records
-             all_records = store.get_file_tests(limit=100000)
-
-             if not all_records:
-                 print("No per-file test records found.")
-                 print("Run: empathy test-file <source_file> to track tests for a file.")
-                 return 0
-
-             # Get latest record per file
-             latest_by_file: dict[str, Any] = {}
-             for record in all_records:
-                 existing = latest_by_file.get(record.file_path)
-                 if existing is None or record.timestamp > existing.timestamp:
-                     latest_by_file[record.file_path] = record
-
-             records = list(latest_by_file.values())
-
-             # Apply filters
-             if failed_only:
-                 records = [r for r in records if r.last_test_result in ("failed", "error")]
-             if stale_only:
-                 records = [r for r in records if r.is_stale]
-
-             # Sort by file path and limit
-             records.sort(key=lambda r: r.file_path)
-             records = records[:limit]
-
-     except Exception as e:
-         print(f"Error retrieving file test status: {e}")
-         return 1
-
-     if not records:
-         filter_desc = []
-         if failed_only:
-             filter_desc.append("failed")
-         if stale_only:
-             filter_desc.append("stale")
-         filter_str = " and ".join(filter_desc) if filter_desc else "matching"
-         print(f"No {filter_str} file test records found.")
-         return 0
-
-     if RICH_AVAILABLE and Console is not None:
-         console = Console()
-
-         # Summary stats
-         total = len(records)
-         passed = sum(1 for r in records if r.last_test_result == "passed")
-         failed = sum(1 for r in records if r.last_test_result in ("failed", "error"))
-         no_tests = sum(1 for r in records if r.last_test_result == "no_tests")
-         stale = sum(1 for r in records if r.is_stale)
-
-         summary = Text()
-         summary.append(f"Files: {total} ", style="bold")
-         summary.append(f"Passed: {passed} ", style="green")
-         summary.append(f"Failed: {failed} ", style="red")
-         summary.append(f"No Tests: {no_tests} ", style="yellow")
-         summary.append(f"Stale: {stale}", style="magenta")
-         console.print(Panel(summary, title="Per-File Test Status Summary", border_style="cyan"))
-
-         # File status table
-         table = Table(title="File Test Status")
-         table.add_column("File", style="cyan", max_width=50)
-         table.add_column("Result", style="bold")
-         table.add_column("Tests", justify="right")
-         table.add_column("Passed", justify="right", style="green")
-         table.add_column("Failed", justify="right", style="red")
-         table.add_column("Duration", justify="right")
-         table.add_column("Last Run", style="dim")
-         table.add_column("Stale", style="magenta")
-
-         for record in records:
-             # Format result with color
-             result = record.last_test_result
-             if result == "passed":
-                 result_style = "green"
-             elif result in ("failed", "error"):
-                 result_style = "red"
-             elif result == "no_tests":
-                 result_style = "yellow"
-             else:
-                 result_style = "dim"
-
-             # Format timestamp
-             try:
-                 dt = datetime.fromisoformat(record.timestamp.rstrip("Z"))
-                 ts_display = dt.strftime("%Y-%m-%d %H:%M")
-             except (ValueError, AttributeError):
-                 ts_display = record.timestamp[:16] if record.timestamp else "-"
-
-             # Stale indicator
-             stale_str = "YES" if record.is_stale else ""
-
-             table.add_row(
-                 record.file_path,
-                 Text(result, style=result_style),
-                 str(record.test_count),
-                 str(record.passed),
-                 str(record.failed + record.errors),
-                 f"{record.duration_seconds:.1f}s" if record.duration_seconds else "-",
-                 ts_display,
-                 stale_str,
-             )
-
-         console.print(table)
-
-         # Show failed test details if any
-         failed_records = [r for r in records if r.failed_tests]
-         if failed_records:
-             fail_table = Table(title="Failed Test Details")
-             fail_table.add_column("File", style="cyan")
-             fail_table.add_column("Test Name", style="red")
-             fail_table.add_column("Error")
-
-             for record in failed_records[:10]:
-                 for test in record.failed_tests[:3]:
-                     fail_table.add_row(
-                         record.file_path,
-                         test.get("name", "unknown"),
-                         test.get("error", "")[:50],
-                     )
-
-             console.print(fail_table)
-
-     else:
-         # Plain text fallback
-         print("\nPer-File Test Status")
-         print("=" * 80)
-
-         for record in records:
-             status = record.last_test_result.upper()
-             stale_marker = " [STALE]" if record.is_stale else ""
-             print(f"\n{record.file_path}")
-             print(f"  Status: {status}{stale_marker}")
-             print(
-                 f"  Tests: {record.test_count} (passed: {record.passed}, failed: {record.failed})"
-             )
-             if record.duration_seconds:
-                 print(f"  Duration: {record.duration_seconds:.1f}s")
-             print(f"  Last Run: {record.timestamp[:19]}")
-
-             if record.failed_tests:
-                 print("  Failed Tests:")
-                 for test in record.failed_tests[:3]:
-                     print(f"    - {test.get('name', 'unknown')}: {test.get('error', '')[:40]}")
-
-     return 0
-
-