empathy-framework 5.2.1-py3-none-any.whl → 5.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (480)
  1. empathy_framework-5.4.0.dist-info/METADATA +47 -0
  2. empathy_framework-5.4.0.dist-info/RECORD +8 -0
  3. {empathy_framework-5.2.1.dist-info → empathy_framework-5.4.0.dist-info}/top_level.txt +0 -1
  4. empathy_healthcare_plugin/__init__.py +12 -11
  5. empathy_llm_toolkit/__init__.py +12 -26
  6. empathy_os/__init__.py +12 -356
  7. empathy_software_plugin/__init__.py +12 -11
  8. empathy_framework-5.2.1.dist-info/METADATA +0 -1002
  9. empathy_framework-5.2.1.dist-info/RECORD +0 -478
  10. empathy_framework-5.2.1.dist-info/entry_points.txt +0 -26
  11. empathy_framework-5.2.1.dist-info/licenses/LICENSE +0 -201
  12. empathy_framework-5.2.1.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -101
  13. empathy_healthcare_plugin/monitors/__init__.py +0 -9
  14. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +0 -315
  15. empathy_healthcare_plugin/monitors/monitoring/__init__.py +0 -44
  16. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +0 -300
  17. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +0 -214
  18. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +0 -306
  19. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +0 -389
  20. empathy_healthcare_plugin/protocols/cardiac.json +0 -93
  21. empathy_healthcare_plugin/protocols/post_operative.json +0 -92
  22. empathy_healthcare_plugin/protocols/respiratory.json +0 -92
  23. empathy_healthcare_plugin/protocols/sepsis.json +0 -141
  24. empathy_llm_toolkit/README.md +0 -553
  25. empathy_llm_toolkit/agent_factory/__init__.py +0 -53
  26. empathy_llm_toolkit/agent_factory/adapters/__init__.py +0 -85
  27. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +0 -312
  28. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +0 -483
  29. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +0 -298
  30. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +0 -362
  31. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +0 -333
  32. empathy_llm_toolkit/agent_factory/adapters/native.py +0 -228
  33. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +0 -423
  34. empathy_llm_toolkit/agent_factory/base.py +0 -305
  35. empathy_llm_toolkit/agent_factory/crews/__init__.py +0 -67
  36. empathy_llm_toolkit/agent_factory/crews/code_review.py +0 -1113
  37. empathy_llm_toolkit/agent_factory/crews/health_check.py +0 -1262
  38. empathy_llm_toolkit/agent_factory/crews/refactoring.py +0 -1128
  39. empathy_llm_toolkit/agent_factory/crews/security_audit.py +0 -1018
  40. empathy_llm_toolkit/agent_factory/decorators.py +0 -287
  41. empathy_llm_toolkit/agent_factory/factory.py +0 -558
  42. empathy_llm_toolkit/agent_factory/framework.py +0 -193
  43. empathy_llm_toolkit/agent_factory/memory_integration.py +0 -328
  44. empathy_llm_toolkit/agent_factory/resilient.py +0 -320
  45. empathy_llm_toolkit/agents_md/__init__.py +0 -22
  46. empathy_llm_toolkit/agents_md/loader.py +0 -218
  47. empathy_llm_toolkit/agents_md/parser.py +0 -271
  48. empathy_llm_toolkit/agents_md/registry.py +0 -307
  49. empathy_llm_toolkit/claude_memory.py +0 -466
  50. empathy_llm_toolkit/cli/__init__.py +0 -8
  51. empathy_llm_toolkit/cli/sync_claude.py +0 -487
  52. empathy_llm_toolkit/code_health.py +0 -1313
  53. empathy_llm_toolkit/commands/__init__.py +0 -51
  54. empathy_llm_toolkit/commands/context.py +0 -375
  55. empathy_llm_toolkit/commands/loader.py +0 -301
  56. empathy_llm_toolkit/commands/models.py +0 -231
  57. empathy_llm_toolkit/commands/parser.py +0 -371
  58. empathy_llm_toolkit/commands/registry.py +0 -429
  59. empathy_llm_toolkit/config/__init__.py +0 -29
  60. empathy_llm_toolkit/config/unified.py +0 -291
  61. empathy_llm_toolkit/context/__init__.py +0 -22
  62. empathy_llm_toolkit/context/compaction.py +0 -455
  63. empathy_llm_toolkit/context/manager.py +0 -434
  64. empathy_llm_toolkit/contextual_patterns.py +0 -361
  65. empathy_llm_toolkit/core.py +0 -907
  66. empathy_llm_toolkit/git_pattern_extractor.py +0 -435
  67. empathy_llm_toolkit/hooks/__init__.py +0 -24
  68. empathy_llm_toolkit/hooks/config.py +0 -306
  69. empathy_llm_toolkit/hooks/executor.py +0 -289
  70. empathy_llm_toolkit/hooks/registry.py +0 -302
  71. empathy_llm_toolkit/hooks/scripts/__init__.py +0 -39
  72. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +0 -201
  73. empathy_llm_toolkit/hooks/scripts/first_time_init.py +0 -285
  74. empathy_llm_toolkit/hooks/scripts/pre_compact.py +0 -207
  75. empathy_llm_toolkit/hooks/scripts/session_end.py +0 -183
  76. empathy_llm_toolkit/hooks/scripts/session_start.py +0 -163
  77. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +0 -225
  78. empathy_llm_toolkit/learning/__init__.py +0 -30
  79. empathy_llm_toolkit/learning/evaluator.py +0 -438
  80. empathy_llm_toolkit/learning/extractor.py +0 -514
  81. empathy_llm_toolkit/learning/storage.py +0 -560
  82. empathy_llm_toolkit/levels.py +0 -227
  83. empathy_llm_toolkit/pattern_confidence.py +0 -414
  84. empathy_llm_toolkit/pattern_resolver.py +0 -272
  85. empathy_llm_toolkit/pattern_summary.py +0 -350
  86. empathy_llm_toolkit/providers.py +0 -967
  87. empathy_llm_toolkit/routing/__init__.py +0 -32
  88. empathy_llm_toolkit/routing/model_router.py +0 -362
  89. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +0 -413
  90. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +0 -384
  91. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +0 -271
  92. empathy_llm_toolkit/security/QUICK_REFERENCE.md +0 -316
  93. empathy_llm_toolkit/security/README.md +0 -262
  94. empathy_llm_toolkit/security/__init__.py +0 -62
  95. empathy_llm_toolkit/security/audit_logger.py +0 -929
  96. empathy_llm_toolkit/security/audit_logger_example.py +0 -152
  97. empathy_llm_toolkit/security/pii_scrubber.py +0 -640
  98. empathy_llm_toolkit/security/secrets_detector.py +0 -678
  99. empathy_llm_toolkit/security/secrets_detector_example.py +0 -304
  100. empathy_llm_toolkit/security/secure_memdocs.py +0 -1192
  101. empathy_llm_toolkit/security/secure_memdocs_example.py +0 -278
  102. empathy_llm_toolkit/session_status.py +0 -745
  103. empathy_llm_toolkit/state.py +0 -246
  104. empathy_llm_toolkit/utils/__init__.py +0 -5
  105. empathy_llm_toolkit/utils/tokens.py +0 -349
  106. empathy_os/adaptive/__init__.py +0 -13
  107. empathy_os/adaptive/task_complexity.py +0 -127
  108. empathy_os/agent_monitoring.py +0 -414
  109. empathy_os/cache/__init__.py +0 -117
  110. empathy_os/cache/base.py +0 -166
  111. empathy_os/cache/dependency_manager.py +0 -256
  112. empathy_os/cache/hash_only.py +0 -251
  113. empathy_os/cache/hybrid.py +0 -453
  114. empathy_os/cache/storage.py +0 -285
  115. empathy_os/cache_monitor.py +0 -356
  116. empathy_os/cache_stats.py +0 -298
  117. empathy_os/cli/__init__.py +0 -152
  118. empathy_os/cli/__main__.py +0 -12
  119. empathy_os/cli/commands/__init__.py +0 -1
  120. empathy_os/cli/commands/batch.py +0 -256
  121. empathy_os/cli/commands/cache.py +0 -248
  122. empathy_os/cli/commands/help.py +0 -331
  123. empathy_os/cli/commands/info.py +0 -140
  124. empathy_os/cli/commands/inspect.py +0 -436
  125. empathy_os/cli/commands/inspection.py +0 -57
  126. empathy_os/cli/commands/memory.py +0 -48
  127. empathy_os/cli/commands/metrics.py +0 -92
  128. empathy_os/cli/commands/orchestrate.py +0 -184
  129. empathy_os/cli/commands/patterns.py +0 -207
  130. empathy_os/cli/commands/profiling.py +0 -198
  131. empathy_os/cli/commands/provider.py +0 -98
  132. empathy_os/cli/commands/routing.py +0 -285
  133. empathy_os/cli/commands/setup.py +0 -96
  134. empathy_os/cli/commands/status.py +0 -235
  135. empathy_os/cli/commands/sync.py +0 -166
  136. empathy_os/cli/commands/tier.py +0 -121
  137. empathy_os/cli/commands/utilities.py +0 -114
  138. empathy_os/cli/commands/workflow.py +0 -575
  139. empathy_os/cli/core.py +0 -32
  140. empathy_os/cli/parsers/__init__.py +0 -68
  141. empathy_os/cli/parsers/batch.py +0 -118
  142. empathy_os/cli/parsers/cache 2.py +0 -65
  143. empathy_os/cli/parsers/cache.py +0 -65
  144. empathy_os/cli/parsers/help.py +0 -41
  145. empathy_os/cli/parsers/info.py +0 -26
  146. empathy_os/cli/parsers/inspect.py +0 -66
  147. empathy_os/cli/parsers/metrics.py +0 -42
  148. empathy_os/cli/parsers/orchestrate.py +0 -61
  149. empathy_os/cli/parsers/patterns.py +0 -54
  150. empathy_os/cli/parsers/provider.py +0 -40
  151. empathy_os/cli/parsers/routing.py +0 -110
  152. empathy_os/cli/parsers/setup.py +0 -42
  153. empathy_os/cli/parsers/status.py +0 -47
  154. empathy_os/cli/parsers/sync.py +0 -31
  155. empathy_os/cli/parsers/tier.py +0 -33
  156. empathy_os/cli/parsers/workflow.py +0 -77
  157. empathy_os/cli/utils/__init__.py +0 -1
  158. empathy_os/cli/utils/data.py +0 -242
  159. empathy_os/cli/utils/helpers.py +0 -68
  160. empathy_os/cli_legacy.py +0 -3957
  161. empathy_os/cli_minimal.py +0 -1159
  162. empathy_os/cli_router 2.py +0 -416
  163. empathy_os/cli_router.py +0 -437
  164. empathy_os/cli_unified.py +0 -814
  165. empathy_os/config/__init__.py +0 -66
  166. empathy_os/config/xml_config.py +0 -286
  167. empathy_os/config.py +0 -532
  168. empathy_os/coordination.py +0 -870
  169. empathy_os/core.py +0 -1511
  170. empathy_os/core_modules/__init__.py +0 -15
  171. empathy_os/cost_tracker.py +0 -626
  172. empathy_os/dashboard/__init__.py +0 -41
  173. empathy_os/dashboard/app 2.py +0 -512
  174. empathy_os/dashboard/app.py +0 -512
  175. empathy_os/dashboard/simple_server 2.py +0 -403
  176. empathy_os/dashboard/simple_server.py +0 -403
  177. empathy_os/dashboard/standalone_server 2.py +0 -536
  178. empathy_os/dashboard/standalone_server.py +0 -547
  179. empathy_os/discovery.py +0 -306
  180. empathy_os/emergence.py +0 -306
  181. empathy_os/exceptions.py +0 -123
  182. empathy_os/feedback_loops.py +0 -373
  183. empathy_os/hot_reload/README.md +0 -473
  184. empathy_os/hot_reload/__init__.py +0 -62
  185. empathy_os/hot_reload/config.py +0 -83
  186. empathy_os/hot_reload/integration.py +0 -229
  187. empathy_os/hot_reload/reloader.py +0 -298
  188. empathy_os/hot_reload/watcher.py +0 -183
  189. empathy_os/hot_reload/websocket.py +0 -177
  190. empathy_os/levels.py +0 -577
  191. empathy_os/leverage_points.py +0 -441
  192. empathy_os/logging_config.py +0 -261
  193. empathy_os/mcp/__init__.py +0 -10
  194. empathy_os/mcp/server.py +0 -506
  195. empathy_os/memory/__init__.py +0 -237
  196. empathy_os/memory/claude_memory.py +0 -469
  197. empathy_os/memory/config.py +0 -224
  198. empathy_os/memory/control_panel.py +0 -1290
  199. empathy_os/memory/control_panel_support.py +0 -145
  200. empathy_os/memory/cross_session.py +0 -845
  201. empathy_os/memory/edges.py +0 -179
  202. empathy_os/memory/encryption.py +0 -159
  203. empathy_os/memory/file_session.py +0 -770
  204. empathy_os/memory/graph.py +0 -570
  205. empathy_os/memory/long_term.py +0 -913
  206. empathy_os/memory/long_term_types.py +0 -99
  207. empathy_os/memory/mixins/__init__.py +0 -25
  208. empathy_os/memory/mixins/backend_init_mixin.py +0 -244
  209. empathy_os/memory/mixins/capabilities_mixin.py +0 -199
  210. empathy_os/memory/mixins/handoff_mixin.py +0 -208
  211. empathy_os/memory/mixins/lifecycle_mixin.py +0 -49
  212. empathy_os/memory/mixins/long_term_mixin.py +0 -352
  213. empathy_os/memory/mixins/promotion_mixin.py +0 -109
  214. empathy_os/memory/mixins/short_term_mixin.py +0 -182
  215. empathy_os/memory/nodes.py +0 -179
  216. empathy_os/memory/redis_bootstrap.py +0 -540
  217. empathy_os/memory/security/__init__.py +0 -31
  218. empathy_os/memory/security/audit_logger.py +0 -932
  219. empathy_os/memory/security/pii_scrubber.py +0 -640
  220. empathy_os/memory/security/secrets_detector.py +0 -678
  221. empathy_os/memory/short_term.py +0 -2150
  222. empathy_os/memory/simple_storage.py +0 -302
  223. empathy_os/memory/storage/__init__.py +0 -15
  224. empathy_os/memory/storage_backend.py +0 -167
  225. empathy_os/memory/summary_index.py +0 -583
  226. empathy_os/memory/types.py +0 -441
  227. empathy_os/memory/unified.py +0 -182
  228. empathy_os/meta_workflows/__init__.py +0 -74
  229. empathy_os/meta_workflows/agent_creator.py +0 -248
  230. empathy_os/meta_workflows/builtin_templates.py +0 -567
  231. empathy_os/meta_workflows/cli_commands/__init__.py +0 -56
  232. empathy_os/meta_workflows/cli_commands/agent_commands.py +0 -321
  233. empathy_os/meta_workflows/cli_commands/analytics_commands.py +0 -442
  234. empathy_os/meta_workflows/cli_commands/config_commands.py +0 -232
  235. empathy_os/meta_workflows/cli_commands/memory_commands.py +0 -182
  236. empathy_os/meta_workflows/cli_commands/template_commands.py +0 -354
  237. empathy_os/meta_workflows/cli_commands/workflow_commands.py +0 -382
  238. empathy_os/meta_workflows/cli_meta_workflows.py +0 -59
  239. empathy_os/meta_workflows/form_engine.py +0 -292
  240. empathy_os/meta_workflows/intent_detector.py +0 -409
  241. empathy_os/meta_workflows/models.py +0 -569
  242. empathy_os/meta_workflows/pattern_learner.py +0 -738
  243. empathy_os/meta_workflows/plan_generator.py +0 -384
  244. empathy_os/meta_workflows/session_context.py +0 -397
  245. empathy_os/meta_workflows/template_registry.py +0 -229
  246. empathy_os/meta_workflows/workflow.py +0 -984
  247. empathy_os/metrics/__init__.py +0 -12
  248. empathy_os/metrics/collector.py +0 -31
  249. empathy_os/metrics/prompt_metrics.py +0 -194
  250. empathy_os/models/__init__.py +0 -172
  251. empathy_os/models/__main__.py +0 -13
  252. empathy_os/models/adaptive_routing 2.py +0 -437
  253. empathy_os/models/adaptive_routing.py +0 -437
  254. empathy_os/models/auth_cli.py +0 -444
  255. empathy_os/models/auth_strategy.py +0 -450
  256. empathy_os/models/cli.py +0 -655
  257. empathy_os/models/empathy_executor.py +0 -354
  258. empathy_os/models/executor.py +0 -257
  259. empathy_os/models/fallback.py +0 -762
  260. empathy_os/models/provider_config.py +0 -282
  261. empathy_os/models/registry.py +0 -472
  262. empathy_os/models/tasks.py +0 -359
  263. empathy_os/models/telemetry/__init__.py +0 -71
  264. empathy_os/models/telemetry/analytics.py +0 -594
  265. empathy_os/models/telemetry/backend.py +0 -196
  266. empathy_os/models/telemetry/data_models.py +0 -431
  267. empathy_os/models/telemetry/storage.py +0 -489
  268. empathy_os/models/token_estimator.py +0 -420
  269. empathy_os/models/validation.py +0 -280
  270. empathy_os/monitoring/__init__.py +0 -52
  271. empathy_os/monitoring/alerts.py +0 -946
  272. empathy_os/monitoring/alerts_cli.py +0 -448
  273. empathy_os/monitoring/multi_backend.py +0 -271
  274. empathy_os/monitoring/otel_backend.py +0 -362
  275. empathy_os/optimization/__init__.py +0 -19
  276. empathy_os/optimization/context_optimizer.py +0 -272
  277. empathy_os/orchestration/__init__.py +0 -67
  278. empathy_os/orchestration/agent_templates.py +0 -707
  279. empathy_os/orchestration/config_store.py +0 -499
  280. empathy_os/orchestration/execution_strategies.py +0 -2111
  281. empathy_os/orchestration/meta_orchestrator.py +0 -1168
  282. empathy_os/orchestration/pattern_learner.py +0 -696
  283. empathy_os/orchestration/real_tools.py +0 -931
  284. empathy_os/pattern_cache.py +0 -187
  285. empathy_os/pattern_library.py +0 -542
  286. empathy_os/patterns/debugging/all_patterns.json +0 -81
  287. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +0 -77
  288. empathy_os/patterns/refactoring_memory.json +0 -89
  289. empathy_os/persistence.py +0 -564
  290. empathy_os/platform_utils.py +0 -265
  291. empathy_os/plugins/__init__.py +0 -28
  292. empathy_os/plugins/base.py +0 -361
  293. empathy_os/plugins/registry.py +0 -268
  294. empathy_os/project_index/__init__.py +0 -32
  295. empathy_os/project_index/cli.py +0 -335
  296. empathy_os/project_index/index.py +0 -667
  297. empathy_os/project_index/models.py +0 -504
  298. empathy_os/project_index/reports.py +0 -474
  299. empathy_os/project_index/scanner.py +0 -777
  300. empathy_os/project_index/scanner_parallel 2.py +0 -291
  301. empathy_os/project_index/scanner_parallel.py +0 -291
  302. empathy_os/prompts/__init__.py +0 -61
  303. empathy_os/prompts/config.py +0 -77
  304. empathy_os/prompts/context.py +0 -177
  305. empathy_os/prompts/parser.py +0 -285
  306. empathy_os/prompts/registry.py +0 -313
  307. empathy_os/prompts/templates.py +0 -208
  308. empathy_os/redis_config.py +0 -302
  309. empathy_os/redis_memory.py +0 -799
  310. empathy_os/resilience/__init__.py +0 -56
  311. empathy_os/resilience/circuit_breaker.py +0 -256
  312. empathy_os/resilience/fallback.py +0 -179
  313. empathy_os/resilience/health.py +0 -300
  314. empathy_os/resilience/retry.py +0 -209
  315. empathy_os/resilience/timeout.py +0 -135
  316. empathy_os/routing/__init__.py +0 -43
  317. empathy_os/routing/chain_executor.py +0 -433
  318. empathy_os/routing/classifier.py +0 -217
  319. empathy_os/routing/smart_router.py +0 -234
  320. empathy_os/routing/workflow_registry.py +0 -343
  321. empathy_os/scaffolding/README.md +0 -589
  322. empathy_os/scaffolding/__init__.py +0 -35
  323. empathy_os/scaffolding/__main__.py +0 -14
  324. empathy_os/scaffolding/cli.py +0 -240
  325. empathy_os/socratic/__init__.py +0 -256
  326. empathy_os/socratic/ab_testing.py +0 -958
  327. empathy_os/socratic/blueprint.py +0 -533
  328. empathy_os/socratic/cli.py +0 -703
  329. empathy_os/socratic/collaboration.py +0 -1114
  330. empathy_os/socratic/domain_templates.py +0 -924
  331. empathy_os/socratic/embeddings.py +0 -738
  332. empathy_os/socratic/engine.py +0 -794
  333. empathy_os/socratic/explainer.py +0 -682
  334. empathy_os/socratic/feedback.py +0 -772
  335. empathy_os/socratic/forms.py +0 -629
  336. empathy_os/socratic/generator.py +0 -732
  337. empathy_os/socratic/llm_analyzer.py +0 -637
  338. empathy_os/socratic/mcp_server.py +0 -702
  339. empathy_os/socratic/session.py +0 -312
  340. empathy_os/socratic/storage.py +0 -667
  341. empathy_os/socratic/success.py +0 -730
  342. empathy_os/socratic/visual_editor.py +0 -860
  343. empathy_os/socratic/web_ui.py +0 -958
  344. empathy_os/telemetry/__init__.py +0 -39
  345. empathy_os/telemetry/agent_coordination 2.py +0 -478
  346. empathy_os/telemetry/agent_coordination.py +0 -476
  347. empathy_os/telemetry/agent_tracking 2.py +0 -350
  348. empathy_os/telemetry/agent_tracking.py +0 -348
  349. empathy_os/telemetry/approval_gates 2.py +0 -563
  350. empathy_os/telemetry/approval_gates.py +0 -551
  351. empathy_os/telemetry/cli.py +0 -1231
  352. empathy_os/telemetry/commands/__init__.py +0 -14
  353. empathy_os/telemetry/commands/dashboard_commands.py +0 -696
  354. empathy_os/telemetry/event_streaming 2.py +0 -405
  355. empathy_os/telemetry/event_streaming.py +0 -405
  356. empathy_os/telemetry/feedback_loop 2.py +0 -557
  357. empathy_os/telemetry/feedback_loop.py +0 -554
  358. empathy_os/telemetry/usage_tracker.py +0 -591
  359. empathy_os/templates.py +0 -754
  360. empathy_os/test_generator/__init__.py +0 -38
  361. empathy_os/test_generator/__main__.py +0 -14
  362. empathy_os/test_generator/cli.py +0 -234
  363. empathy_os/test_generator/generator.py +0 -355
  364. empathy_os/test_generator/risk_analyzer.py +0 -216
  365. empathy_os/tier_recommender.py +0 -384
  366. empathy_os/tools.py +0 -183
  367. empathy_os/trust/__init__.py +0 -28
  368. empathy_os/trust/circuit_breaker.py +0 -579
  369. empathy_os/trust_building.py +0 -527
  370. empathy_os/validation/__init__.py +0 -19
  371. empathy_os/validation/xml_validator.py +0 -281
  372. empathy_os/vscode_bridge 2.py +0 -173
  373. empathy_os/vscode_bridge.py +0 -173
  374. empathy_os/workflow_commands.py +0 -780
  375. empathy_os/workflow_patterns/__init__.py +0 -33
  376. empathy_os/workflow_patterns/behavior.py +0 -249
  377. empathy_os/workflow_patterns/core.py +0 -76
  378. empathy_os/workflow_patterns/output.py +0 -99
  379. empathy_os/workflow_patterns/registry.py +0 -255
  380. empathy_os/workflow_patterns/structural.py +0 -288
  381. empathy_os/workflows/__init__.py +0 -539
  382. empathy_os/workflows/autonomous_test_gen.py +0 -1268
  383. empathy_os/workflows/base.py +0 -2667
  384. empathy_os/workflows/batch_processing.py +0 -342
  385. empathy_os/workflows/bug_predict.py +0 -1084
  386. empathy_os/workflows/builder.py +0 -273
  387. empathy_os/workflows/caching.py +0 -253
  388. empathy_os/workflows/code_review.py +0 -1048
  389. empathy_os/workflows/code_review_adapters.py +0 -312
  390. empathy_os/workflows/code_review_pipeline.py +0 -722
  391. empathy_os/workflows/config.py +0 -645
  392. empathy_os/workflows/dependency_check.py +0 -644
  393. empathy_os/workflows/document_gen/__init__.py +0 -25
  394. empathy_os/workflows/document_gen/config.py +0 -30
  395. empathy_os/workflows/document_gen/report_formatter.py +0 -162
  396. empathy_os/workflows/document_gen/workflow.py +0 -1426
  397. empathy_os/workflows/document_gen.py +0 -29
  398. empathy_os/workflows/document_manager.py +0 -216
  399. empathy_os/workflows/document_manager_README.md +0 -134
  400. empathy_os/workflows/documentation_orchestrator.py +0 -1205
  401. empathy_os/workflows/history.py +0 -510
  402. empathy_os/workflows/keyboard_shortcuts/__init__.py +0 -39
  403. empathy_os/workflows/keyboard_shortcuts/generators.py +0 -391
  404. empathy_os/workflows/keyboard_shortcuts/parsers.py +0 -416
  405. empathy_os/workflows/keyboard_shortcuts/prompts.py +0 -295
  406. empathy_os/workflows/keyboard_shortcuts/schema.py +0 -193
  407. empathy_os/workflows/keyboard_shortcuts/workflow.py +0 -509
  408. empathy_os/workflows/llm_base.py +0 -363
  409. empathy_os/workflows/manage_docs.py +0 -87
  410. empathy_os/workflows/manage_docs_README.md +0 -134
  411. empathy_os/workflows/manage_documentation.py +0 -821
  412. empathy_os/workflows/new_sample_workflow1.py +0 -149
  413. empathy_os/workflows/new_sample_workflow1_README.md +0 -150
  414. empathy_os/workflows/orchestrated_health_check.py +0 -849
  415. empathy_os/workflows/orchestrated_release_prep.py +0 -600
  416. empathy_os/workflows/output.py +0 -410
  417. empathy_os/workflows/perf_audit.py +0 -863
  418. empathy_os/workflows/pr_review.py +0 -762
  419. empathy_os/workflows/progress.py +0 -779
  420. empathy_os/workflows/progress_server.py +0 -322
  421. empathy_os/workflows/progressive/README 2.md +0 -454
  422. empathy_os/workflows/progressive/README.md +0 -454
  423. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  424. empathy_os/workflows/progressive/__init__.py +0 -82
  425. empathy_os/workflows/progressive/cli 2.py +0 -242
  426. empathy_os/workflows/progressive/cli.py +0 -219
  427. empathy_os/workflows/progressive/core 2.py +0 -488
  428. empathy_os/workflows/progressive/core.py +0 -488
  429. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  430. empathy_os/workflows/progressive/orchestrator.py +0 -723
  431. empathy_os/workflows/progressive/reports 2.py +0 -528
  432. empathy_os/workflows/progressive/reports.py +0 -520
  433. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  434. empathy_os/workflows/progressive/telemetry.py +0 -274
  435. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  436. empathy_os/workflows/progressive/test_gen.py +0 -495
  437. empathy_os/workflows/progressive/workflow 2.py +0 -628
  438. empathy_os/workflows/progressive/workflow.py +0 -589
  439. empathy_os/workflows/refactor_plan.py +0 -694
  440. empathy_os/workflows/release_prep.py +0 -895
  441. empathy_os/workflows/release_prep_crew.py +0 -969
  442. empathy_os/workflows/research_synthesis.py +0 -404
  443. empathy_os/workflows/routing.py +0 -168
  444. empathy_os/workflows/secure_release.py +0 -593
  445. empathy_os/workflows/security_adapters.py +0 -297
  446. empathy_os/workflows/security_audit.py +0 -1329
  447. empathy_os/workflows/security_audit_phase3.py +0 -355
  448. empathy_os/workflows/seo_optimization.py +0 -633
  449. empathy_os/workflows/step_config.py +0 -234
  450. empathy_os/workflows/telemetry_mixin.py +0 -269
  451. empathy_os/workflows/test5.py +0 -125
  452. empathy_os/workflows/test5_README.md +0 -158
  453. empathy_os/workflows/test_coverage_boost_crew.py +0 -849
  454. empathy_os/workflows/test_gen/__init__.py +0 -52
  455. empathy_os/workflows/test_gen/ast_analyzer.py +0 -249
  456. empathy_os/workflows/test_gen/config.py +0 -88
  457. empathy_os/workflows/test_gen/data_models.py +0 -38
  458. empathy_os/workflows/test_gen/report_formatter.py +0 -289
  459. empathy_os/workflows/test_gen/test_templates.py +0 -381
  460. empathy_os/workflows/test_gen/workflow.py +0 -655
  461. empathy_os/workflows/test_gen.py +0 -54
  462. empathy_os/workflows/test_gen_behavioral.py +0 -477
  463. empathy_os/workflows/test_gen_parallel.py +0 -341
  464. empathy_os/workflows/test_lifecycle.py +0 -526
  465. empathy_os/workflows/test_maintenance.py +0 -627
  466. empathy_os/workflows/test_maintenance_cli.py +0 -590
  467. empathy_os/workflows/test_maintenance_crew.py +0 -840
  468. empathy_os/workflows/test_runner.py +0 -622
  469. empathy_os/workflows/tier_tracking.py +0 -531
  470. empathy_os/workflows/xml_enhanced_crew.py +0 -285
  471. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +0 -57
  472. empathy_software_plugin/cli/__init__.py +0 -120
  473. empathy_software_plugin/cli/inspect.py +0 -362
  474. empathy_software_plugin/cli.py +0 -574
  475. empathy_software_plugin/plugin.py +0 -188
  476. workflow_scaffolding/__init__.py +0 -11
  477. workflow_scaffolding/__main__.py +0 -12
  478. workflow_scaffolding/cli.py +0 -206
  479. workflow_scaffolding/generator.py +0 -265
  480. {empathy_framework-5.2.1.dist-info → empathy_framework-5.4.0.dist-info}/WHEEL +0 -0
empathy_os/workflows/documentation_orchestrator.py
@@ -1,1205 +0,0 @@
- """Documentation Orchestrator - Combined Scout + Writer Workflow
-
- Combines ManageDocumentationCrew (scout/analyst) with DocumentGenerationWorkflow
- (writer) to provide an end-to-end documentation management solution:
-
- 1. SCOUT Phase: ManageDocumentationCrew scans for stale docs and gaps
- 2. PRIORITIZE Phase: Filters and ranks items by severity and impact
- 3. GENERATE Phase: DocumentGenerationWorkflow creates/updates documentation
- 4. UPDATE Phase: ProjectIndex is updated with new documentation status
-
- This orchestrator provides intelligent documentation maintenance:
- - Detects when source code changes make docs stale
- - Identifies undocumented files by priority (LOC, complexity)
- - Generates documentation using cost-optimized 3-stage pipeline
- - Tracks all costs and provides detailed reporting
-
- Copyright 2025 Smart-AI-Memory
- Licensed under Fair Source License 0.9
- """
-
- import asyncio
- import logging
- from dataclasses import dataclass, field
- from datetime import datetime
- from pathlib import Path
- from typing import Any
-
- logger = logging.getLogger(__name__)
-
- # Import scout workflow
- ManageDocumentationCrew = None
- ManageDocumentationCrewResult = None
- HAS_SCOUT = False
-
- try:
-     from .manage_documentation import ManageDocumentationCrew as _ManageDocumentationCrew
-     from .manage_documentation import (
-         ManageDocumentationCrewResult as _ManageDocumentationCrewResult,
-     )
-
-     ManageDocumentationCrew = _ManageDocumentationCrew
-     ManageDocumentationCrewResult = _ManageDocumentationCrewResult
-     HAS_SCOUT = True
- except ImportError:
-     pass
-
- # Import writer workflow
- DocumentGenerationWorkflow = None
- HAS_WRITER = False
-
- try:
-     from .document_gen import DocumentGenerationWorkflow as _DocumentGenerationWorkflow
-
-     DocumentGenerationWorkflow = _DocumentGenerationWorkflow
-     HAS_WRITER = True
- except ImportError:
-     pass
-
- # Import ProjectIndex for tracking
- ProjectIndex = None
- HAS_PROJECT_INDEX = False
-
- try:
-     from empathy_os.project_index import ProjectIndex as _ProjectIndex
-
-     ProjectIndex = _ProjectIndex
-     HAS_PROJECT_INDEX = True
- except ImportError:
-     pass
-
-
- @dataclass
- class DocumentationItem:
-     """A single item that needs documentation work."""
-
-     file_path: str
-     issue_type: str  # "missing_docstring" | "stale_doc" | "no_documentation"
-     severity: str  # "high" | "medium" | "low"
-     priority: int  # 1-5, lower is higher priority
-     details: str = ""
-     related_source: list[str] = field(default_factory=list)
-     days_stale: int = 0
-     loc: int = 0
-
-
- @dataclass
- class OrchestratorResult:
-     """Result from DocumentationOrchestrator execution."""
-
-     success: bool
-     phase: str  # "scout" | "prioritize" | "generate" | "complete"
-
-     # Scout phase results
-     items_found: int = 0
-     stale_docs: int = 0
-     missing_docs: int = 0
-
-     # Generation phase results
-     items_processed: int = 0
-     docs_generated: list[str] = field(default_factory=list)
-     docs_updated: list[str] = field(default_factory=list)
-     docs_skipped: list[str] = field(default_factory=list)
-
-     # Cost tracking
-     scout_cost: float = 0.0
-     generation_cost: float = 0.0
-     total_cost: float = 0.0
-
-     # Timing
-     duration_ms: int = 0
-
-     # Details
-     errors: list[str] = field(default_factory=list)
-     warnings: list[str] = field(default_factory=list)
-     summary: str = ""
-
-     def to_dict(self) -> dict:
-         return {
-             "success": self.success,
-             "phase": self.phase,
-             "items_found": self.items_found,
-             "stale_docs": self.stale_docs,
-             "missing_docs": self.missing_docs,
-             "items_processed": self.items_processed,
-             "docs_generated": self.docs_generated,
-             "docs_updated": self.docs_updated,
-             "docs_skipped": self.docs_skipped,
-             "scout_cost": self.scout_cost,
-             "generation_cost": self.generation_cost,
-             "total_cost": self.total_cost,
-             "duration_ms": self.duration_ms,
-             "errors": self.errors,
-             "warnings": self.warnings,
-             "summary": self.summary,
-         }
-
-
- class DocumentationOrchestrator:
-     """End-to-end documentation management orchestrator.
-
-     Combines the ManageDocumentationCrew (scout) with DocumentGenerationWorkflow
-     (writer) to provide intelligent, automated documentation maintenance.
-
-     Phases:
-         1. SCOUT: Analyze codebase for documentation gaps and staleness
-         2. PRIORITIZE: Rank items by severity, LOC, and business impact
-         3. GENERATE: Create/update documentation for priority items
-         4. UPDATE: Update ProjectIndex with new documentation status
-
-     Usage:
-         orchestrator = DocumentationOrchestrator(
-             project_root=".",
-             max_items=5,  # Process top 5 priority items
-             max_cost=2.0,  # Stop at $2 total cost
-             auto_approve=False,  # Require approval before generation
-         )
-         result = await orchestrator.execute()
-     """
-
-     name = "documentation-orchestrator"
-     description = "End-to-end documentation management: scout gaps, prioritize, generate docs"
-
-     # Patterns to exclude from SCANNING - things we don't want to analyze for documentation gaps
-     # Note: The ALLOWED_OUTPUT_EXTENSIONS whitelist is the primary safety mechanism for writes
-     DEFAULT_EXCLUDE_PATTERNS = [
-         # Generated/build directories (would bloat results)
-         "site/**",
-         "dist/**",
-         "build/**",
-         "out/**",
-         "node_modules/**",
-         "__pycache__/**",
-         ".git/**",
-         "*.egg-info/**",
-         # Framework internal/working directories
-         ".empathy/**",
-         ".empathy_index/**",
-         ".claude/**",
-         # Book/large doc source folders
-         "book/**",
-         "docs/book/**",
-         "docs/generated/**",
-         "docs/word/**",
-         "docs/pdf/**",
-         # Dependency/config files (not source code - don't need documentation)
-         "requirements*.txt",
-         "package.json",
-         "package-lock.json",
-         "yarn.lock",
-         "Pipfile",
-         "Pipfile.lock",
-         "poetry.lock",
-         "pyproject.toml",
-         "setup.py",
-         "setup.cfg",
-         "*.toml",
-         "*.cfg",
-         "*.ini",
-         "*.env",
-         ".env*",
-         "Makefile",
-         "Dockerfile",
-         "docker-compose*.yml",
-         "*.yaml",
-         "*.yml",
-         # Binary files (cannot be documented as code)
-         "*.png",
-         "*.jpg",
-         "*.jpeg",
-         "*.gif",
-         "*.ico",
-         "*.svg",
-         "*.pdf",
-         "*.woff",
-         "*.woff2",
-         "*.ttf",
-         "*.eot",
-         "*.pyc",
-         "*.pyo",
-         "*.so",
-         "*.dll",
-         "*.exe",
-         "*.zip",
-         "*.tar",
-         "*.gz",
-         "*.vsix",
-         "*.docx",
-         "*.doc",
-     ]
-
-     # ALLOWED file extensions for OUTPUT - documentation can ONLY create/modify these types
-     # This is the PRIMARY safety mechanism - even if scanning includes wrong files,
-     # only markdown documentation files can ever be written
-     ALLOWED_OUTPUT_EXTENSIONS = [
-         ".md",  # Markdown documentation
-         ".mdx",  # MDX (Markdown with JSX)
-         ".rst",  # reStructuredText
-     ]
-
-     def __init__(
-         self,
-         project_root: str = ".",
-         max_items: int = 5,
-         max_cost: float = 5.0,
-         auto_approve: bool = False,
-         export_path: str | Path | None = None,
-         include_stale: bool = True,
-         include_missing: bool = True,
-         min_severity: str = "low",  # "high" | "medium" | "low"
-         doc_type: str = "api_reference",
-         audience: str = "developers",
-         dry_run: bool = False,
-         exclude_patterns: list[str] | None = None,
-         **kwargs: Any,
-     ):
-         """Initialize the orchestrator.
-
-         Args:
-             project_root: Root directory of the project
-             max_items: Maximum number of items to process (default 5)
-             max_cost: Maximum total cost in USD (default $5)
-             auto_approve: If True, generate docs without confirmation
-             export_path: Directory to export generated docs
-             include_stale: Include stale docs in processing
-             include_missing: Include missing docs in processing
-             min_severity: Minimum severity to include ("high", "medium", "low")
-             doc_type: Type of documentation to generate
-             audience: Target audience for documentation
-             dry_run: If True, scout only without generating
-             exclude_patterns: Additional patterns to exclude (merged with defaults)
-
-         """
-         self.project_root = Path(project_root)
-         self.max_items = max_items
-         self.max_cost = max_cost
-         self.auto_approve = auto_approve
-
-         # Merge default exclusions with any custom patterns
-         self.exclude_patterns = list(self.DEFAULT_EXCLUDE_PATTERNS)
-         if exclude_patterns:
-             self.exclude_patterns.extend(exclude_patterns)
-         self.export_path = (
-             Path(export_path) if export_path else self.project_root / "docs" / "generated"
-         )
-         self.include_stale = include_stale
-         self.include_missing = include_missing
-         self.min_severity = min_severity
-         self.doc_type = doc_type
-         self.audience = audience
-         self.dry_run = dry_run
-         self.config = kwargs
-         self._quiet = False  # Set to True for JSON output mode
-
-         # Initialize components
-         self._scout: Any = None
-         self._writer: Any = None
-         self._project_index: Any = None
-
-         self._total_cost = 0.0
-         self._items: list[DocumentationItem] = []
-         self._excluded_files: list[dict] = []  # Track files excluded by patterns
-
-         # Initialize scout if available
-         if HAS_SCOUT and ManageDocumentationCrew is not None:
-             self._scout = ManageDocumentationCrew(project_root=str(self.project_root))
-
-         # Initialize writer if available
-         if HAS_WRITER and DocumentGenerationWorkflow is not None:
-             self._writer = DocumentGenerationWorkflow(
-                 export_path=str(self.export_path),
-                 max_cost=max_cost / 2,  # Reserve half budget for generation
-                 graceful_degradation=True,
-             )
-
-         # Initialize project index if available
-         if HAS_PROJECT_INDEX and ProjectIndex is not None:
-             try:
-                 self._project_index = ProjectIndex(str(self.project_root))
-                 if not self._project_index.load():
-                     self._project_index.refresh()
-             except Exception as e:
-                 logger.warning(f"Could not initialize ProjectIndex: {e}")
-
-     def describe(self) -> str:
-         """Get a human-readable description of the workflow."""
-         lines = [
-             f"Workflow: {self.name}",
-             f"Description: {self.description}",
-             "",
-             "Phases:",
-             " 1. SCOUT - Analyze codebase for documentation gaps and staleness",
-             " 2. PRIORITIZE - Rank items by severity, LOC, and business impact",
-             " 3. GENERATE - Create/update documentation for priority items",
-             " 4. UPDATE - Update ProjectIndex with new documentation status",
-             "",
-             "Configuration:",
-             f" max_items: {self.max_items}",
-             f" max_cost: ${self.max_cost:.2f}",
-             f" auto_approve: {self.auto_approve}",
-             f" dry_run: {self.dry_run}",
-             f" include_stale: {self.include_stale}",
-             f" include_missing: {self.include_missing}",
-             "",
-             "Components:",
-             f" Scout (ManageDocumentationCrew): {'Available' if self._scout else 'Not available'}",
-             f" Writer (DocumentGenerationWorkflow): {'Available' if self._writer else 'Not available'}",
-             f" ProjectIndex: {'Available' if self._project_index else 'Not available'}",
-         ]
-         return "\n".join(lines)
-
-     def _severity_to_priority(self, severity: str) -> int:
-         """Convert severity string to numeric priority (1=highest)."""
-         return {"high": 1, "medium": 2, "low": 3}.get(severity.lower(), 3)
-
-     def _should_include_severity(self, severity: str) -> bool:
-         """Check if severity meets minimum threshold."""
-         severity_order = {"high": 1, "medium": 2, "low": 3}
-         item_level = severity_order.get(severity.lower(), 3)
-         min_level = severity_order.get(self.min_severity.lower(), 3)
-         return item_level <= min_level
-
-     def _should_exclude(self, file_path: str, track: bool = False) -> bool:
-         """Check if a file should be excluded from documentation generation.
-
-         Uses fnmatch-style pattern matching against exclude_patterns.
-
-         Args:
-             file_path: Path to check (relative or absolute)
-             track: If True, add to _excluded_files list when excluded
-
-         Returns:
-             True if file should be excluded
-
-         """
-         import fnmatch
-
-         # Normalize path for matching
-         path_str = str(file_path)
-         # Also check just the filename for simple patterns
-         filename = Path(file_path).name
-
-         for pattern in self.exclude_patterns:
-             # Check full path
-             if fnmatch.fnmatch(path_str, pattern):
-                 if track:
-                     self._excluded_files.append(
-                         {
-                             "file_path": path_str,
-                             "matched_pattern": pattern,
-                             "reason": self._get_exclusion_reason(pattern),
-                         },
-                     )
-                 return True
-             # Check just filename
-             if fnmatch.fnmatch(filename, pattern):
-                 if track:
-                     self._excluded_files.append(
-                         {
-                             "file_path": path_str,
-                             "matched_pattern": pattern,
-                             "reason": self._get_exclusion_reason(pattern),
-                         },
-                     )
-                 return True
-             # Check if path contains the pattern (for directory patterns)
-             if "**" in pattern:
-                 # Convert ** pattern to a simpler check
-                 base_pattern = pattern.replace("/**", "").replace("**", "")
-                 if base_pattern in path_str:
-                     if track:
-                         self._excluded_files.append(
-                             {
-                                 "file_path": path_str,
-                                 "matched_pattern": pattern,
-                                 "reason": self._get_exclusion_reason(pattern),
-                             },
-                         )
-                     return True
-
-         return False
-
-     def _get_exclusion_reason(self, pattern: str) -> str:
-         """Get a human-readable reason for why a pattern excludes a file."""
-         # Generated directories
-         if any(
-             p in pattern
-             for p in [
-                 "site/**",
-                 "dist/**",
-                 "build/**",
-                 "out/**",
-                 "node_modules/**",
-                 "__pycache__/**",
-                 ".git/**",
-                 "egg-info",
-             ]
-         ):
-             return "Generated/build directory"
-         # Binary files
-         if any(
-             p in pattern
-             for p in [
-                 ".png",
-                 ".jpg",
-                 ".jpeg",
-                 ".gif",
-                 ".ico",
-                 ".svg",
-                 ".pdf",
-                 ".woff",
-                 ".ttf",
-                 ".pyc",
-                 ".so",
-                 ".dll",
-                 ".exe",
-                 ".zip",
-                 ".tar",
-                 ".gz",
-                 ".vsix",
-             ]
-         ):
-             return "Binary/asset file"
-         # Empathy internal
-         if any(p in pattern for p in [".empathy/**", ".claude/**", ".empathy_index/**"]):
-             return "Framework internal file"
-         # Book/docs
-         if any(
-             p in pattern
-             for p in [
-                 "book/**",
-                 "docs/generated/**",
-                 "docs/word/**",
-                 "docs/pdf/**",
-                 ".docx",
-                 ".doc",
-             ]
-         ):
-             return "Book/document source"
-         return "Excluded by pattern"
-
-     def _is_allowed_output(self, file_path: str) -> bool:
-         """Check if a file is allowed to be created/modified.
-
-         Uses the ALLOWED_OUTPUT_EXTENSIONS whitelist - this is the PRIMARY
-         safety mechanism to ensure only documentation files can be written.
-
-         Args:
-             file_path: Path to check
-
-         Returns:
-             True if the file extension is in the allowed whitelist
-
-         """
-         ext = Path(file_path).suffix.lower()
-         return ext in self.ALLOWED_OUTPUT_EXTENSIONS
-
-     async def _run_scout_phase(self) -> tuple[list[DocumentationItem], float]:
-         """Run the scout phase to identify documentation gaps.
-
-         Returns:
-             Tuple of (items found, cost)
-
-         """
-         items: list[DocumentationItem] = []
-         cost = 0.0
-
-         if self._scout is None:
-             logger.warning("Scout (ManageDocumentationCrew) not available")
-             # Fall back to ProjectIndex if available
-             if self._project_index is not None:
-                 items = self._items_from_index()
-             return items, cost
-
-         logger.info("Starting scout phase...")
-         print("\n[SCOUT PHASE] Analyzing codebase for documentation gaps...")
-
-         result = await self._scout.execute(path=str(self.project_root))
-         cost = result.cost
-
-         if not result.success:
-             logger.error("Scout phase failed")
-             return items, cost
-
-         # Parse scout findings into DocumentationItems
-         items = self._parse_scout_findings(result)
-
-         # Supplement with ProjectIndex data if available
-         if self._project_index is not None:
-             index_items = self._items_from_index()
-             # Merge, preferring scout items but adding unique index items
-             existing_paths = {item.file_path for item in items}
-             for idx_item in index_items:
-                 if idx_item.file_path not in existing_paths:
-                     items.append(idx_item)
-
-         logger.info(f"Scout phase found {len(items)} items (cost: ${cost:.4f})")
-         return items, cost
-
-     def _items_from_index(self) -> list[DocumentationItem]:
-         """Extract documentation items from ProjectIndex."""
-         items: list[DocumentationItem] = []
-
-         if self._project_index is None:
-             return items
-
-         try:
-             context = self._project_index.get_context_for_workflow("documentation")
-
-             # Get files without docstrings
-             if self.include_missing:
-                 files_without_docs = context.get("files_without_docstrings", [])
-                 for f in files_without_docs[:20]:  # Limit
-                     file_path = f.get("path", "")
-                     if self._should_exclude(file_path, track=True):
-                         continue
-                     items.append(
-                         DocumentationItem(
-                             file_path=file_path,
-                             issue_type="missing_docstring",
-                             severity="medium",
-                             priority=2,
-                             details=f"Missing docstring - {f.get('loc', 0)} LOC",
-                             loc=f.get("loc", 0),
-                         ),
-                     )
-
-             # Get stale docs
-             if self.include_stale:
-                 docs_needing_review = context.get("docs_needing_review", [])
-                 for d in docs_needing_review[:10]:
-                     if d.get("source_modified_after_doc"):
-                         file_path = d.get("doc_file", "")
-                         if self._should_exclude(file_path, track=True):
-                             continue
-                         items.append(
-                             DocumentationItem(
-                                 file_path=file_path,
-                                 issue_type="stale_doc",
-                                 severity="high",
-                                 priority=1,
-                                 details="Source modified after doc update",
-                                 related_source=d.get("related_source_files", [])[:3],
-                                 days_stale=d.get("days_since_doc_update", 0),
-                             ),
-                         )
-         except Exception as e:
-             logger.warning(f"Error extracting items from index: {e}")
-
-         return items
-
-     def _parse_scout_findings(self, result: Any) -> list[DocumentationItem]:
-         """Parse scout result into DocumentationItems."""
-         items: list[DocumentationItem] = []
-
-         # Scout returns findings as list of dicts with agent responses
-         for finding in result.findings:
-             response = finding.get("response", "")
-             agent = finding.get("agent", "")
-
-             # Try to extract structured data from analyst response
-             if "Analyst" in agent:
-                 # Parse mock or real findings
-                 # Look for JSON-like structures in the response
-                 import re
-
-                 # Find file paths mentioned
-                 file_pattern = r'"file_path":\s*"([^"]+)"'
-                 issue_pattern = r'"issue_type":\s*"([^"]+)"'
-                 severity_pattern = r'"severity":\s*"([^"]+)"'
-
-                 file_matches = re.findall(file_pattern, response)
-                 issue_matches = re.findall(issue_pattern, response)
-                 severity_matches = re.findall(severity_pattern, response)
-
-                 for i, file_path in enumerate(file_matches):
-                     issue_type = issue_matches[i] if i < len(issue_matches) else "unknown"
-                     severity = severity_matches[i] if i < len(severity_matches) else "medium"
-
-                     # Filter by settings
-                     if issue_type == "stale_doc" and not self.include_stale:
-                         continue
-                     if (
-                         issue_type in ("missing_docstring", "no_documentation")
-                         and not self.include_missing
-                     ):
-                         continue
-                     if not self._should_include_severity(severity):
-                         continue
-                     # Skip excluded files (requirements.txt, package.json, etc.)
-                     if self._should_exclude(file_path):
-                         continue
-
-                     items.append(
-                         DocumentationItem(
-                             file_path=file_path,
-                             issue_type=issue_type,
-                             severity=severity,
-                             priority=self._severity_to_priority(severity),
-                             details=f"Found by {agent}",
-                         ),
-                     )
-
-         return items
-
-     def _prioritize_items(self, items: list[DocumentationItem]) -> list[DocumentationItem]:
-         """Prioritize items for generation.
-
-         Priority order:
-         1. Stale docs (source changed) - highest urgency
-         2. High-severity missing docs
-         3. Files with most LOC
-         4. Medium/low severity
-         """
-         # Sort by: priority (asc), days_stale (desc), loc (desc)
-         sorted_items = sorted(
-             items,
-             key=lambda x: (
-                 x.priority,
-                 -x.days_stale,
-                 -x.loc,
-             ),
-         )
-
-         return sorted_items[: self.max_items]
-
-     async def _run_generate_phase(
-         self,
-         items: list[DocumentationItem],
-     ) -> tuple[list[str], list[str], list[str], float]:
-         """Run the generation phase for prioritized items.
-
-         Returns:
-             Tuple of (generated, updated, skipped, cost)
-
-         """
-         generated: list[str] = []
-         updated: list[str] = []
-         skipped: list[str] = []
-         cost = 0.0
-
-         if self._writer is None:
-             logger.warning("Writer (DocumentGenerationWorkflow) not available")
-             return generated, updated, [item.file_path for item in items], cost
-
-         logger.info(f"Starting generation phase for {len(items)} items...")
-         print(f"\n[GENERATE PHASE] Processing {len(items)} documentation items...")
-
-         for i, item in enumerate(items):
-             # Check cost limit
-             if self._total_cost + cost >= self.max_cost:
-                 remaining = items[i:]
-                 skipped.extend([r.file_path for r in remaining])
-                 logger.warning(f"Cost limit reached. Skipping {len(remaining)} items.")
-                 print(f" [!] Cost limit ${self.max_cost:.2f} reached. Skipping remaining items.")
-                 break
-
-             print(f" [{i + 1}/{len(items)}] {item.issue_type}: {item.file_path}")
-
-             try:
-                 # Read source file content
-                 source_path = self.project_root / item.file_path
-                 source_content = ""
-
-                 if source_path.exists():
-                     try:
-                         source_content = source_path.read_text(encoding="utf-8")
-                     except Exception as e:
-                         logger.warning(f"Could not read {source_path}: {e}")
-
-                 # Run documentation generation
-                 result = await self._writer.execute(
-                     source_code=source_content,
-                     target=item.file_path,
-                     doc_type=self.doc_type,
-                     audience=self.audience,
-                 )
-
-                 # Track cost from result
-                 if isinstance(result, dict):
-                     step_cost = result.get("accumulated_cost", 0.0)
-                     cost += step_cost
-
-                     # Categorize result
-                     if item.issue_type == "stale_doc":
-                         updated.append(item.file_path)
-                     else:
-                         generated.append(item.file_path)
-
-                     export_path = result.get("export_path")
-                     if export_path:
-                         print(f" -> Saved to: {export_path}")
-                 else:
-                     skipped.append(item.file_path)
-
-             except Exception as e:
-                 logger.error(f"Error generating docs for {item.file_path}: {e}")
-                 skipped.append(item.file_path)
-
-         logger.info(
-             f"Generation phase: {len(generated)} generated, {len(updated)} updated, {len(skipped)} skipped",
-         )
-         return generated, updated, skipped, cost
-
-     def _update_project_index(self, generated: list[str], updated: list[str]) -> None:
-         """Update ProjectIndex with newly documented files."""
-         if self._project_index is None:
-             return
-
-         try:
-             # Mark files as documented
-             for file_path in generated + updated:
-                 # Update record if it exists
-                 record = self._project_index.get_record(file_path)
-                 if record:
-                     record.has_docstring = True
-                     record.last_modified = datetime.now()
-
-             # Save index
-             self._project_index.save()
-             logger.info(
-                 f"ProjectIndex updated with {len(generated) + len(updated)} documented files",
-             )
-         except Exception as e:
-             logger.warning(f"Could not update ProjectIndex: {e}")
-
-     def _generate_summary(
-         self,
-         result: OrchestratorResult,
-         items: list[DocumentationItem],
-     ) -> str:
-         """Generate human-readable summary."""
-         lines = [
-             "=" * 60,
-             "DOCUMENTATION ORCHESTRATOR REPORT",
-             "=" * 60,
-             "",
-             f"Project: {self.project_root}",
-             f"Status: {'SUCCESS' if result.success else 'PARTIAL'}",
-             "",
-             "-" * 60,
-             "SCOUT PHASE",
-             "-" * 60,
-             f" Items found: {result.items_found}",
-             f" Stale docs: {result.stale_docs}",
-             f" Missing docs: {result.missing_docs}",
-             f" Cost: ${result.scout_cost:.4f}",
-             "",
-         ]
-
-         if items:
-             lines.extend(
-                 [
-                     "Priority Items:",
-                 ],
-             )
-             for i, item in enumerate(items[:10]):
-                 lines.append(f" {i + 1}. [{item.severity.upper()}] {item.file_path}")
-                 lines.append(f" Type: {item.issue_type}")
-                 if item.days_stale:
-                     lines.append(f" Days stale: {item.days_stale}")
-             lines.append("")
-
-         if not self.dry_run:
-             lines.extend(
-                 [
-                     "-" * 60,
-                     "GENERATION PHASE",
-                     "-" * 60,
-                     f" Items processed: {result.items_processed}",
-                     f" Docs generated: {len(result.docs_generated)}",
-                     f" Docs updated: {len(result.docs_updated)}",
-                     f" Skipped: {len(result.docs_skipped)}",
-                     f" Cost: ${result.generation_cost:.4f}",
-                     "",
-                 ],
-             )
-
-             if result.docs_generated:
-                 lines.append("Generated:")
-                 for doc in result.docs_generated[:5]:
-                     lines.append(f" + {doc}")
-                 if len(result.docs_generated) > 5:
-                     lines.append(f" ... and {len(result.docs_generated) - 5} more")
-                 lines.append("")
-
-             if result.docs_updated:
-                 lines.append("Updated:")
-                 for doc in result.docs_updated[:5]:
-                     lines.append(f" ~ {doc}")
-                 lines.append("")
-
-         if result.errors:
-             lines.extend(
-                 [
-                     "-" * 60,
-                     "ERRORS",
-                     "-" * 60,
-                 ],
-             )
-             for error in result.errors:
-                 lines.append(f" ! {error}")
-             lines.append("")
-
-         if result.warnings:
-             lines.extend(
-                 [
-                     "-" * 60,
-                     "WARNINGS",
-                     "-" * 60,
-                 ],
-             )
-             for warning in result.warnings:
-                 lines.append(f" * {warning}")
-             lines.append("")
-
-         lines.extend(
-             [
-                 "-" * 60,
-                 "TOTALS",
-                 "-" * 60,
-                 f" Total cost: ${result.total_cost:.4f}",
-                 f" Duration: {result.duration_ms}ms",
-                 f" Export path: {self.export_path}",
-                 "",
-                 "=" * 60,
-             ],
-         )
-
-         return "\n".join(lines)
-
-     async def execute(
-         self,
-         context: dict | None = None,
-         **kwargs: Any,
-     ) -> OrchestratorResult:
-         """Execute the full documentation orchestration pipeline.
-
-         Args:
-             context: Additional context for the workflows
-             **kwargs: Additional arguments
-
-         Returns:
-             OrchestratorResult with full details
-
-         """
-         started_at = datetime.now()
-         result = OrchestratorResult(success=False, phase="scout")
-         errors: list[str] = []
-         warnings: list[str] = []
-
-         # Validate dependencies
-         if not HAS_SCOUT:
-             warnings.append("ManageDocumentationCrew not available - using ProjectIndex fallback")
-         if not HAS_WRITER:
-             errors.append("DocumentGenerationWorkflow not available - cannot generate docs")
-             if not self.dry_run:
-                 result.errors = errors
-                 result.warnings = warnings
-                 return result
-         if not HAS_PROJECT_INDEX:
-             warnings.append("ProjectIndex not available - limited file tracking")
-
-         # Phase 1: Scout
-         print("\n" + "=" * 60)
-         print("DOCUMENTATION ORCHESTRATOR")
-         print("=" * 60)
-
-         items, scout_cost = await self._run_scout_phase()
-         self._total_cost += scout_cost
-
-         result.items_found = len(items)
-         result.stale_docs = sum(1 for i in items if i.issue_type == "stale_doc")
-         result.missing_docs = sum(1 for i in items if i.issue_type != "stale_doc")
-         result.scout_cost = scout_cost
-         result.phase = "prioritize"
-
-         if not items:
-             print("\n[✓] No documentation gaps found!")
-             result.success = True
-             result.phase = "complete"
-             result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
-             result.total_cost = self._total_cost
-             result.summary = self._generate_summary(result, items)
-             return result
-
-         # Phase 2: Prioritize
-         print(f"\n[PRIORITIZE] Found {len(items)} items, selecting top {self.max_items}...")
-         priority_items = self._prioritize_items(items)
-         self._items = priority_items
-
-         print("\nTop priority items:")
-         for i, item in enumerate(priority_items):
-             status = "STALE" if item.issue_type == "stale_doc" else "MISSING"
-             print(f" {i + 1}. [{status}] {item.file_path}")
-
-         # Check for dry run
-         if self.dry_run:
-             print("\n[DRY RUN] Skipping generation phase")
-             result.success = True
-             result.phase = "complete"
-             result.docs_skipped = [i.file_path for i in priority_items]
-             result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
-             result.total_cost = self._total_cost
-             result.summary = self._generate_summary(result, priority_items)
-             return result
-
-         # Check for approval if not auto_approve
-         if not self.auto_approve:
-             print(f"\n[!] Ready to generate documentation for {len(priority_items)} items")
-             print(f" Estimated max cost: ${self.max_cost:.2f}")
-             print("\n Set auto_approve=True to proceed automatically")
-             result.success = True
-             result.phase = "awaiting_approval"
-             result.docs_skipped = [i.file_path for i in priority_items]
-             result.warnings = warnings
-             result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
-             result.total_cost = self._total_cost
-             result.summary = self._generate_summary(result, priority_items)
-             return result
-
-         # Phase 3: Generate
-         result.phase = "generate"
-         generated, updated, skipped, gen_cost = await self._run_generate_phase(priority_items)
-         self._total_cost += gen_cost
-
-         result.docs_generated = generated
-         result.docs_updated = updated
-         result.docs_skipped = skipped
-         result.generation_cost = gen_cost
-         result.items_processed = len(generated) + len(updated)
-
-         # Phase 4: Update index
-         result.phase = "update"
-         self._update_project_index(generated, updated)
-
-         # Finalize
-         result.success = True
-         result.phase = "complete"
-         result.total_cost = self._total_cost
-         result.errors = errors
-         result.warnings = warnings
-         result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
-         result.summary = self._generate_summary(result, priority_items)
-
-         print(result.summary)
-
-         return result
-
-     async def scout_only(self) -> OrchestratorResult:
-         """Run only the scout phase (equivalent to dry_run=True)."""
-         self.dry_run = True
-         return await self.execute()
-
-     async def scout_as_json(self) -> dict:
-         """Run scout phase and return JSON-serializable results.
-
-         Used by VSCode extension to display results in Documentation Analysis panel.
-
-         Returns:
-             Dict with stats and items list ready for JSON serialization
-
-         """
-         import io
-         import sys
-
-         self.dry_run = True
-         # Suppress console output during scout
-         old_stdout = sys.stdout
-         sys.stdout = io.StringIO()
-         try:
-             result = await self.execute()
-         finally:
-             sys.stdout = old_stdout
-
-         return {
-             "success": result.success,
-             "stats": {
-                 "items_found": result.items_found,
-                 "stale_docs": result.stale_docs,
-                 "missing_docs": result.missing_docs,
-                 "scout_cost": result.scout_cost,
-                 "duration_ms": result.duration_ms,
-                 "excluded_count": len(self._excluded_files),
-             },
-             "items": [
-                 {
-                     "id": f"{item.file_path}:{item.issue_type}",
-                     "file_path": item.file_path,
-                     "issue_type": item.issue_type,
-                     "severity": item.severity,
-                     "priority": item.priority,
-                     "details": item.details,
-                     "days_stale": item.days_stale,
-                     "loc": item.loc,
-                     "related_source": item.related_source[:3] if item.related_source else [],
-                 }
-                 for item in self._items
-             ],
-             "excluded": self._excluded_files,  # Files excluded from scanning
-         }
-
-     async def generate_for_files(
-         self,
-         file_paths: list[str],
-         **kwargs: Any,
-     ) -> dict:
-         """Generate documentation for a list of specific files.
-
-         Bypasses scout phase and generates directly for each file.
-
-         Args:
-             file_paths: List of file paths to document
-             **kwargs: Additional arguments for DocumentGenerationWorkflow
-
-         Returns:
-             Dict with results for each file
-
-         """
-         generated: list[dict[str, str | float | None]] = []
-         failed: list[dict[str, str]] = []
-         skipped: list[dict[str, str]] = []
-         total_cost = 0.0
-         success = True
-
-         for file_path in file_paths:
-             # Skip excluded files (requirements.txt, package.json, etc.)
-             if self._should_exclude(file_path):
-                 skipped.append(
-                     {
-                         "file": file_path,
-                         "reason": "Excluded by pattern (dependency/config/binary file)",
-                     },
-                 )
-                 continue
-
-             try:
-                 result = await self.generate_for_file(file_path, **kwargs)
-                 if isinstance(result, dict) and result.get("error"):
-                     failed.append({"file": file_path, "error": result["error"]})
-                 else:
-                     export_path = result.get("export_path") if isinstance(result, dict) else None
-                     cost = result.get("accumulated_cost", 0) if isinstance(result, dict) else 0
-                     generated.append(
-                         {
-                             "file": file_path,
-                             "export_path": export_path,
-                             "cost": cost,
-                         },
-                     )
-                     total_cost += cost
-             except Exception as e:
-                 failed.append({"file": file_path, "error": str(e)})
-                 success = False
-
-         if failed:
-             success = len(generated) > 0  # Partial success
-
-         return {
-             "success": success,
-             "generated": generated,
-             "failed": failed,
-             "skipped": skipped,
-             "total_cost": total_cost,
-         }
-
-     async def generate_for_file(
-         self,
-         file_path: str,
-         **kwargs: Any,
-     ) -> dict:
-         """Generate documentation for a specific file.
-
-         Bypasses scout phase and generates directly.
-
-         Args:
-             file_path: Path to the file to document
-             **kwargs: Additional arguments for DocumentGenerationWorkflow
-
-         Returns:
-             Generation result dict
-
-         """
-         if self._writer is None:
-             return {"error": "DocumentGenerationWorkflow not available"}
-
-         source_path = self.project_root / file_path
-         source_content = ""
-
-         if source_path.exists():
-             try:
-                 source_content = source_path.read_text(encoding="utf-8")
-             except Exception as e:
-                 return {"error": f"Could not read file: {e}"}
-
-         result: dict = await self._writer.execute(
-             source_code=source_content,
-             target=file_path,
-             doc_type=kwargs.get("doc_type", self.doc_type),
-             audience=kwargs.get("audience", self.audience),
-         )
-
-         # Update index
-         if isinstance(result, dict) and result.get("document"):
-             self._update_project_index([file_path], [])
-
-         return result
-
-
- # CLI entry point
- if __name__ == "__main__":
-     import json
-     import sys
-
-     async def main():
-         path = sys.argv[1] if len(sys.argv) > 1 and not sys.argv[1].startswith("-") else "."
-         dry_run = "--dry-run" in sys.argv
-         auto_approve = "--auto" in sys.argv
-         scout_json = "--scout-json" in sys.argv
-
-         # Parse --generate-files argument
-         generate_files: list[str] | None = None
-         for i, arg in enumerate(sys.argv):
-             if arg == "--generate-files" and i + 1 < len(sys.argv):
-                 try:
-                     generate_files = json.loads(sys.argv[i + 1])
-                 except json.JSONDecodeError:
-                     print("Error: --generate-files must be valid JSON array", file=sys.stderr)
-                     sys.exit(1)
-
-         orchestrator = DocumentationOrchestrator(
-             project_root=path,
-             max_items=10,
-             max_cost=5.0,
-             dry_run=dry_run,
-             auto_approve=auto_approve,
-         )
-
-         # JSON scout output for VSCode extension
-         if scout_json:
-             result = await orchestrator.scout_as_json()
-             print(json.dumps(result))
-             return
-
-         # Generate specific files
-         if generate_files:
-             result = await orchestrator.generate_for_files(generate_files)
-             print(json.dumps(result))
-             return
-
-         # Normal execution
-         print("\nDocumentationOrchestrator")
-         print(f"Project: {path}")
-         print(f"Mode: {'DRY RUN' if dry_run else 'FULL' if auto_approve else 'SCOUT + AWAIT'}")
-
-         print("\nComponents:")
-         print(f" Scout (ManageDocumentationCrew): {'✓' if orchestrator._scout else '✗'}")
-         print(f" Writer (DocumentGenerationWorkflow): {'✓' if orchestrator._writer else '✗'}")
-         print(f" ProjectIndex: {'✓' if orchestrator._project_index else '✗'}")
-
-         result = await orchestrator.execute()
-
-         if not result.summary:
-             print(f"\nResult: {result.to_dict()}")
-
-     asyncio.run(main())
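
For context, the removed CLI above wraps a small programmatic API (execute, scout_as_json, generate_for_files). The following is a minimal sketch of how the 5.2.1 orchestrator could be driven in scout-only mode, reconstructed from the removed code; the import path is an assumption, not taken from the package.

    import asyncio

    # Hypothetical import path - substitute the module this diff removes.
    from empathy_llm_toolkit.documentation_orchestrator import DocumentationOrchestrator

    async def scout_project() -> None:
        orchestrator = DocumentationOrchestrator(
            project_root=".",     # project to scan
            max_items=10,         # cap on prioritized documentation gaps
            max_cost=5.0,         # dollar budget ceiling
            dry_run=True,         # scout and prioritize only; skip generation
            auto_approve=False,   # never generate without explicit approval
        )
        result = await orchestrator.execute()
        print(result.summary)     # same summary the CLI prints

    asyncio.run(scout_project())

With dry_run=True the run stops after the prioritize phase, so result.docs_skipped lists the files that would have been documented and no generation cost is incurred.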