empathy-framework 5.2.1-py3-none-any.whl → 5.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (480)
  1. empathy_framework-5.4.0.dist-info/METADATA +47 -0
  2. empathy_framework-5.4.0.dist-info/RECORD +8 -0
  3. {empathy_framework-5.2.1.dist-info → empathy_framework-5.4.0.dist-info}/top_level.txt +0 -1
  4. empathy_healthcare_plugin/__init__.py +12 -11
  5. empathy_llm_toolkit/__init__.py +12 -26
  6. empathy_os/__init__.py +12 -356
  7. empathy_software_plugin/__init__.py +12 -11
  8. empathy_framework-5.2.1.dist-info/METADATA +0 -1002
  9. empathy_framework-5.2.1.dist-info/RECORD +0 -478
  10. empathy_framework-5.2.1.dist-info/entry_points.txt +0 -26
  11. empathy_framework-5.2.1.dist-info/licenses/LICENSE +0 -201
  12. empathy_framework-5.2.1.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -101
  13. empathy_healthcare_plugin/monitors/__init__.py +0 -9
  14. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +0 -315
  15. empathy_healthcare_plugin/monitors/monitoring/__init__.py +0 -44
  16. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +0 -300
  17. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +0 -214
  18. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +0 -306
  19. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +0 -389
  20. empathy_healthcare_plugin/protocols/cardiac.json +0 -93
  21. empathy_healthcare_plugin/protocols/post_operative.json +0 -92
  22. empathy_healthcare_plugin/protocols/respiratory.json +0 -92
  23. empathy_healthcare_plugin/protocols/sepsis.json +0 -141
  24. empathy_llm_toolkit/README.md +0 -553
  25. empathy_llm_toolkit/agent_factory/__init__.py +0 -53
  26. empathy_llm_toolkit/agent_factory/adapters/__init__.py +0 -85
  27. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +0 -312
  28. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +0 -483
  29. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +0 -298
  30. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +0 -362
  31. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +0 -333
  32. empathy_llm_toolkit/agent_factory/adapters/native.py +0 -228
  33. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +0 -423
  34. empathy_llm_toolkit/agent_factory/base.py +0 -305
  35. empathy_llm_toolkit/agent_factory/crews/__init__.py +0 -67
  36. empathy_llm_toolkit/agent_factory/crews/code_review.py +0 -1113
  37. empathy_llm_toolkit/agent_factory/crews/health_check.py +0 -1262
  38. empathy_llm_toolkit/agent_factory/crews/refactoring.py +0 -1128
  39. empathy_llm_toolkit/agent_factory/crews/security_audit.py +0 -1018
  40. empathy_llm_toolkit/agent_factory/decorators.py +0 -287
  41. empathy_llm_toolkit/agent_factory/factory.py +0 -558
  42. empathy_llm_toolkit/agent_factory/framework.py +0 -193
  43. empathy_llm_toolkit/agent_factory/memory_integration.py +0 -328
  44. empathy_llm_toolkit/agent_factory/resilient.py +0 -320
  45. empathy_llm_toolkit/agents_md/__init__.py +0 -22
  46. empathy_llm_toolkit/agents_md/loader.py +0 -218
  47. empathy_llm_toolkit/agents_md/parser.py +0 -271
  48. empathy_llm_toolkit/agents_md/registry.py +0 -307
  49. empathy_llm_toolkit/claude_memory.py +0 -466
  50. empathy_llm_toolkit/cli/__init__.py +0 -8
  51. empathy_llm_toolkit/cli/sync_claude.py +0 -487
  52. empathy_llm_toolkit/code_health.py +0 -1313
  53. empathy_llm_toolkit/commands/__init__.py +0 -51
  54. empathy_llm_toolkit/commands/context.py +0 -375
  55. empathy_llm_toolkit/commands/loader.py +0 -301
  56. empathy_llm_toolkit/commands/models.py +0 -231
  57. empathy_llm_toolkit/commands/parser.py +0 -371
  58. empathy_llm_toolkit/commands/registry.py +0 -429
  59. empathy_llm_toolkit/config/__init__.py +0 -29
  60. empathy_llm_toolkit/config/unified.py +0 -291
  61. empathy_llm_toolkit/context/__init__.py +0 -22
  62. empathy_llm_toolkit/context/compaction.py +0 -455
  63. empathy_llm_toolkit/context/manager.py +0 -434
  64. empathy_llm_toolkit/contextual_patterns.py +0 -361
  65. empathy_llm_toolkit/core.py +0 -907
  66. empathy_llm_toolkit/git_pattern_extractor.py +0 -435
  67. empathy_llm_toolkit/hooks/__init__.py +0 -24
  68. empathy_llm_toolkit/hooks/config.py +0 -306
  69. empathy_llm_toolkit/hooks/executor.py +0 -289
  70. empathy_llm_toolkit/hooks/registry.py +0 -302
  71. empathy_llm_toolkit/hooks/scripts/__init__.py +0 -39
  72. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +0 -201
  73. empathy_llm_toolkit/hooks/scripts/first_time_init.py +0 -285
  74. empathy_llm_toolkit/hooks/scripts/pre_compact.py +0 -207
  75. empathy_llm_toolkit/hooks/scripts/session_end.py +0 -183
  76. empathy_llm_toolkit/hooks/scripts/session_start.py +0 -163
  77. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +0 -225
  78. empathy_llm_toolkit/learning/__init__.py +0 -30
  79. empathy_llm_toolkit/learning/evaluator.py +0 -438
  80. empathy_llm_toolkit/learning/extractor.py +0 -514
  81. empathy_llm_toolkit/learning/storage.py +0 -560
  82. empathy_llm_toolkit/levels.py +0 -227
  83. empathy_llm_toolkit/pattern_confidence.py +0 -414
  84. empathy_llm_toolkit/pattern_resolver.py +0 -272
  85. empathy_llm_toolkit/pattern_summary.py +0 -350
  86. empathy_llm_toolkit/providers.py +0 -967
  87. empathy_llm_toolkit/routing/__init__.py +0 -32
  88. empathy_llm_toolkit/routing/model_router.py +0 -362
  89. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +0 -413
  90. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +0 -384
  91. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +0 -271
  92. empathy_llm_toolkit/security/QUICK_REFERENCE.md +0 -316
  93. empathy_llm_toolkit/security/README.md +0 -262
  94. empathy_llm_toolkit/security/__init__.py +0 -62
  95. empathy_llm_toolkit/security/audit_logger.py +0 -929
  96. empathy_llm_toolkit/security/audit_logger_example.py +0 -152
  97. empathy_llm_toolkit/security/pii_scrubber.py +0 -640
  98. empathy_llm_toolkit/security/secrets_detector.py +0 -678
  99. empathy_llm_toolkit/security/secrets_detector_example.py +0 -304
  100. empathy_llm_toolkit/security/secure_memdocs.py +0 -1192
  101. empathy_llm_toolkit/security/secure_memdocs_example.py +0 -278
  102. empathy_llm_toolkit/session_status.py +0 -745
  103. empathy_llm_toolkit/state.py +0 -246
  104. empathy_llm_toolkit/utils/__init__.py +0 -5
  105. empathy_llm_toolkit/utils/tokens.py +0 -349
  106. empathy_os/adaptive/__init__.py +0 -13
  107. empathy_os/adaptive/task_complexity.py +0 -127
  108. empathy_os/agent_monitoring.py +0 -414
  109. empathy_os/cache/__init__.py +0 -117
  110. empathy_os/cache/base.py +0 -166
  111. empathy_os/cache/dependency_manager.py +0 -256
  112. empathy_os/cache/hash_only.py +0 -251
  113. empathy_os/cache/hybrid.py +0 -453
  114. empathy_os/cache/storage.py +0 -285
  115. empathy_os/cache_monitor.py +0 -356
  116. empathy_os/cache_stats.py +0 -298
  117. empathy_os/cli/__init__.py +0 -152
  118. empathy_os/cli/__main__.py +0 -12
  119. empathy_os/cli/commands/__init__.py +0 -1
  120. empathy_os/cli/commands/batch.py +0 -256
  121. empathy_os/cli/commands/cache.py +0 -248
  122. empathy_os/cli/commands/help.py +0 -331
  123. empathy_os/cli/commands/info.py +0 -140
  124. empathy_os/cli/commands/inspect.py +0 -436
  125. empathy_os/cli/commands/inspection.py +0 -57
  126. empathy_os/cli/commands/memory.py +0 -48
  127. empathy_os/cli/commands/metrics.py +0 -92
  128. empathy_os/cli/commands/orchestrate.py +0 -184
  129. empathy_os/cli/commands/patterns.py +0 -207
  130. empathy_os/cli/commands/profiling.py +0 -198
  131. empathy_os/cli/commands/provider.py +0 -98
  132. empathy_os/cli/commands/routing.py +0 -285
  133. empathy_os/cli/commands/setup.py +0 -96
  134. empathy_os/cli/commands/status.py +0 -235
  135. empathy_os/cli/commands/sync.py +0 -166
  136. empathy_os/cli/commands/tier.py +0 -121
  137. empathy_os/cli/commands/utilities.py +0 -114
  138. empathy_os/cli/commands/workflow.py +0 -575
  139. empathy_os/cli/core.py +0 -32
  140. empathy_os/cli/parsers/__init__.py +0 -68
  141. empathy_os/cli/parsers/batch.py +0 -118
  142. empathy_os/cli/parsers/cache 2.py +0 -65
  143. empathy_os/cli/parsers/cache.py +0 -65
  144. empathy_os/cli/parsers/help.py +0 -41
  145. empathy_os/cli/parsers/info.py +0 -26
  146. empathy_os/cli/parsers/inspect.py +0 -66
  147. empathy_os/cli/parsers/metrics.py +0 -42
  148. empathy_os/cli/parsers/orchestrate.py +0 -61
  149. empathy_os/cli/parsers/patterns.py +0 -54
  150. empathy_os/cli/parsers/provider.py +0 -40
  151. empathy_os/cli/parsers/routing.py +0 -110
  152. empathy_os/cli/parsers/setup.py +0 -42
  153. empathy_os/cli/parsers/status.py +0 -47
  154. empathy_os/cli/parsers/sync.py +0 -31
  155. empathy_os/cli/parsers/tier.py +0 -33
  156. empathy_os/cli/parsers/workflow.py +0 -77
  157. empathy_os/cli/utils/__init__.py +0 -1
  158. empathy_os/cli/utils/data.py +0 -242
  159. empathy_os/cli/utils/helpers.py +0 -68
  160. empathy_os/cli_legacy.py +0 -3957
  161. empathy_os/cli_minimal.py +0 -1159
  162. empathy_os/cli_router 2.py +0 -416
  163. empathy_os/cli_router.py +0 -437
  164. empathy_os/cli_unified.py +0 -814
  165. empathy_os/config/__init__.py +0 -66
  166. empathy_os/config/xml_config.py +0 -286
  167. empathy_os/config.py +0 -532
  168. empathy_os/coordination.py +0 -870
  169. empathy_os/core.py +0 -1511
  170. empathy_os/core_modules/__init__.py +0 -15
  171. empathy_os/cost_tracker.py +0 -626
  172. empathy_os/dashboard/__init__.py +0 -41
  173. empathy_os/dashboard/app 2.py +0 -512
  174. empathy_os/dashboard/app.py +0 -512
  175. empathy_os/dashboard/simple_server 2.py +0 -403
  176. empathy_os/dashboard/simple_server.py +0 -403
  177. empathy_os/dashboard/standalone_server 2.py +0 -536
  178. empathy_os/dashboard/standalone_server.py +0 -547
  179. empathy_os/discovery.py +0 -306
  180. empathy_os/emergence.py +0 -306
  181. empathy_os/exceptions.py +0 -123
  182. empathy_os/feedback_loops.py +0 -373
  183. empathy_os/hot_reload/README.md +0 -473
  184. empathy_os/hot_reload/__init__.py +0 -62
  185. empathy_os/hot_reload/config.py +0 -83
  186. empathy_os/hot_reload/integration.py +0 -229
  187. empathy_os/hot_reload/reloader.py +0 -298
  188. empathy_os/hot_reload/watcher.py +0 -183
  189. empathy_os/hot_reload/websocket.py +0 -177
  190. empathy_os/levels.py +0 -577
  191. empathy_os/leverage_points.py +0 -441
  192. empathy_os/logging_config.py +0 -261
  193. empathy_os/mcp/__init__.py +0 -10
  194. empathy_os/mcp/server.py +0 -506
  195. empathy_os/memory/__init__.py +0 -237
  196. empathy_os/memory/claude_memory.py +0 -469
  197. empathy_os/memory/config.py +0 -224
  198. empathy_os/memory/control_panel.py +0 -1290
  199. empathy_os/memory/control_panel_support.py +0 -145
  200. empathy_os/memory/cross_session.py +0 -845
  201. empathy_os/memory/edges.py +0 -179
  202. empathy_os/memory/encryption.py +0 -159
  203. empathy_os/memory/file_session.py +0 -770
  204. empathy_os/memory/graph.py +0 -570
  205. empathy_os/memory/long_term.py +0 -913
  206. empathy_os/memory/long_term_types.py +0 -99
  207. empathy_os/memory/mixins/__init__.py +0 -25
  208. empathy_os/memory/mixins/backend_init_mixin.py +0 -244
  209. empathy_os/memory/mixins/capabilities_mixin.py +0 -199
  210. empathy_os/memory/mixins/handoff_mixin.py +0 -208
  211. empathy_os/memory/mixins/lifecycle_mixin.py +0 -49
  212. empathy_os/memory/mixins/long_term_mixin.py +0 -352
  213. empathy_os/memory/mixins/promotion_mixin.py +0 -109
  214. empathy_os/memory/mixins/short_term_mixin.py +0 -182
  215. empathy_os/memory/nodes.py +0 -179
  216. empathy_os/memory/redis_bootstrap.py +0 -540
  217. empathy_os/memory/security/__init__.py +0 -31
  218. empathy_os/memory/security/audit_logger.py +0 -932
  219. empathy_os/memory/security/pii_scrubber.py +0 -640
  220. empathy_os/memory/security/secrets_detector.py +0 -678
  221. empathy_os/memory/short_term.py +0 -2150
  222. empathy_os/memory/simple_storage.py +0 -302
  223. empathy_os/memory/storage/__init__.py +0 -15
  224. empathy_os/memory/storage_backend.py +0 -167
  225. empathy_os/memory/summary_index.py +0 -583
  226. empathy_os/memory/types.py +0 -441
  227. empathy_os/memory/unified.py +0 -182
  228. empathy_os/meta_workflows/__init__.py +0 -74
  229. empathy_os/meta_workflows/agent_creator.py +0 -248
  230. empathy_os/meta_workflows/builtin_templates.py +0 -567
  231. empathy_os/meta_workflows/cli_commands/__init__.py +0 -56
  232. empathy_os/meta_workflows/cli_commands/agent_commands.py +0 -321
  233. empathy_os/meta_workflows/cli_commands/analytics_commands.py +0 -442
  234. empathy_os/meta_workflows/cli_commands/config_commands.py +0 -232
  235. empathy_os/meta_workflows/cli_commands/memory_commands.py +0 -182
  236. empathy_os/meta_workflows/cli_commands/template_commands.py +0 -354
  237. empathy_os/meta_workflows/cli_commands/workflow_commands.py +0 -382
  238. empathy_os/meta_workflows/cli_meta_workflows.py +0 -59
  239. empathy_os/meta_workflows/form_engine.py +0 -292
  240. empathy_os/meta_workflows/intent_detector.py +0 -409
  241. empathy_os/meta_workflows/models.py +0 -569
  242. empathy_os/meta_workflows/pattern_learner.py +0 -738
  243. empathy_os/meta_workflows/plan_generator.py +0 -384
  244. empathy_os/meta_workflows/session_context.py +0 -397
  245. empathy_os/meta_workflows/template_registry.py +0 -229
  246. empathy_os/meta_workflows/workflow.py +0 -984
  247. empathy_os/metrics/__init__.py +0 -12
  248. empathy_os/metrics/collector.py +0 -31
  249. empathy_os/metrics/prompt_metrics.py +0 -194
  250. empathy_os/models/__init__.py +0 -172
  251. empathy_os/models/__main__.py +0 -13
  252. empathy_os/models/adaptive_routing 2.py +0 -437
  253. empathy_os/models/adaptive_routing.py +0 -437
  254. empathy_os/models/auth_cli.py +0 -444
  255. empathy_os/models/auth_strategy.py +0 -450
  256. empathy_os/models/cli.py +0 -655
  257. empathy_os/models/empathy_executor.py +0 -354
  258. empathy_os/models/executor.py +0 -257
  259. empathy_os/models/fallback.py +0 -762
  260. empathy_os/models/provider_config.py +0 -282
  261. empathy_os/models/registry.py +0 -472
  262. empathy_os/models/tasks.py +0 -359
  263. empathy_os/models/telemetry/__init__.py +0 -71
  264. empathy_os/models/telemetry/analytics.py +0 -594
  265. empathy_os/models/telemetry/backend.py +0 -196
  266. empathy_os/models/telemetry/data_models.py +0 -431
  267. empathy_os/models/telemetry/storage.py +0 -489
  268. empathy_os/models/token_estimator.py +0 -420
  269. empathy_os/models/validation.py +0 -280
  270. empathy_os/monitoring/__init__.py +0 -52
  271. empathy_os/monitoring/alerts.py +0 -946
  272. empathy_os/monitoring/alerts_cli.py +0 -448
  273. empathy_os/monitoring/multi_backend.py +0 -271
  274. empathy_os/monitoring/otel_backend.py +0 -362
  275. empathy_os/optimization/__init__.py +0 -19
  276. empathy_os/optimization/context_optimizer.py +0 -272
  277. empathy_os/orchestration/__init__.py +0 -67
  278. empathy_os/orchestration/agent_templates.py +0 -707
  279. empathy_os/orchestration/config_store.py +0 -499
  280. empathy_os/orchestration/execution_strategies.py +0 -2111
  281. empathy_os/orchestration/meta_orchestrator.py +0 -1168
  282. empathy_os/orchestration/pattern_learner.py +0 -696
  283. empathy_os/orchestration/real_tools.py +0 -931
  284. empathy_os/pattern_cache.py +0 -187
  285. empathy_os/pattern_library.py +0 -542
  286. empathy_os/patterns/debugging/all_patterns.json +0 -81
  287. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +0 -77
  288. empathy_os/patterns/refactoring_memory.json +0 -89
  289. empathy_os/persistence.py +0 -564
  290. empathy_os/platform_utils.py +0 -265
  291. empathy_os/plugins/__init__.py +0 -28
  292. empathy_os/plugins/base.py +0 -361
  293. empathy_os/plugins/registry.py +0 -268
  294. empathy_os/project_index/__init__.py +0 -32
  295. empathy_os/project_index/cli.py +0 -335
  296. empathy_os/project_index/index.py +0 -667
  297. empathy_os/project_index/models.py +0 -504
  298. empathy_os/project_index/reports.py +0 -474
  299. empathy_os/project_index/scanner.py +0 -777
  300. empathy_os/project_index/scanner_parallel 2.py +0 -291
  301. empathy_os/project_index/scanner_parallel.py +0 -291
  302. empathy_os/prompts/__init__.py +0 -61
  303. empathy_os/prompts/config.py +0 -77
  304. empathy_os/prompts/context.py +0 -177
  305. empathy_os/prompts/parser.py +0 -285
  306. empathy_os/prompts/registry.py +0 -313
  307. empathy_os/prompts/templates.py +0 -208
  308. empathy_os/redis_config.py +0 -302
  309. empathy_os/redis_memory.py +0 -799
  310. empathy_os/resilience/__init__.py +0 -56
  311. empathy_os/resilience/circuit_breaker.py +0 -256
  312. empathy_os/resilience/fallback.py +0 -179
  313. empathy_os/resilience/health.py +0 -300
  314. empathy_os/resilience/retry.py +0 -209
  315. empathy_os/resilience/timeout.py +0 -135
  316. empathy_os/routing/__init__.py +0 -43
  317. empathy_os/routing/chain_executor.py +0 -433
  318. empathy_os/routing/classifier.py +0 -217
  319. empathy_os/routing/smart_router.py +0 -234
  320. empathy_os/routing/workflow_registry.py +0 -343
  321. empathy_os/scaffolding/README.md +0 -589
  322. empathy_os/scaffolding/__init__.py +0 -35
  323. empathy_os/scaffolding/__main__.py +0 -14
  324. empathy_os/scaffolding/cli.py +0 -240
  325. empathy_os/socratic/__init__.py +0 -256
  326. empathy_os/socratic/ab_testing.py +0 -958
  327. empathy_os/socratic/blueprint.py +0 -533
  328. empathy_os/socratic/cli.py +0 -703
  329. empathy_os/socratic/collaboration.py +0 -1114
  330. empathy_os/socratic/domain_templates.py +0 -924
  331. empathy_os/socratic/embeddings.py +0 -738
  332. empathy_os/socratic/engine.py +0 -794
  333. empathy_os/socratic/explainer.py +0 -682
  334. empathy_os/socratic/feedback.py +0 -772
  335. empathy_os/socratic/forms.py +0 -629
  336. empathy_os/socratic/generator.py +0 -732
  337. empathy_os/socratic/llm_analyzer.py +0 -637
  338. empathy_os/socratic/mcp_server.py +0 -702
  339. empathy_os/socratic/session.py +0 -312
  340. empathy_os/socratic/storage.py +0 -667
  341. empathy_os/socratic/success.py +0 -730
  342. empathy_os/socratic/visual_editor.py +0 -860
  343. empathy_os/socratic/web_ui.py +0 -958
  344. empathy_os/telemetry/__init__.py +0 -39
  345. empathy_os/telemetry/agent_coordination 2.py +0 -478
  346. empathy_os/telemetry/agent_coordination.py +0 -476
  347. empathy_os/telemetry/agent_tracking 2.py +0 -350
  348. empathy_os/telemetry/agent_tracking.py +0 -348
  349. empathy_os/telemetry/approval_gates 2.py +0 -563
  350. empathy_os/telemetry/approval_gates.py +0 -551
  351. empathy_os/telemetry/cli.py +0 -1231
  352. empathy_os/telemetry/commands/__init__.py +0 -14
  353. empathy_os/telemetry/commands/dashboard_commands.py +0 -696
  354. empathy_os/telemetry/event_streaming 2.py +0 -405
  355. empathy_os/telemetry/event_streaming.py +0 -405
  356. empathy_os/telemetry/feedback_loop 2.py +0 -557
  357. empathy_os/telemetry/feedback_loop.py +0 -554
  358. empathy_os/telemetry/usage_tracker.py +0 -591
  359. empathy_os/templates.py +0 -754
  360. empathy_os/test_generator/__init__.py +0 -38
  361. empathy_os/test_generator/__main__.py +0 -14
  362. empathy_os/test_generator/cli.py +0 -234
  363. empathy_os/test_generator/generator.py +0 -355
  364. empathy_os/test_generator/risk_analyzer.py +0 -216
  365. empathy_os/tier_recommender.py +0 -384
  366. empathy_os/tools.py +0 -183
  367. empathy_os/trust/__init__.py +0 -28
  368. empathy_os/trust/circuit_breaker.py +0 -579
  369. empathy_os/trust_building.py +0 -527
  370. empathy_os/validation/__init__.py +0 -19
  371. empathy_os/validation/xml_validator.py +0 -281
  372. empathy_os/vscode_bridge 2.py +0 -173
  373. empathy_os/vscode_bridge.py +0 -173
  374. empathy_os/workflow_commands.py +0 -780
  375. empathy_os/workflow_patterns/__init__.py +0 -33
  376. empathy_os/workflow_patterns/behavior.py +0 -249
  377. empathy_os/workflow_patterns/core.py +0 -76
  378. empathy_os/workflow_patterns/output.py +0 -99
  379. empathy_os/workflow_patterns/registry.py +0 -255
  380. empathy_os/workflow_patterns/structural.py +0 -288
  381. empathy_os/workflows/__init__.py +0 -539
  382. empathy_os/workflows/autonomous_test_gen.py +0 -1268
  383. empathy_os/workflows/base.py +0 -2667
  384. empathy_os/workflows/batch_processing.py +0 -342
  385. empathy_os/workflows/bug_predict.py +0 -1084
  386. empathy_os/workflows/builder.py +0 -273
  387. empathy_os/workflows/caching.py +0 -253
  388. empathy_os/workflows/code_review.py +0 -1048
  389. empathy_os/workflows/code_review_adapters.py +0 -312
  390. empathy_os/workflows/code_review_pipeline.py +0 -722
  391. empathy_os/workflows/config.py +0 -645
  392. empathy_os/workflows/dependency_check.py +0 -644
  393. empathy_os/workflows/document_gen/__init__.py +0 -25
  394. empathy_os/workflows/document_gen/config.py +0 -30
  395. empathy_os/workflows/document_gen/report_formatter.py +0 -162
  396. empathy_os/workflows/document_gen/workflow.py +0 -1426
  397. empathy_os/workflows/document_gen.py +0 -29
  398. empathy_os/workflows/document_manager.py +0 -216
  399. empathy_os/workflows/document_manager_README.md +0 -134
  400. empathy_os/workflows/documentation_orchestrator.py +0 -1205
  401. empathy_os/workflows/history.py +0 -510
  402. empathy_os/workflows/keyboard_shortcuts/__init__.py +0 -39
  403. empathy_os/workflows/keyboard_shortcuts/generators.py +0 -391
  404. empathy_os/workflows/keyboard_shortcuts/parsers.py +0 -416
  405. empathy_os/workflows/keyboard_shortcuts/prompts.py +0 -295
  406. empathy_os/workflows/keyboard_shortcuts/schema.py +0 -193
  407. empathy_os/workflows/keyboard_shortcuts/workflow.py +0 -509
  408. empathy_os/workflows/llm_base.py +0 -363
  409. empathy_os/workflows/manage_docs.py +0 -87
  410. empathy_os/workflows/manage_docs_README.md +0 -134
  411. empathy_os/workflows/manage_documentation.py +0 -821
  412. empathy_os/workflows/new_sample_workflow1.py +0 -149
  413. empathy_os/workflows/new_sample_workflow1_README.md +0 -150
  414. empathy_os/workflows/orchestrated_health_check.py +0 -849
  415. empathy_os/workflows/orchestrated_release_prep.py +0 -600
  416. empathy_os/workflows/output.py +0 -410
  417. empathy_os/workflows/perf_audit.py +0 -863
  418. empathy_os/workflows/pr_review.py +0 -762
  419. empathy_os/workflows/progress.py +0 -779
  420. empathy_os/workflows/progress_server.py +0 -322
  421. empathy_os/workflows/progressive/README 2.md +0 -454
  422. empathy_os/workflows/progressive/README.md +0 -454
  423. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  424. empathy_os/workflows/progressive/__init__.py +0 -82
  425. empathy_os/workflows/progressive/cli 2.py +0 -242
  426. empathy_os/workflows/progressive/cli.py +0 -219
  427. empathy_os/workflows/progressive/core 2.py +0 -488
  428. empathy_os/workflows/progressive/core.py +0 -488
  429. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  430. empathy_os/workflows/progressive/orchestrator.py +0 -723
  431. empathy_os/workflows/progressive/reports 2.py +0 -528
  432. empathy_os/workflows/progressive/reports.py +0 -520
  433. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  434. empathy_os/workflows/progressive/telemetry.py +0 -274
  435. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  436. empathy_os/workflows/progressive/test_gen.py +0 -495
  437. empathy_os/workflows/progressive/workflow 2.py +0 -628
  438. empathy_os/workflows/progressive/workflow.py +0 -589
  439. empathy_os/workflows/refactor_plan.py +0 -694
  440. empathy_os/workflows/release_prep.py +0 -895
  441. empathy_os/workflows/release_prep_crew.py +0 -969
  442. empathy_os/workflows/research_synthesis.py +0 -404
  443. empathy_os/workflows/routing.py +0 -168
  444. empathy_os/workflows/secure_release.py +0 -593
  445. empathy_os/workflows/security_adapters.py +0 -297
  446. empathy_os/workflows/security_audit.py +0 -1329
  447. empathy_os/workflows/security_audit_phase3.py +0 -355
  448. empathy_os/workflows/seo_optimization.py +0 -633
  449. empathy_os/workflows/step_config.py +0 -234
  450. empathy_os/workflows/telemetry_mixin.py +0 -269
  451. empathy_os/workflows/test5.py +0 -125
  452. empathy_os/workflows/test5_README.md +0 -158
  453. empathy_os/workflows/test_coverage_boost_crew.py +0 -849
  454. empathy_os/workflows/test_gen/__init__.py +0 -52
  455. empathy_os/workflows/test_gen/ast_analyzer.py +0 -249
  456. empathy_os/workflows/test_gen/config.py +0 -88
  457. empathy_os/workflows/test_gen/data_models.py +0 -38
  458. empathy_os/workflows/test_gen/report_formatter.py +0 -289
  459. empathy_os/workflows/test_gen/test_templates.py +0 -381
  460. empathy_os/workflows/test_gen/workflow.py +0 -655
  461. empathy_os/workflows/test_gen.py +0 -54
  462. empathy_os/workflows/test_gen_behavioral.py +0 -477
  463. empathy_os/workflows/test_gen_parallel.py +0 -341
  464. empathy_os/workflows/test_lifecycle.py +0 -526
  465. empathy_os/workflows/test_maintenance.py +0 -627
  466. empathy_os/workflows/test_maintenance_cli.py +0 -590
  467. empathy_os/workflows/test_maintenance_crew.py +0 -840
  468. empathy_os/workflows/test_runner.py +0 -622
  469. empathy_os/workflows/tier_tracking.py +0 -531
  470. empathy_os/workflows/xml_enhanced_crew.py +0 -285
  471. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +0 -57
  472. empathy_software_plugin/cli/__init__.py +0 -120
  473. empathy_software_plugin/cli/inspect.py +0 -362
  474. empathy_software_plugin/cli.py +0 -574
  475. empathy_software_plugin/plugin.py +0 -188
  476. workflow_scaffolding/__init__.py +0 -11
  477. workflow_scaffolding/__main__.py +0 -12
  478. workflow_scaffolding/cli.py +0 -206
  479. workflow_scaffolding/generator.py +0 -265
  480. {empathy_framework-5.2.1.dist-info → empathy_framework-5.4.0.dist-info}/WHEEL +0 -0
empathy_os/workflows/security_audit.py
@@ -1,1329 +0,0 @@
-"""Security Audit Workflow
-
-OWASP-focused security scan with intelligent vulnerability assessment.
-Integrates with team security decisions to filter known false positives.
-
-Stages:
-1. triage (CHEAP) - Quick scan for common vulnerability patterns
-2. analyze (CAPABLE) - Deep analysis of flagged areas
-3. assess (CAPABLE) - Risk scoring and severity classification
-4. remediate (PREMIUM) - Generate remediation plan (conditional)
-
-Copyright 2025 Smart-AI-Memory
-Licensed under Fair Source License 0.9
-"""
-
-import json
-import logging
-import re
-from pathlib import Path
-from typing import Any
-
-from .base import BaseWorkflow, ModelTier
-from .step_config import WorkflowStepConfig
-
-logger = logging.getLogger(__name__)
-
-# Define step configurations for executor-based execution
-SECURITY_STEPS = {
-    "remediate": WorkflowStepConfig(
-        name="remediate",
-        task_type="final_review", # Premium tier task
-        tier_hint="premium",
-        description="Generate remediation plan for security vulnerabilities",
-        max_tokens=3000,
-    ),
-}
-
-# Directories to skip during scanning (build artifacts, third-party code)
-SKIP_DIRECTORIES = {
-    ".git",
-    "node_modules",
-    "__pycache__",
-    "venv",
-    ".venv",
-    "env",
-    ".next", # Next.js build output
-    "dist",
-    "build",
-    ".tox",
-    "site", # MkDocs output
-    "ebook-site",
-    "website", # Website build artifacts
-    "anthropic-cookbook", # Third-party examples
-    ".eggs",
-    "*.egg-info",
-    "htmlcov", # Coverage report artifacts
-    "htmlcov_logging", # Coverage report artifacts
-    ".coverage", # Coverage data
-    "vscode-extension", # VSCode extension code (separate security review)
-    "vscode-memory-panel", # VSCode panel code
-    "workflow-dashboard", # Dashboard build
-}
-
-# Patterns that indicate a line is DETECTION code, not vulnerable code
-# These help avoid false positives when scanning security tools
-DETECTION_PATTERNS = [
-    r'["\']eval\s*\(["\']', # String literal like "eval(" (detection, not execution)
-    r'["\']exec\s*\(["\']', # String literal like "exec(" (detection, not execution)
-    r"in\s+content", # Pattern detection like "eval(" in content
-    r"re\.compile", # Regex compilation for detection
-    r"\.finditer\(", # Regex matching for detection
-    r"\.search\(", # Regex searching for detection
-]
-
-# Known fake/test credential patterns to ignore
-FAKE_CREDENTIAL_PATTERNS = [
-    r"EXAMPLE", # AWS example keys
-    r"FAKE",
-    r"TEST",
-    r"your-.*-here",
-    r'"your-key"', # Placeholder key
-    r"abc123xyz",
-    r"\.\.\.", # Placeholder with ellipsis
-    r"test-key",
-    r"mock",
-    r'"hardcoded_secret"', # Literal example text
-    r'"secret"$', # Generic "secret" as value
-    r'"secret123"', # Test password
-    r'"password"$', # Generic password as value
-    r"_PATTERN", # Pattern constants
-    r"_EXAMPLE", # Example constants
-]
-
-# Files/paths that contain security examples/tests (not vulnerabilities)
-SECURITY_EXAMPLE_PATHS = [
-    "owasp_patterns.py",
-    "vulnerability_scanner.py",
-    "test_security",
-    "test_secrets",
-    "test_owasp",
-    "secrets_detector.py", # Security tool with pattern definitions
-    "pii_scrubber.py", # Privacy tool
-    "secure_memdocs", # Secure storage module
-    "/security/", # Security modules
-    "/benchmarks/", # Benchmark files with test fixtures
-    "benchmark_", # Benchmark files (e.g., benchmark_caching.py)
-    "phase_2_setup.py", # Setup file with educational patterns
-]
-
-# Patterns indicating test fixture data (code written to temp files for testing)
-TEST_FIXTURE_PATTERNS = [
-    r"SECURITY_TEST_FILES\s*=", # Dict of test fixture code
-    r"write_text\s*\(", # Writing test data to temp files
-    r"# UNSAFE - DO NOT USE", # Educational comments showing bad patterns
-    r"# SAFE -", # Educational comments showing good patterns
-    r"# INJECTION RISK", # Educational markers
-    r"pragma:\s*allowlist\s*secret", # Explicit allowlist marker
-]
-
-# Test file patterns - findings here are informational, not critical
-TEST_FILE_PATTERNS = [
-    r"/tests/",
-    r"/test_",
-    r"_test\.py$",
-    r"_demo\.py$",
-    r"_example\.py$",
-    r"/examples/",
-    r"/demo",
-    r"coach/vscode-extension", # Example VSCode extension
-]
-
-# Common security vulnerability patterns (OWASP Top 10 inspired)
-SECURITY_PATTERNS = {
-    "sql_injection": {
-        "patterns": [
-            r'execute\s*\(\s*["\'].*%s',
-            r'cursor\.execute\s*\(\s*f["\']',
-            r"\.format\s*\(.*\).*execute",
-        ],
-        "severity": "critical",
-        "owasp": "A03:2021 Injection",
-    },
-    "xss": {
-        "patterns": [
-            r"innerHTML\s*=",
-            r"dangerouslySetInnerHTML",
-            r"document\.write\s*\(",
-        ],
-        "severity": "high",
-        "owasp": "A03:2021 Injection",
-    },
-    "hardcoded_secret": {
-        "patterns": [
-            r'password\s*=\s*["\'][^"\']+["\']',
-            r'api_key\s*=\s*["\'][^"\']+["\']',
-            r'secret\s*=\s*["\'][^"\']+["\']',
-            r'token\s*=\s*["\'][A-Za-z0-9]{20,}["\']',
-        ],
-        "severity": "critical",
-        "owasp": "A02:2021 Cryptographic Failures",
-    },
-    "insecure_random": {
-        "patterns": [
-            r"random\.\w+\s*\(",
-            r"Math\.random\s*\(",
-        ],
-        "severity": "medium",
-        "owasp": "A02:2021 Cryptographic Failures",
-    },
-    "path_traversal": {
-        "patterns": [
-            r"open\s*\([^)]*\+[^)]*\)",
-            r"readFile\s*\([^)]*\+[^)]*\)",
-        ],
-        "severity": "high",
-        "owasp": "A01:2021 Broken Access Control",
-    },
-    "command_injection": {
-        "patterns": [
-            r"subprocess\.\w+\s*\([^)]*shell\s*=\s*True",
-            r"os\.system\s*\(",
-            r"eval\s*\(",
-            r"exec\s*\(",
-        ],
-        "severity": "critical",
-        "owasp": "A03:2021 Injection",
-    },
-}
-
-
-class SecurityAuditWorkflow(BaseWorkflow):
-    """OWASP-focused security audit with team decision integration.
-
-    Scans code for security vulnerabilities while respecting
-    team decisions about false positives and accepted risks.
-    """
-
-    name = "security-audit"
-    description = "OWASP-focused security scan with vulnerability assessment"
-    stages = ["triage", "analyze", "assess", "remediate"]
-    tier_map = {
-        "triage": ModelTier.CHEAP,
-        "analyze": ModelTier.CAPABLE,
-        "assess": ModelTier.CAPABLE,
-        "remediate": ModelTier.PREMIUM,
-    }
-
-    def __init__(
-        self,
-        patterns_dir: str = "./patterns",
-        skip_remediate_if_clean: bool = True,
-        use_crew_for_assessment: bool = True,
-        use_crew_for_remediation: bool = False,
-        crew_config: dict | None = None,
-        enable_auth_strategy: bool = True,
-        **kwargs: Any,
-    ):
-        """Initialize security audit workflow.
-
-        Args:
-            patterns_dir: Directory containing security decisions
-            skip_remediate_if_clean: Skip remediation if no high/critical findings
-            use_crew_for_assessment: Use SecurityAuditCrew for vulnerability assessment (default: True)
-            use_crew_for_remediation: Use SecurityAuditCrew for enhanced remediation (default: True)
-            crew_config: Configuration dict for SecurityAuditCrew
-            enable_auth_strategy: If True, use intelligent subscription vs API routing
-                based on codebase size (default: True)
-            **kwargs: Additional arguments passed to BaseWorkflow
-
-        """
-        super().__init__(**kwargs)
-        self.patterns_dir = patterns_dir
-        self.skip_remediate_if_clean = skip_remediate_if_clean
-        self.use_crew_for_assessment = use_crew_for_assessment
-        self.use_crew_for_remediation = use_crew_for_remediation
-        self.crew_config = crew_config or {}
-        self.enable_auth_strategy = enable_auth_strategy
-        self._has_critical: bool = False
-        self._team_decisions: dict[str, dict] = {}
-        self._crew: Any = None
-        self._crew_available = False
-        self._auth_mode_used: str | None = None # Track which auth was recommended
-        self._load_team_decisions()
-
-    def _load_team_decisions(self) -> None:
-        """Load team security decisions for false positive filtering."""
-        decisions_file = Path(self.patterns_dir) / "security" / "team_decisions.json"
-        if decisions_file.exists():
-            try:
-                with open(decisions_file) as f:
-                    data = json.load(f)
-                for decision in data.get("decisions", []):
-                    key = decision.get("finding_hash", "")
-                    self._team_decisions[key] = decision
-            except (json.JSONDecodeError, OSError):
-                pass
-
-    async def _initialize_crew(self) -> None:
-        """Initialize the SecurityAuditCrew."""
-        if self._crew is not None:
-            return
-
-        try:
-            from empathy_llm_toolkit.agent_factory.crews.security_audit import SecurityAuditCrew
-
-            self._crew = SecurityAuditCrew()
-            self._crew_available = True
-            logger.info("SecurityAuditCrew initialized successfully")
-        except ImportError as e:
-            logger.warning(f"SecurityAuditCrew not available: {e}")
-            self._crew_available = False
-
-    def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
-        """Skip remediation stage if no critical/high findings.
-
-        Args:
-            stage_name: Name of the stage to check
-            input_data: Current workflow data
-
-        Returns:
-            Tuple of (should_skip, reason)
-
-        """
-        if stage_name == "remediate" and self.skip_remediate_if_clean:
-            if not self._has_critical:
-                return True, "No high/critical findings requiring remediation"
-        return False, None
-
-    async def run_stage(
-        self,
-        stage_name: str,
-        tier: ModelTier,
-        input_data: Any,
-    ) -> tuple[Any, int, int]:
-        """Route to specific stage implementation."""
-        if stage_name == "triage":
-            return await self._triage(input_data, tier)
-        if stage_name == "analyze":
-            return await self._analyze(input_data, tier)
-        if stage_name == "assess":
-            return await self._assess(input_data, tier)
-        if stage_name == "remediate":
-            return await self._remediate(input_data, tier)
-        raise ValueError(f"Unknown stage: {stage_name}")
-
-    async def _triage(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
-        """Quick scan for common vulnerability patterns.
-
-        Uses regex patterns to identify potential security issues
-        across the codebase for further analysis.
-        """
-        target_path = input_data.get("path", ".")
-        file_types = input_data.get("file_types", [".py", ".ts", ".tsx", ".js", ".jsx"])
-
-        findings: list[dict] = []
-        files_scanned = 0
-
-        target = Path(target_path)
-        if target.exists():
-            # Handle both file and directory targets
-            files_to_scan: list[Path] = []
-            if target.is_file():
-                # Single file - check if it matches file_types
-                if any(str(target).endswith(ext) for ext in file_types):
-                    files_to_scan = [target]
-            else:
-                # Directory - recursively find all matching files
-                for ext in file_types:
-                    for file_path in target.rglob(f"*{ext}"):
-                        # Skip excluded directories
-                        if any(skip in str(file_path) for skip in SKIP_DIRECTORIES):
-                            continue
-                        files_to_scan.append(file_path)
-
-            for file_path in files_to_scan:
-                try:
-                    content = file_path.read_text(errors="ignore")
-                    lines = content.split("\n")
-                    files_scanned += 1
-
-                    for vuln_type, vuln_info in SECURITY_PATTERNS.items():
-                        for pattern in vuln_info["patterns"]:
-                            matches = list(re.finditer(pattern, content, re.IGNORECASE))
-                            for match in matches:
-                                # Find line number and get the line content
-                                line_num = content[: match.start()].count("\n") + 1
-                                line_content = (
-                                    lines[line_num - 1] if line_num <= len(lines) else ""
-                                )
-
-                                # Skip if file is a security example/test file
-                                file_name = str(file_path)
-                                if any(exp in file_name for exp in SECURITY_EXAMPLE_PATHS):
-                                    continue
-
-                                # Skip if this looks like detection/scanning code
-                                if self._is_detection_code(line_content, match.group()):
-                                    continue
-
-                                # Phase 2: Skip safe SQL parameterization patterns
-                                if vuln_type == "sql_injection":
-                                    if self._is_safe_sql_parameterization(
-                                        line_content,
-                                        match.group(),
-                                        content,
-                                    ):
-                                        continue
-
-                                # Skip fake/test credentials
-                                if vuln_type == "hardcoded_secret":
-                                    if self._is_fake_credential(match.group()):
-                                        continue
-
-                                # Phase 2: Skip safe random usage (tests, demos, documented)
-                                if vuln_type == "insecure_random":
-                                    if self._is_safe_random_usage(
-                                        line_content,
-                                        file_name,
-                                        content,
-                                    ):
-                                        continue
-
-                                # Skip command_injection in documentation strings
-                                if vuln_type == "command_injection":
-                                    if self._is_documentation_or_string(
-                                        line_content,
-                                        match.group(),
-                                    ):
-                                        continue
-
-                                # Check if this is a test file - downgrade to informational
-                                is_test_file = any(
-                                    re.search(pat, file_name) for pat in TEST_FILE_PATTERNS
-                                )
-
-                                # Skip test file findings for hardcoded_secret (expected in tests)
-                                if is_test_file and vuln_type == "hardcoded_secret":
-                                    continue
-
-                                findings.append(
-                                    {
-                                        "type": vuln_type,
-                                        "file": str(file_path),
-                                        "line": line_num,
-                                        "match": match.group()[:100],
-                                        "severity": (
-                                            "low" if is_test_file else vuln_info["severity"]
-                                        ),
-                                        "owasp": vuln_info["owasp"],
-                                        "is_test": is_test_file,
-                                    },
-                                )
-                except OSError:
-                    continue
-
-        # Phase 3: Apply AST-based filtering for command injection
-        try:
-            from .security_audit_phase3 import apply_phase3_filtering
-
-            # Separate command injection findings
-            cmd_findings = [f for f in findings if f["type"] == "command_injection"]
-            other_findings = [f for f in findings if f["type"] != "command_injection"]
-
-            # Apply Phase 3 filtering to command injection
-            filtered_cmd = apply_phase3_filtering(cmd_findings)
-
-            # Combine back
-            findings = other_findings + filtered_cmd
-
-            logger.info(
-                f"Phase 3: Filtered command_injection from {len(cmd_findings)} to {len(filtered_cmd)} "
-                f"({len(cmd_findings) - len(filtered_cmd)} false positives removed)"
-            )
-        except ImportError:
-            logger.debug("Phase 3 module not available, skipping AST-based filtering")
-        except Exception as e:
-            logger.warning(f"Phase 3 filtering failed: {e}")
-
-        # === AUTH STRATEGY INTEGRATION ===
-        # Detect codebase size and recommend auth mode (first stage only)
-        if self.enable_auth_strategy:
-            try:
-                from empathy_os.models import (
-                    count_lines_of_code,
-                    get_auth_strategy,
-                    get_module_size_category,
-                )
-
-                # Calculate codebase size
-                codebase_lines = 0
-                if target.exists():
-                    if target.is_file():
-                        codebase_lines = count_lines_of_code(target)
-                    elif target.is_dir():
-                        # Sum lines across all Python files
-                        for py_file in target.rglob("*.py"):
-                            try:
-                                codebase_lines += count_lines_of_code(py_file)
-                            except Exception:
-                                pass
-
-                if codebase_lines > 0:
-                    # Get auth strategy (first-time setup if needed)
-                    strategy = get_auth_strategy()
-
-                    # Get recommended auth mode
-                    recommended_mode = strategy.get_recommended_mode(codebase_lines)
-                    self._auth_mode_used = recommended_mode.value
-
-                    # Get size category
-                    size_category = get_module_size_category(codebase_lines)
-
-                    # Log recommendation
-                    logger.info(
-                        f"Codebase: {target} ({codebase_lines} LOC, {size_category})"
-                    )
-                    logger.info(f"Recommended auth mode: {recommended_mode.value}")
-
-                    # Get cost estimate
-                    cost_estimate = strategy.estimate_cost(codebase_lines, recommended_mode)
-
-                    if recommended_mode.value == "subscription":
-                        logger.info(
-                            f"Cost: {cost_estimate['quota_cost']} "
-                            f"(fits in {cost_estimate['fits_in_context']} context)"
-                        )
-                    else: # API
-                        logger.info(
-                            f"Cost: ~${cost_estimate['monetary_cost']:.4f} "
-                            f"(1M context window)"
-                        )
-
-            except Exception as e:
-                # Don't fail workflow if auth strategy fails
-                logger.warning(f"Auth strategy detection failed: {e}")
-
-        input_tokens = len(str(input_data)) // 4
-        output_tokens = len(str(findings)) // 4
-
-        return (
-            {
-                "findings": findings,
-                "files_scanned": files_scanned,
-                "finding_count": len(findings),
-                **input_data,
-            },
-            input_tokens,
-            output_tokens,
-        )
-
-    async def _analyze(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
-        """Deep analysis of flagged areas.
-
-        Filters findings against team decisions and performs
-        deeper analysis of genuine security concerns.
-        """
-        findings = input_data.get("findings", [])
-        analyzed: list[dict] = []
-
-        for finding in findings:
-            finding_key = finding.get("type", "")
-
-            # Check team decisions
-            decision = self._team_decisions.get(finding_key)
-            if decision:
-                if decision.get("decision") == "false_positive":
-                    finding["status"] = "false_positive"
-                    finding["decision_reason"] = decision.get("reason", "")
-                    finding["decided_by"] = decision.get("decided_by", "")
-                elif decision.get("decision") == "accepted":
-                    finding["status"] = "accepted_risk"
-                    finding["decision_reason"] = decision.get("reason", "")
-                elif decision.get("decision") == "deferred":
-                    finding["status"] = "deferred"
-                    finding["decision_reason"] = decision.get("reason", "")
-                else:
-                    finding["status"] = "needs_review"
-            else:
-                finding["status"] = "needs_review"
-
-            # Add context analysis
-            if finding["status"] == "needs_review":
-                finding["analysis"] = self._analyze_finding(finding)
-
-            analyzed.append(finding)
-
-        # Separate by status
-        needs_review = [f for f in analyzed if f["status"] == "needs_review"]
-        false_positives = [f for f in analyzed if f["status"] == "false_positive"]
-        accepted = [f for f in analyzed if f["status"] == "accepted_risk"]
-
-        input_tokens = len(str(input_data)) // 4
-        output_tokens = len(str(analyzed)) // 4
-
-        return (
-            {
-                "analyzed_findings": analyzed,
-                "needs_review": needs_review,
-                "false_positives": false_positives,
-                "accepted_risks": accepted,
-                "review_count": len(needs_review),
-                **input_data,
-            },
-            input_tokens,
-            output_tokens,
-        )
-
-    def _analyze_finding(self, finding: dict) -> str:
-        """Generate analysis context for a finding."""
-        vuln_type = finding.get("type", "")
-        analyses = {
-            "sql_injection": "Potential SQL injection. Verify parameterized input.",
-            "xss": "Potential XSS vulnerability. Check output escaping.",
-            "hardcoded_secret": "Hardcoded credential. Use env vars or secrets manager.",
-            "insecure_random": "Insecure random. Use secrets module instead.",
-            "path_traversal": "Potential path traversal. Validate file paths.",
-            "command_injection": "Potential command injection. Avoid shell=True.",
-        }
-        return analyses.get(vuln_type, "Review for security implications.")
-
-    def _is_detection_code(self, line_content: str, match_text: str) -> bool:
-        """Check if a match is actually detection/scanning code, not a vulnerability.
-
-        This prevents false positives when scanning security tools that contain
-        patterns like 'if "eval(" in content:' which are detecting vulnerabilities,
-        not introducing them.
-        """
-        # Check if the line contains detection patterns
-        for pattern in DETECTION_PATTERNS:
-            if re.search(pattern, line_content, re.IGNORECASE):
-                return True
-
-        # Check if the match is inside a string literal used for comparison
-        # e.g., 'if "eval(" in content:' or 'pattern = r"eval\("'
-        if f'"{match_text.strip()}"' in line_content or f"'{match_text.strip()}'" in line_content:
-            return True
-
-        return False
-
-    def _is_fake_credential(self, match_text: str) -> bool:
-        """Check if a matched credential is obviously fake/for testing.
-
-        This prevents false positives for test fixtures using patterns like
-        'AKIAIOSFODNN7EXAMPLE' (AWS official example) or 'test-key-not-real'.
-        """
-        for pattern in FAKE_CREDENTIAL_PATTERNS:
-            if re.search(pattern, match_text, re.IGNORECASE):
-                return True
-        return False
-
-    def _is_documentation_or_string(self, line_content: str, match_text: str) -> bool:
-        """Check if a command injection match is in documentation or string literals.
-
-        This prevents false positives for:
-        - Docstrings describing security issues
-        - String literals containing example vulnerable code
-        - Comments explaining vulnerabilities
-        """
-        line = line_content.strip()
-
-        # Check if line is a comment or documentation
-        if line.startswith("#") or line.startswith("//") or line.startswith("*") or line.startswith("-"):
-            return True
-
-        # Check if inside a docstring (triple quotes)
-        if '"""' in line or "'''" in line:
-            return True
-
-        # Check if the match is inside a string literal being defined
-        # e.g., 'pattern = r"eval\("' or '"eval(" in content'
-        string_patterns = [
-            r'["\'].*' + re.escape(match_text.strip()[:10]) + r'.*["\']', # Inside quotes
-            r'r["\'].*' + re.escape(match_text.strip()[:10]), # Raw string
-            r'=\s*["\']', # String assignment
-        ]
-        for pattern in string_patterns:
-            if re.search(pattern, line):
-                return True
-
-        # Check for common documentation patterns
-        doc_indicators = [
-            "example",
-            "vulnerable",
-            "insecure",
-            "dangerous",
-            "pattern",
-            "detect",
-            "scan",
-            "check for",
-            "look for",
-        ]
-        line_lower = line.lower()
-        if any(ind in line_lower for ind in doc_indicators):
-            return True
-
-        return False
-
-    def _is_safe_sql_parameterization(self, line_content: str, match_text: str, file_content: str) -> bool:
-        """Check if SQL query uses safe parameterization despite f-string usage.
-
-        Phase 2 Enhancement: Detects safe patterns like:
-        - placeholders = ",".join("?" * len(ids))
-        - cursor.execute(f"... IN ({placeholders})", ids)
-
-        This prevents false positives for the SQLite-recommended pattern
-        of building dynamic placeholder strings.
-
-        Args:
-            line_content: The line containing the match (may be incomplete for multi-line)
-            match_text: The matched text
-            file_content: Full file content for context analysis
-
-        Returns:
-            True if this is safe parameterized SQL, False otherwise
-        """
-        # Get the position of the match in the full file content
-        match_pos = file_content.find(match_text)
-        if match_pos == -1:
-            # Try to find cursor.execute
-            match_pos = file_content.find("cursor.execute")
-            if match_pos == -1:
-                return False
-
-        # Extract a larger context (next 200 chars after match)
-        context = file_content[match_pos:match_pos + 200]
-
-        # Also get lines before the match for placeholder detection
-        lines_before = file_content[:match_pos].split("\n")
-        recent_lines = lines_before[-10:] if len(lines_before) > 10 else lines_before
-
-        # Pattern 1: Check if this is a placeholder-based parameterized query
-        # Look for: cursor.execute(f"... IN ({placeholders})", params)
-        if "placeholders" in context or any("placeholders" in line for line in recent_lines[-5:]):
-            # Check if context has both f-string and separate parameters
-            # Pattern: f"...{placeholders}..." followed by comma and params
-            if re.search(r'f["\'][^"\']*\{placeholders\}[^"\']*["\']\s*,\s*\w+', context):
-                return True # Safe - has separate parameters
-
-            # Also check if recent lines built the placeholders
-            for prev_line in reversed(recent_lines):
-                if "placeholders" in prev_line and '"?"' in prev_line and "join" in prev_line:
-                    # Found placeholder construction
-                    # Now check if the execute has separate parameters
-                    if "," in context and any(param in context for param in ["run_ids", "ids", "params", "values", ")"]):
-                        return True
-
-        # Pattern 2: Check if f-string only builds SQL structure with constants
-        # Example: f"SELECT * FROM {TABLE_NAME}" where TABLE_NAME is a constant
-        f_string_vars = re.findall(r'\{(\w+)\}', context)
-        if f_string_vars:
-            # Check if all variables are constants (UPPERCASE or table/column names)
-            all_constants = all(
-                var.isupper() or "TABLE" in var.upper() or "COLUMN" in var.upper()
-                for var in f_string_vars
-            )
-            if all_constants:
-                return True # Safe - using constants, not user data
-
-        # Pattern 3: Check for security note comments nearby
-        # If developers added security notes, it's likely safe
-        for prev_line in reversed(recent_lines[-3:]):
-            if "security note" in prev_line.lower() and "safe" in prev_line.lower():
-                return True
-
-        return False
-
-    def _is_safe_random_usage(self, line_content: str, file_path: str, file_content: str) -> bool:
-        """Check if random usage is in a safe context (tests, simulations, non-crypto).
-
-        Phase 2 Enhancement: Reduces false positives for random module usage
-        in test fixtures, A/B testing simulations, and demo code.
-
-        Args:
-            line_content: The line containing the match
-            file_path: Path to the file being scanned
-            file_content: Full file content for context analysis
-
-        Returns:
-            True if random usage is safe/documented, False if potentially insecure
-        """
-        # Check if file is a test file
-        is_test = any(pattern in file_path.lower() for pattern in ["/test", "test_", "conftest"])
-
-        # Check for explicit security notes nearby
-        lines = file_content.split("\n")
-        line_index = None
-        for i, line in enumerate(lines):
-            if line_content.strip() in line:
-                line_index = i
-                break
-
-        if line_index is not None:
-            # Check 5 lines before and after for security notes
-            context_start = max(0, line_index - 5)
-            context_end = min(len(lines), line_index + 5)
-            context = "\n".join(lines[context_start:context_end]).lower()
-
-            # Look for clarifying comments
-            safe_indicators = [
-                "security note",
-                "not cryptographic",
-                "not for crypto",
-                "test data",
-                "demo data",
-                "simulation",
-                "reproducible",
-                "deterministic",
-                "fixed seed",
-                "not used for security",
-                "not used for secrets",
-                "not used for tokens",
-            ]
-
-            if any(indicator in context for indicator in safe_indicators):
-                return True # Documented as safe
-
-        # Check for common safe random patterns
-        line_lower = line_content.lower()
-
-        # Pattern 1: Fixed seed (reproducible tests)
-        if "random.seed(" in line_lower:
-            return True # Fixed seed is for reproducibility, not security
-
-        # Pattern 2: A/B testing, simulations, demos
-        safe_contexts = [
-            "simulation",
-            "demo",
-            "a/b test",
-            "ab_test",
-            "fixture",
-            "mock",
-            "example",
-            "sample",
-        ]
-        if any(context in file_path.lower() for context in safe_contexts):
-            return True
-
-        # If it's a test file without crypto indicators, it's probably safe
-        if is_test:
-            crypto_indicators = ["password", "secret", "token", "key", "crypto", "auth"]
-            if not any(indicator in file_path.lower() for indicator in crypto_indicators):
-                return True
-
-        return False
-
-     async def _assess(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
-         """Risk scoring and severity classification.
-
-         Calculates overall security risk score and identifies
-         critical issues requiring immediate attention.
-
-         When use_crew_for_assessment=True, uses SecurityAuditCrew's
-         comprehensive analysis for enhanced vulnerability detection.
-         """
-         await self._initialize_crew()
-
-         needs_review = input_data.get("needs_review", [])
-
-         # Count by severity
-         severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}
-         for finding in needs_review:
-             sev = finding.get("severity", "low")
-             severity_counts[sev] = severity_counts.get(sev, 0) + 1
-
-         # Calculate risk score (0-100)
-         risk_score = (
-             severity_counts["critical"] * 25
-             + severity_counts["high"] * 10
-             + severity_counts["medium"] * 3
-             + severity_counts["low"] * 1
-         )
-         risk_score = min(100, risk_score)
-
-         # Set flag for skip logic
-         self._has_critical = severity_counts["critical"] > 0 or severity_counts["high"] > 0
-
-         # Group findings by OWASP category
-         by_owasp: dict[str, list] = {}
-         for finding in needs_review:
-             owasp = finding.get("owasp", "Unknown")
-             if owasp not in by_owasp:
-                 by_owasp[owasp] = []
-             by_owasp[owasp].append(finding)
-
-         # Use crew for enhanced assessment if available
-         crew_enhanced = False
-         crew_findings = []
-         if self.use_crew_for_assessment and self._crew_available:
-             target = input_data.get("path", ".")
-             try:
-                 crew_report = await self._crew.audit(target=target)
-                 if crew_report and crew_report.findings:
-                     crew_enhanced = True
-                     # Convert crew findings to workflow format
-                     for finding in crew_report.findings:
-                         crew_findings.append(
-                             {
-                                 "type": finding.category.value,
-                                 "title": finding.title,
-                                 "description": finding.description,
-                                 "severity": finding.severity.value,
-                                 "file": finding.file_path or "",
-                                 "line": finding.line_number or 0,
-                                 "owasp": finding.category.value,
-                                 "remediation": finding.remediation or "",
-                                 "cwe_id": finding.cwe_id or "",
-                                 "cvss_score": finding.cvss_score or 0.0,
-                                 "source": "crew",
-                             }
-                         )
-                     # Update severity counts with crew findings
-                     for finding in crew_findings:
-                         sev = finding.get("severity", "low")
-                         severity_counts[sev] = severity_counts.get(sev, 0) + 1
-                     # Recalculate risk score with crew findings
-                     risk_score = (
-                         severity_counts["critical"] * 25
-                         + severity_counts["high"] * 10
-                         + severity_counts["medium"] * 3
-                         + severity_counts["low"] * 1
-                     )
-                     risk_score = min(100, risk_score)
-             except Exception as e:
-                 logger.warning(f"Crew assessment failed: {e}")
-
-         # Merge crew findings with pattern-based findings
-         all_critical = [f for f in needs_review if f.get("severity") == "critical"]
-         all_high = [f for f in needs_review if f.get("severity") == "high"]
-         if crew_enhanced:
-             all_critical.extend([f for f in crew_findings if f.get("severity") == "critical"])
-             all_high.extend([f for f in crew_findings if f.get("severity") == "high"])
-
-         assessment = {
-             "risk_score": risk_score,
-             "risk_level": (
-                 "critical"
-                 if risk_score >= 75
-                 else "high" if risk_score >= 50 else "medium" if risk_score >= 25 else "low"
-             ),
-             "severity_breakdown": severity_counts,
-             "by_owasp_category": {k: len(v) for k, v in by_owasp.items()},
-             "critical_findings": all_critical,
-             "high_findings": all_high,
-             "crew_enhanced": crew_enhanced,
-             "crew_findings_count": len(crew_findings) if crew_enhanced else 0,
-         }
-
-         input_tokens = len(str(input_data)) // 4
-         output_tokens = len(str(assessment)) // 4
-
-         # Build output with assessment
-         output = {
-             "assessment": assessment,
-             **input_data,
-         }
-
-         # Add formatted report for human readability
-         output["formatted_report"] = format_security_report(output)
-
-         return (
-             output,
-             input_tokens,
-             output_tokens,
-         )
-
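The removed `_assess` step scores risk as a weighted sum capped at 100 (critical ×25, high ×10, medium ×3, low ×1) and maps the score to a level at the 75/50/25 thresholds. A minimal standalone sketch of that arithmetic with a worked example (the function name is illustrative, not part of the package):

```python
def score_risk(counts: dict[str, int]) -> tuple[int, str]:
    """Weighted severity sum capped at 100, mirroring the removed _assess logic."""
    score = min(
        100,
        counts.get("critical", 0) * 25
        + counts.get("high", 0) * 10
        + counts.get("medium", 0) * 3
        + counts.get("low", 0),
    )
    if score >= 75:
        level = "critical"
    elif score >= 50:
        level = "high"
    elif score >= 25:
        level = "medium"
    else:
        level = "low"
    return score, level


# Two critical + three high findings: 2*25 + 3*10 = 80, which is >= 75 -> "critical"
print(score_risk({"critical": 2, "high": 3}))  # (80, 'critical')
```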
-     async def _remediate(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
-         """Generate remediation plan for security issues.
-
-         Creates actionable remediation steps prioritized by
-         severity and grouped by OWASP category.
-
-         When use_crew_for_remediation=True, uses SecurityAuditCrew's
-         Remediation Expert agent for enhanced recommendations.
-
-         Supports XML-enhanced prompts when enabled in workflow config.
-         """
-         try:
-             from .security_adapters import _check_crew_available
-
-             adapters_available = True
-         except ImportError:
-             adapters_available = False
-             _check_crew_available = lambda: False
-
-         assessment = input_data.get("assessment", {})
-         critical = assessment.get("critical_findings", [])
-         high = assessment.get("high_findings", [])
-         target = input_data.get("target", input_data.get("path", ""))
-
-         crew_remediation = None
-         crew_enhanced = False
-
-         # Try crew-based remediation first if enabled
-         if self.use_crew_for_remediation and adapters_available and _check_crew_available():
-             crew_remediation = await self._get_crew_remediation(target, critical + high, assessment)
-             if crew_remediation:
-                 crew_enhanced = True
-
-         # Build findings summary for LLM
-         findings_summary = []
-         for f in critical:
-             findings_summary.append(
-                 f"CRITICAL: {f.get('type')} in {f.get('file')}:{f.get('line')} - {f.get('owasp')}",
-             )
-         for f in high:
-             findings_summary.append(
-                 f"HIGH: {f.get('type')} in {f.get('file')}:{f.get('line')} - {f.get('owasp')}",
-             )
-
-         # Build input payload for prompt
-         input_payload = f"""Target: {target or "codebase"}
-
- Findings:
- {chr(10).join(findings_summary) if findings_summary else "No critical or high findings"}
-
- Risk Score: {assessment.get("risk_score", 0)}/100
- Risk Level: {assessment.get("risk_level", "unknown")}
-
- Severity Breakdown: {json.dumps(assessment.get("severity_breakdown", {}), indent=2)}"""
-
-         # Check if XML prompts are enabled
-         if self._is_xml_enabled():
-             # Use XML-enhanced prompt
-             user_message = self._render_xml_prompt(
-                 role="application security engineer",
-                 goal="Generate a comprehensive remediation plan for security vulnerabilities",
-                 instructions=[
-                     "Explain each vulnerability and its potential impact",
-                     "Provide specific remediation steps with code examples",
-                     "Suggest preventive measures to avoid similar issues",
-                     "Reference relevant OWASP guidelines",
-                     "Prioritize by severity (critical first, then high)",
-                 ],
-                 constraints=[
-                     "Be specific and actionable",
-                     "Include code examples where helpful",
-                     "Group fixes by severity",
-                 ],
-                 input_type="security_findings",
-                 input_payload=input_payload,
-                 extra={
-                     "risk_score": assessment.get("risk_score", 0),
-                     "risk_level": assessment.get("risk_level", "unknown"),
-                 },
-             )
-             system = None  # XML prompt includes all context
-         else:
-             # Use legacy plain text prompts
-             system = """You are a security expert in application security and OWASP.
- Generate a comprehensive remediation plan for the security findings.
-
- For each finding:
- 1. Explain the vulnerability and its potential impact
- 2. Provide specific remediation steps with code examples
- 3. Suggest preventive measures to avoid similar issues
- 4. Reference relevant OWASP guidelines
-
- Prioritize by severity (critical first, then high).
- Be specific and actionable."""
-
-             user_message = f"""Generate a remediation plan for these security findings:
-
- {input_payload}
-
- Provide a detailed remediation plan with specific fixes."""
-
-         # Try executor-based execution first (Phase 3 pattern)
-         if self._executor is not None or self._api_key:
-             try:
-                 step = SECURITY_STEPS["remediate"]
-                 response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
-                     step=step,
-                     prompt=user_message,
-                     system=system,
-                 )
-             except Exception:
-                 # Fall back to legacy _call_llm if executor fails
-                 response, input_tokens, output_tokens = await self._call_llm(
-                     tier,
-                     system or "",
-                     user_message,
-                     max_tokens=3000,
-                 )
-         else:
-             # Legacy path for backward compatibility
-             response, input_tokens, output_tokens = await self._call_llm(
-                 tier,
-                 system or "",
-                 user_message,
-                 max_tokens=3000,
-             )
-
-         # Parse XML response if enforcement is enabled
-         parsed_data = self._parse_xml_response(response)
-
-         # Merge crew remediation if available
-         if crew_enhanced and crew_remediation:
-             response = self._merge_crew_remediation(response, crew_remediation)
-
-         result = {
-             "remediation_plan": response,
-             "remediation_count": len(critical) + len(high),
-             "risk_score": assessment.get("risk_score", 0),
-             "risk_level": assessment.get("risk_level", "unknown"),
-             "model_tier_used": tier.value,
-             "crew_enhanced": crew_enhanced,
-             "auth_mode_used": self._auth_mode_used,  # Track recommended auth mode
-             **input_data,  # Merge all previous stage data
-         }
-
-         # Add crew-specific fields if enhanced
-         if crew_enhanced and crew_remediation:
-             result["crew_findings"] = crew_remediation.get("findings", [])
-             result["crew_agents_used"] = crew_remediation.get("agents_used", [])
-
-         # Merge parsed XML data if available
-         if parsed_data.get("xml_parsed"):
-             result.update(
-                 {
-                     "xml_parsed": True,
-                     "summary": parsed_data.get("summary"),
-                     "findings": parsed_data.get("findings", []),
-                     "checklist": parsed_data.get("checklist", []),
-                 },
-             )
-
-         return (result, input_tokens, output_tokens)
-
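The removed `_remediate` step prefers the executor-based call and falls back to the legacy `_call_llm` path when the executor raises. Stripped of the workflow details, the control flow is a try-first/fall-back pattern like the sketch below; the coroutine names are placeholders, not the package API:

```python
import asyncio


async def run_with_fallback(primary, fallback):
    """Await the preferred coroutine; on any failure, degrade to the legacy one."""
    try:
        return await primary()
    except Exception:
        return await fallback()


async def demo() -> str:
    async def executor_path() -> str:
        raise RuntimeError("executor unavailable")

    async def legacy_path() -> str:
        return "legacy response"

    return await run_with_fallback(executor_path, legacy_path)


print(asyncio.run(demo()))  # legacy response
```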
-     async def _get_crew_remediation(
-         self,
-         target: str,
-         findings: list,
-         assessment: dict,
-     ) -> dict | None:
-         """Get remediation recommendations from SecurityAuditCrew.
-
-         Args:
-             target: Path to codebase
-             findings: List of findings needing remediation
-             assessment: Current assessment dict
-
-         Returns:
-             Crew results dict or None if failed
-
-         """
-         try:
-             from empathy_llm_toolkit.agent_factory.crews import (
-                 SecurityAuditConfig,
-                 SecurityAuditCrew,
-             )
-
-             from .security_adapters import (
-                 crew_report_to_workflow_format,
-                 workflow_findings_to_crew_format,
-             )
-
-             # Configure crew for focused remediation
-             config = SecurityAuditConfig(
-                 scan_depth="quick",  # Skip deep scan, focus on remediation
-                 **self.crew_config,
-             )
-             crew = SecurityAuditCrew(config=config)
-
-             # Convert findings to crew format for context
-             crew_findings = workflow_findings_to_crew_format(findings)
-
-             # Run audit with remediation focus
-             context = {
-                 "focus_areas": ["remediation"],
-                 "existing_findings": crew_findings,
-                 "skip_detection": True,  # We already have findings
-                 "risk_score": assessment.get("risk_score", 0),
-             }
-
-             report = await crew.audit(target, context=context)
-
-             if report:
-                 return crew_report_to_workflow_format(report)
-             return None
-
-         except Exception as e:
-             import logging
-
-             logging.getLogger(__name__).warning(f"Crew remediation failed: {e}")
-             return None
-
-     def _merge_crew_remediation(self, llm_response: str, crew_remediation: dict) -> str:
-         """Merge crew remediation recommendations with LLM response.
-
-         Args:
-             llm_response: LLM-generated remediation plan
-             crew_remediation: Crew results in workflow format
-
-         Returns:
-             Merged response with crew enhancements
-
-         """
-         crew_findings = crew_remediation.get("findings", [])
-
-         if not crew_findings:
-             return llm_response
-
-         # Build crew section efficiently (avoid O(n²) string concat)
-         parts = [
-             "\n\n## Enhanced Remediation (SecurityAuditCrew)\n\n",
-             f"**Agents Used**: {', '.join(crew_remediation.get('agents_used', []))}\n\n",
-         ]
-
-         for finding in crew_findings:
-             if finding.get("remediation"):
-                 parts.append(f"### {finding.get('title', 'Finding')}\n")
-                 parts.append(f"**Severity**: {finding.get('severity', 'unknown').upper()}\n")
-                 if finding.get("cwe_id"):
-                     parts.append(f"**CWE**: {finding.get('cwe_id')}\n")
-                 if finding.get("cvss_score"):
-                     parts.append(f"**CVSS Score**: {finding.get('cvss_score')}\n")
-                 parts.append(f"\n**Remediation**:\n{finding.get('remediation')}\n\n")
-
-         return llm_response + "".join(parts)
-
-     def _get_remediation_action(self, finding: dict) -> str:
-         """Generate specific remediation action for a finding."""
-         actions = {
-             "sql_injection": "Use parameterized queries or ORM. Never interpolate user input.",
-             "xss": "Use framework's auto-escaping. Sanitize user input.",
-             "hardcoded_secret": "Move to env vars or use a secrets manager.",
-             "insecure_random": "Use secrets.token_hex() or secrets.randbelow().",
-             "path_traversal": "Use os.path.realpath() and validate paths.",
-             "command_injection": "Use subprocess with shell=False and argument lists.",
-         }
-         return actions.get(finding.get("type", ""), "Apply security best practices.")
-
-
- def format_security_report(output: dict) -> str:
-     """Format security audit output as a human-readable report.
-
-     This format is designed to be:
-     - Easy for humans to read and understand
-     - Easy to copy/paste to an AI assistant for remediation help
-     - Actionable with clear severity levels and file locations
-
-     Args:
-         output: The workflow output dictionary
-
-     Returns:
-         Formatted report string
-
-     """
-     lines = []
-
-     # Header
-     assessment = output.get("assessment", {})
-     risk_level = assessment.get("risk_level", "unknown").upper()
-     risk_score = assessment.get("risk_score", 0)
-
-     lines.append("=" * 60)
-     lines.append("SECURITY AUDIT REPORT")
-     lines.append("=" * 60)
-     lines.append("")
-     lines.append(f"Risk Level: {risk_level}")
-     lines.append(f"Risk Score: {risk_score}/100")
-     lines.append("")
-
-     # Severity breakdown
-     breakdown = assessment.get("severity_breakdown", {})
-     lines.append("Severity Summary:")
-     for sev in ["critical", "high", "medium", "low"]:
-         count = breakdown.get(sev, 0)
-         icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}.get(sev, "⚪")
-         lines.append(f"  {icon} {sev.capitalize()}: {count}")
-     lines.append("")
-
-     # Files scanned
-     files_scanned = output.get("files_scanned", 0)
-     lines.append(f"Files Scanned: {files_scanned}")
-     lines.append("")
-
-     # Findings requiring review
-     needs_review = output.get("needs_review", [])
-     if needs_review:
-         lines.append("-" * 60)
-         lines.append("FINDINGS REQUIRING REVIEW")
-         lines.append("-" * 60)
-         lines.append("")
-
-         for i, finding in enumerate(needs_review, 1):
-             severity = finding.get("severity", "unknown").upper()
-             vuln_type = finding.get("type", "unknown")
-             file_path = finding.get("file", "").split("Empathy-framework/")[-1]
-             line_num = finding.get("line", 0)
-             match = finding.get("match", "")[:50]
-             owasp = finding.get("owasp", "")
-             is_test = finding.get("is_test", False)
-             analysis = finding.get("analysis", "")
-
-             test_marker = " [TEST FILE]" if is_test else ""
-             lines.append(f"{i}. [{severity}]{test_marker} {vuln_type}")
-             lines.append(f"   File: {file_path}:{line_num}")
-             lines.append(f"   Match: {match}")
-             lines.append(f"   OWASP: {owasp}")
-             if analysis:
-                 lines.append(f"   Analysis: {analysis}")
-             lines.append("")
-
-     # Accepted risks
-     accepted = output.get("accepted_risks", [])
-     if accepted:
-         lines.append("-" * 60)
-         lines.append("ACCEPTED RISKS (No Action Required)")
-         lines.append("-" * 60)
-         lines.append("")
-
-         for finding in accepted:
-             vuln_type = finding.get("type", "unknown")
-             file_path = finding.get("file", "").split("Empathy-framework/")[-1]
-             line_num = finding.get("line", 0)
-             reason = finding.get("decision_reason", "")
-
-             lines.append(f"  - {vuln_type} in {file_path}:{line_num}")
-             if reason:
-                 lines.append(f"    Reason: {reason}")
-         lines.append("")
-
-     # Remediation plan if present
-     remediation = output.get("remediation_plan", "")
-     if remediation and remediation.strip():
-         lines.append("-" * 60)
-         lines.append("REMEDIATION PLAN")
-         lines.append("-" * 60)
-         lines.append("")
-         lines.append(remediation)
-         lines.append("")
-
-     # Footer with action items
-     lines.append("=" * 60)
-     if needs_review:
-         lines.append("ACTION REQUIRED:")
-         lines.append(f"  Review {len(needs_review)} finding(s) above")
-         lines.append("  Copy this report to Claude Code for remediation help")
-     else:
-         lines.append("STATUS: All clear - no critical or high findings")
-     lines.append("=" * 60)
-
-     return "\n".join(lines)
-
-
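`format_security_report` reads only a handful of keys from the workflow output, so it can be exercised on its own. A minimal call with hand-built, purely illustrative data:

```python
minimal_output = {
    "assessment": {
        "risk_level": "high",
        "risk_score": 50,
        "severity_breakdown": {"critical": 0, "high": 5, "medium": 0, "low": 0},
    },
    "files_scanned": 42,
    "needs_review": [
        {
            "severity": "high",
            "type": "hardcoded_secret",
            "file": "app/config.py",
            "line": 17,
            "match": 'API_KEY = "abc123"',
            "owasp": "A07:2021",
        }
    ],
    "accepted_risks": [],
}

print(format_security_report(minimal_output))
```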
- def main():
-     """CLI entry point for security audit workflow."""
-     import asyncio
-
-     async def run():
-         workflow = SecurityAuditWorkflow()
-         result = await workflow.execute(path=".", file_types=[".py"])
-
-         # Use the new formatted report
-         report = format_security_report(result.final_output)
-         print(report)
-
-         print("\nCost Report:")
-         print(f"  Total Cost: ${result.cost_report.total_cost:.4f}")
-         savings = result.cost_report.savings
-         pct = result.cost_report.savings_percent
-         print(f"  Savings: ${savings:.4f} ({pct:.1f}%)")
-
-     asyncio.run(run())
-
-
- if __name__ == "__main__":
-     main()