tapps-agents 3.5.40__py3-none-any.whl → 3.6.0__py3-none-any.whl

This diff shows the content changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the differences between the package versions as they appear in their respective public registries.
Files changed (705)
  1. tapps_agents/__init__.py +2 -2
  2. tapps_agents/agents/__init__.py +22 -22
  3. tapps_agents/agents/analyst/__init__.py +5 -5
  4. tapps_agents/agents/architect/__init__.py +5 -5
  5. tapps_agents/agents/architect/agent.py +1033 -1033
  6. tapps_agents/agents/architect/pattern_detector.py +75 -75
  7. tapps_agents/agents/cleanup/__init__.py +7 -7
  8. tapps_agents/agents/cleanup/agent.py +445 -445
  9. tapps_agents/agents/debugger/__init__.py +7 -7
  10. tapps_agents/agents/debugger/agent.py +310 -310
  11. tapps_agents/agents/debugger/error_analyzer.py +437 -437
  12. tapps_agents/agents/designer/__init__.py +5 -5
  13. tapps_agents/agents/designer/agent.py +786 -786
  14. tapps_agents/agents/designer/visual_designer.py +638 -638
  15. tapps_agents/agents/documenter/__init__.py +7 -7
  16. tapps_agents/agents/documenter/agent.py +531 -531
  17. tapps_agents/agents/documenter/doc_generator.py +472 -472
  18. tapps_agents/agents/documenter/doc_validator.py +393 -393
  19. tapps_agents/agents/documenter/framework_doc_updater.py +493 -493
  20. tapps_agents/agents/enhancer/__init__.py +7 -7
  21. tapps_agents/agents/evaluator/__init__.py +7 -7
  22. tapps_agents/agents/evaluator/agent.py +443 -443
  23. tapps_agents/agents/evaluator/priority_evaluator.py +641 -641
  24. tapps_agents/agents/evaluator/quality_analyzer.py +147 -147
  25. tapps_agents/agents/evaluator/report_generator.py +344 -344
  26. tapps_agents/agents/evaluator/usage_analyzer.py +192 -192
  27. tapps_agents/agents/evaluator/workflow_analyzer.py +189 -189
  28. tapps_agents/agents/implementer/__init__.py +7 -7
  29. tapps_agents/agents/implementer/agent.py +798 -798
  30. tapps_agents/agents/implementer/auto_fix.py +1119 -1119
  31. tapps_agents/agents/implementer/code_generator.py +73 -73
  32. tapps_agents/agents/improver/__init__.py +1 -1
  33. tapps_agents/agents/improver/agent.py +753 -753
  34. tapps_agents/agents/ops/__init__.py +1 -1
  35. tapps_agents/agents/ops/agent.py +619 -619
  36. tapps_agents/agents/ops/dependency_analyzer.py +600 -600
  37. tapps_agents/agents/orchestrator/__init__.py +5 -5
  38. tapps_agents/agents/orchestrator/agent.py +522 -522
  39. tapps_agents/agents/planner/__init__.py +7 -7
  40. tapps_agents/agents/planner/agent.py +1127 -1127
  41. tapps_agents/agents/reviewer/__init__.py +24 -24
  42. tapps_agents/agents/reviewer/agent.py +3513 -3513
  43. tapps_agents/agents/reviewer/aggregator.py +213 -213
  44. tapps_agents/agents/reviewer/batch_review.py +448 -448
  45. tapps_agents/agents/reviewer/cache.py +443 -443
  46. tapps_agents/agents/reviewer/context7_enhancer.py +630 -630
  47. tapps_agents/agents/reviewer/context_detector.py +203 -203
  48. tapps_agents/agents/reviewer/docker_compose_validator.py +158 -158
  49. tapps_agents/agents/reviewer/dockerfile_validator.py +176 -176
  50. tapps_agents/agents/reviewer/error_handling.py +126 -126
  51. tapps_agents/agents/reviewer/feedback_generator.py +490 -490
  52. tapps_agents/agents/reviewer/influxdb_validator.py +316 -316
  53. tapps_agents/agents/reviewer/issue_tracking.py +169 -169
  54. tapps_agents/agents/reviewer/library_detector.py +295 -295
  55. tapps_agents/agents/reviewer/library_patterns.py +268 -268
  56. tapps_agents/agents/reviewer/maintainability_scorer.py +593 -593
  57. tapps_agents/agents/reviewer/metric_strategies.py +276 -276
  58. tapps_agents/agents/reviewer/mqtt_validator.py +160 -160
  59. tapps_agents/agents/reviewer/output_enhancer.py +105 -105
  60. tapps_agents/agents/reviewer/pattern_detector.py +241 -241
  61. tapps_agents/agents/reviewer/performance_scorer.py +357 -357
  62. tapps_agents/agents/reviewer/phased_review.py +516 -516
  63. tapps_agents/agents/reviewer/progressive_review.py +435 -435
  64. tapps_agents/agents/reviewer/react_scorer.py +331 -331
  65. tapps_agents/agents/reviewer/score_constants.py +228 -228
  66. tapps_agents/agents/reviewer/score_validator.py +507 -507
  67. tapps_agents/agents/reviewer/scorer_registry.py +373 -373
  68. tapps_agents/agents/reviewer/scoring.py +1566 -1566
  69. tapps_agents/agents/reviewer/service_discovery.py +534 -534
  70. tapps_agents/agents/reviewer/tools/__init__.py +41 -41
  71. tapps_agents/agents/reviewer/tools/parallel_executor.py +581 -581
  72. tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -250
  73. tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -284
  74. tapps_agents/agents/reviewer/typescript_scorer.py +1142 -1142
  75. tapps_agents/agents/reviewer/validation.py +208 -208
  76. tapps_agents/agents/reviewer/websocket_validator.py +132 -132
  77. tapps_agents/agents/tester/__init__.py +7 -7
  78. tapps_agents/agents/tester/accessibility_auditor.py +309 -309
  79. tapps_agents/agents/tester/agent.py +1080 -1080
  80. tapps_agents/agents/tester/batch_generator.py +54 -54
  81. tapps_agents/agents/tester/context_learner.py +51 -51
  82. tapps_agents/agents/tester/coverage_analyzer.py +386 -386
  83. tapps_agents/agents/tester/coverage_test_generator.py +290 -290
  84. tapps_agents/agents/tester/debug_enhancer.py +238 -238
  85. tapps_agents/agents/tester/device_emulator.py +241 -241
  86. tapps_agents/agents/tester/integration_generator.py +62 -62
  87. tapps_agents/agents/tester/network_recorder.py +300 -300
  88. tapps_agents/agents/tester/performance_monitor.py +320 -320
  89. tapps_agents/agents/tester/test_fixer.py +316 -316
  90. tapps_agents/agents/tester/test_generator.py +632 -632
  91. tapps_agents/agents/tester/trace_manager.py +234 -234
  92. tapps_agents/agents/tester/visual_regression.py +291 -291
  93. tapps_agents/analysis/pattern_detector.py +36 -36
  94. tapps_agents/beads/hydration.py +213 -213
  95. tapps_agents/beads/parse.py +32 -32
  96. tapps_agents/beads/specs.py +206 -206
  97. tapps_agents/cli/__init__.py +9 -9
  98. tapps_agents/cli/__main__.py +8 -8
  99. tapps_agents/cli/base.py +478 -478
  100. tapps_agents/cli/command_classifier.py +72 -72
  101. tapps_agents/cli/commands/__init__.py +2 -2
  102. tapps_agents/cli/commands/analyst.py +173 -173
  103. tapps_agents/cli/commands/architect.py +109 -109
  104. tapps_agents/cli/commands/cleanup_agent.py +92 -92
  105. tapps_agents/cli/commands/common.py +126 -126
  106. tapps_agents/cli/commands/debugger.py +90 -90
  107. tapps_agents/cli/commands/designer.py +112 -112
  108. tapps_agents/cli/commands/documenter.py +136 -136
  109. tapps_agents/cli/commands/enhancer.py +110 -110
  110. tapps_agents/cli/commands/evaluator.py +255 -255
  111. tapps_agents/cli/commands/health.py +665 -665
  112. tapps_agents/cli/commands/implementer.py +301 -301
  113. tapps_agents/cli/commands/improver.py +91 -91
  114. tapps_agents/cli/commands/knowledge.py +111 -111
  115. tapps_agents/cli/commands/learning.py +172 -172
  116. tapps_agents/cli/commands/observability.py +283 -283
  117. tapps_agents/cli/commands/ops.py +135 -135
  118. tapps_agents/cli/commands/orchestrator.py +116 -116
  119. tapps_agents/cli/commands/planner.py +237 -237
  120. tapps_agents/cli/commands/reviewer.py +1872 -1872
  121. tapps_agents/cli/commands/status.py +285 -285
  122. tapps_agents/cli/commands/task.py +227 -219
  123. tapps_agents/cli/commands/tester.py +191 -191
  124. tapps_agents/cli/commands/top_level.py +3586 -3586
  125. tapps_agents/cli/feedback.py +936 -936
  126. tapps_agents/cli/formatters.py +608 -608
  127. tapps_agents/cli/help/__init__.py +7 -7
  128. tapps_agents/cli/help/static_help.py +425 -425
  129. tapps_agents/cli/network_detection.py +110 -110
  130. tapps_agents/cli/output_compactor.py +274 -274
  131. tapps_agents/cli/parsers/__init__.py +2 -2
  132. tapps_agents/cli/parsers/analyst.py +186 -186
  133. tapps_agents/cli/parsers/architect.py +167 -167
  134. tapps_agents/cli/parsers/cleanup_agent.py +228 -228
  135. tapps_agents/cli/parsers/debugger.py +116 -116
  136. tapps_agents/cli/parsers/designer.py +182 -182
  137. tapps_agents/cli/parsers/documenter.py +134 -134
  138. tapps_agents/cli/parsers/enhancer.py +113 -113
  139. tapps_agents/cli/parsers/evaluator.py +213 -213
  140. tapps_agents/cli/parsers/implementer.py +168 -168
  141. tapps_agents/cli/parsers/improver.py +132 -132
  142. tapps_agents/cli/parsers/ops.py +159 -159
  143. tapps_agents/cli/parsers/orchestrator.py +98 -98
  144. tapps_agents/cli/parsers/planner.py +145 -145
  145. tapps_agents/cli/parsers/reviewer.py +462 -462
  146. tapps_agents/cli/parsers/tester.py +124 -124
  147. tapps_agents/cli/progress_heartbeat.py +254 -254
  148. tapps_agents/cli/streaming_progress.py +336 -336
  149. tapps_agents/cli/utils/__init__.py +6 -6
  150. tapps_agents/cli/utils/agent_lifecycle.py +48 -48
  151. tapps_agents/cli/utils/error_formatter.py +82 -82
  152. tapps_agents/cli/utils/error_recovery.py +188 -188
  153. tapps_agents/cli/utils/output_handler.py +59 -59
  154. tapps_agents/cli/utils/prompt_enhancer.py +319 -319
  155. tapps_agents/cli/validators/__init__.py +9 -9
  156. tapps_agents/cli/validators/command_validator.py +81 -81
  157. tapps_agents/context7/__init__.py +112 -112
  158. tapps_agents/context7/agent_integration.py +869 -869
  159. tapps_agents/context7/analytics.py +382 -382
  160. tapps_agents/context7/analytics_dashboard.py +299 -299
  161. tapps_agents/context7/async_cache.py +681 -681
  162. tapps_agents/context7/backup_client.py +958 -958
  163. tapps_agents/context7/cache_locking.py +194 -194
  164. tapps_agents/context7/cache_metadata.py +214 -214
  165. tapps_agents/context7/cache_prewarm.py +488 -488
  166. tapps_agents/context7/cache_structure.py +168 -168
  167. tapps_agents/context7/cache_warming.py +604 -604
  168. tapps_agents/context7/circuit_breaker.py +376 -376
  169. tapps_agents/context7/cleanup.py +461 -461
  170. tapps_agents/context7/commands.py +858 -858
  171. tapps_agents/context7/credential_validation.py +276 -276
  172. tapps_agents/context7/cross_reference_resolver.py +168 -168
  173. tapps_agents/context7/cross_references.py +424 -424
  174. tapps_agents/context7/doc_manager.py +225 -225
  175. tapps_agents/context7/fuzzy_matcher.py +369 -369
  176. tapps_agents/context7/kb_cache.py +404 -404
  177. tapps_agents/context7/language_detector.py +219 -219
  178. tapps_agents/context7/library_detector.py +725 -725
  179. tapps_agents/context7/lookup.py +738 -738
  180. tapps_agents/context7/metadata.py +258 -258
  181. tapps_agents/context7/refresh_queue.py +300 -300
  182. tapps_agents/context7/security.py +373 -373
  183. tapps_agents/context7/staleness_policies.py +278 -278
  184. tapps_agents/context7/tiles_integration.py +47 -47
  185. tapps_agents/continuous_bug_fix/__init__.py +20 -20
  186. tapps_agents/continuous_bug_fix/bug_finder.py +306 -306
  187. tapps_agents/continuous_bug_fix/bug_fix_coordinator.py +177 -177
  188. tapps_agents/continuous_bug_fix/commit_manager.py +178 -178
  189. tapps_agents/continuous_bug_fix/continuous_bug_fixer.py +322 -322
  190. tapps_agents/continuous_bug_fix/proactive_bug_finder.py +285 -285
  191. tapps_agents/core/__init__.py +298 -298
  192. tapps_agents/core/adaptive_cache_config.py +432 -432
  193. tapps_agents/core/agent_base.py +647 -647
  194. tapps_agents/core/agent_cache.py +466 -466
  195. tapps_agents/core/agent_learning.py +1865 -1865
  196. tapps_agents/core/analytics_dashboard.py +563 -563
  197. tapps_agents/core/analytics_enhancements.py +597 -597
  198. tapps_agents/core/anonymization.py +274 -274
  199. tapps_agents/core/artifact_context_builder.py +293 -0
  200. tapps_agents/core/ast_parser.py +228 -228
  201. tapps_agents/core/async_file_ops.py +402 -402
  202. tapps_agents/core/best_practice_consultant.py +299 -299
  203. tapps_agents/core/brownfield_analyzer.py +299 -299
  204. tapps_agents/core/brownfield_review.py +541 -541
  205. tapps_agents/core/browser_controller.py +513 -513
  206. tapps_agents/core/capability_registry.py +418 -418
  207. tapps_agents/core/change_impact_analyzer.py +190 -190
  208. tapps_agents/core/checkpoint_manager.py +377 -377
  209. tapps_agents/core/code_generator.py +329 -329
  210. tapps_agents/core/code_validator.py +276 -276
  211. tapps_agents/core/command_registry.py +327 -327
  212. tapps_agents/core/config.py +33 -0
  213. tapps_agents/core/context_gathering/__init__.py +2 -2
  214. tapps_agents/core/context_gathering/repository_explorer.py +28 -28
  215. tapps_agents/core/context_intelligence/__init__.py +2 -2
  216. tapps_agents/core/context_intelligence/relevance_scorer.py +24 -24
  217. tapps_agents/core/context_intelligence/token_budget_manager.py +27 -27
  218. tapps_agents/core/context_manager.py +240 -240
  219. tapps_agents/core/cursor_feedback_monitor.py +146 -146
  220. tapps_agents/core/cursor_verification.py +290 -290
  221. tapps_agents/core/customization_loader.py +280 -280
  222. tapps_agents/core/customization_schema.py +260 -260
  223. tapps_agents/core/customization_template.py +238 -238
  224. tapps_agents/core/debug_logger.py +124 -124
  225. tapps_agents/core/design_validator.py +298 -298
  226. tapps_agents/core/diagram_generator.py +226 -226
  227. tapps_agents/core/docker_utils.py +232 -232
  228. tapps_agents/core/document_generator.py +617 -617
  229. tapps_agents/core/domain_detector.py +30 -30
  230. tapps_agents/core/error_envelope.py +454 -454
  231. tapps_agents/core/error_handler.py +270 -270
  232. tapps_agents/core/estimation_tracker.py +189 -189
  233. tapps_agents/core/eval_prompt_engine.py +116 -116
  234. tapps_agents/core/evaluation_base.py +119 -119
  235. tapps_agents/core/evaluation_models.py +320 -320
  236. tapps_agents/core/evaluation_orchestrator.py +225 -225
  237. tapps_agents/core/evaluators/__init__.py +7 -7
  238. tapps_agents/core/evaluators/architectural_evaluator.py +205 -205
  239. tapps_agents/core/evaluators/behavioral_evaluator.py +160 -160
  240. tapps_agents/core/evaluators/performance_profile_evaluator.py +160 -160
  241. tapps_agents/core/evaluators/security_posture_evaluator.py +148 -148
  242. tapps_agents/core/evaluators/spec_compliance_evaluator.py +181 -181
  243. tapps_agents/core/exceptions.py +107 -107
  244. tapps_agents/core/expert_config_generator.py +293 -293
  245. tapps_agents/core/export_schema.py +202 -202
  246. tapps_agents/core/external_feedback_models.py +102 -102
  247. tapps_agents/core/external_feedback_storage.py +213 -213
  248. tapps_agents/core/fallback_strategy.py +314 -314
  249. tapps_agents/core/feedback_analyzer.py +162 -162
  250. tapps_agents/core/feedback_collector.py +178 -178
  251. tapps_agents/core/git_operations.py +445 -445
  252. tapps_agents/core/hardware_profiler.py +151 -151
  253. tapps_agents/core/instructions.py +324 -324
  254. tapps_agents/core/io_guardrails.py +69 -69
  255. tapps_agents/core/issue_manifest.py +249 -249
  256. tapps_agents/core/issue_schema.py +139 -139
  257. tapps_agents/core/json_utils.py +128 -128
  258. tapps_agents/core/knowledge_graph.py +446 -446
  259. tapps_agents/core/language_detector.py +296 -296
  260. tapps_agents/core/learning_confidence.py +242 -242
  261. tapps_agents/core/learning_dashboard.py +246 -246
  262. tapps_agents/core/learning_decision.py +384 -384
  263. tapps_agents/core/learning_explainability.py +578 -578
  264. tapps_agents/core/learning_export.py +287 -287
  265. tapps_agents/core/learning_integration.py +228 -228
  266. tapps_agents/core/llm_behavior.py +232 -232
  267. tapps_agents/core/long_duration_support.py +786 -786
  268. tapps_agents/core/mcp_setup.py +106 -106
  269. tapps_agents/core/memory_integration.py +396 -396
  270. tapps_agents/core/meta_learning.py +666 -666
  271. tapps_agents/core/module_path_sanitizer.py +199 -199
  272. tapps_agents/core/multi_agent_orchestrator.py +382 -382
  273. tapps_agents/core/network_errors.py +125 -125
  274. tapps_agents/core/nfr_validator.py +336 -336
  275. tapps_agents/core/offline_mode.py +158 -158
  276. tapps_agents/core/output_contracts.py +300 -300
  277. tapps_agents/core/output_formatter.py +300 -300
  278. tapps_agents/core/path_normalizer.py +174 -174
  279. tapps_agents/core/path_validator.py +322 -322
  280. tapps_agents/core/pattern_library.py +250 -250
  281. tapps_agents/core/performance_benchmark.py +301 -301
  282. tapps_agents/core/performance_monitor.py +184 -184
  283. tapps_agents/core/playwright_mcp_controller.py +771 -771
  284. tapps_agents/core/policy_loader.py +135 -135
  285. tapps_agents/core/progress.py +166 -166
  286. tapps_agents/core/project_profile.py +354 -354
  287. tapps_agents/core/project_type_detector.py +454 -454
  288. tapps_agents/core/prompt_base.py +223 -223
  289. tapps_agents/core/prompt_learning/__init__.py +2 -2
  290. tapps_agents/core/prompt_learning/learning_loop.py +24 -24
  291. tapps_agents/core/prompt_learning/project_prompt_store.py +25 -25
  292. tapps_agents/core/prompt_learning/skills_prompt_analyzer.py +35 -35
  293. tapps_agents/core/prompt_optimization/__init__.py +6 -6
  294. tapps_agents/core/prompt_optimization/ab_tester.py +114 -114
  295. tapps_agents/core/prompt_optimization/correlation_analyzer.py +160 -160
  296. tapps_agents/core/prompt_optimization/progressive_refiner.py +129 -129
  297. tapps_agents/core/prompt_optimization/prompt_library.py +37 -37
  298. tapps_agents/core/requirements_evaluator.py +431 -431
  299. tapps_agents/core/resource_aware_executor.py +449 -449
  300. tapps_agents/core/resource_monitor.py +343 -343
  301. tapps_agents/core/resume_handler.py +298 -298
  302. tapps_agents/core/retry_handler.py +197 -197
  303. tapps_agents/core/review_checklists.py +479 -479
  304. tapps_agents/core/role_loader.py +201 -201
  305. tapps_agents/core/role_template_loader.py +201 -201
  306. tapps_agents/core/runtime_mode.py +60 -60
  307. tapps_agents/core/security_scanner.py +342 -342
  308. tapps_agents/core/skill_agent_registry.py +194 -194
  309. tapps_agents/core/skill_integration.py +208 -208
  310. tapps_agents/core/skill_loader.py +492 -492
  311. tapps_agents/core/skill_template.py +341 -341
  312. tapps_agents/core/skill_validator.py +478 -478
  313. tapps_agents/core/stack_analyzer.py +35 -35
  314. tapps_agents/core/startup.py +174 -174
  315. tapps_agents/core/storage_manager.py +397 -397
  316. tapps_agents/core/storage_models.py +166 -166
  317. tapps_agents/core/story_evaluator.py +410 -410
  318. tapps_agents/core/subprocess_utils.py +170 -170
  319. tapps_agents/core/task_duration.py +296 -296
  320. tapps_agents/core/task_memory.py +582 -582
  321. tapps_agents/core/task_state.py +226 -226
  322. tapps_agents/core/tech_stack_priorities.py +208 -208
  323. tapps_agents/core/temp_directory.py +194 -194
  324. tapps_agents/core/template_merger.py +600 -600
  325. tapps_agents/core/template_selector.py +280 -280
  326. tapps_agents/core/test_generator.py +286 -286
  327. tapps_agents/core/tiered_context.py +253 -253
  328. tapps_agents/core/token_monitor.py +345 -345
  329. tapps_agents/core/traceability.py +254 -254
  330. tapps_agents/core/trajectory_tracker.py +50 -50
  331. tapps_agents/core/unicode_safe.py +143 -143
  332. tapps_agents/core/unified_cache_config.py +170 -170
  333. tapps_agents/core/unified_state.py +324 -324
  334. tapps_agents/core/validate_cursor_setup.py +237 -237
  335. tapps_agents/core/validation_registry.py +136 -136
  336. tapps_agents/core/validators/__init__.py +4 -4
  337. tapps_agents/core/validators/python_validator.py +87 -87
  338. tapps_agents/core/verification_agent.py +90 -90
  339. tapps_agents/core/visual_feedback.py +644 -644
  340. tapps_agents/core/workflow_validator.py +197 -197
  341. tapps_agents/core/worktree.py +367 -367
  342. tapps_agents/docker/__init__.py +10 -10
  343. tapps_agents/docker/analyzer.py +186 -186
  344. tapps_agents/docker/debugger.py +229 -229
  345. tapps_agents/docker/error_patterns.py +216 -216
  346. tapps_agents/epic/__init__.py +22 -22
  347. tapps_agents/epic/beads_sync.py +115 -115
  348. tapps_agents/epic/markdown_sync.py +105 -105
  349. tapps_agents/epic/models.py +96 -96
  350. tapps_agents/experts/__init__.py +163 -163
  351. tapps_agents/experts/agent_integration.py +243 -243
  352. tapps_agents/experts/auto_generator.py +331 -331
  353. tapps_agents/experts/base_expert.py +536 -536
  354. tapps_agents/experts/builtin_registry.py +261 -261
  355. tapps_agents/experts/business_metrics.py +565 -565
  356. tapps_agents/experts/cache.py +266 -266
  357. tapps_agents/experts/confidence_breakdown.py +306 -306
  358. tapps_agents/experts/confidence_calculator.py +336 -336
  359. tapps_agents/experts/confidence_metrics.py +236 -236
  360. tapps_agents/experts/domain_config.py +311 -311
  361. tapps_agents/experts/domain_detector.py +550 -550
  362. tapps_agents/experts/domain_utils.py +84 -84
  363. tapps_agents/experts/expert_config.py +113 -113
  364. tapps_agents/experts/expert_engine.py +465 -465
  365. tapps_agents/experts/expert_registry.py +744 -744
  366. tapps_agents/experts/expert_synthesizer.py +70 -70
  367. tapps_agents/experts/governance.py +197 -197
  368. tapps_agents/experts/history_logger.py +312 -312
  369. tapps_agents/experts/knowledge/README.md +180 -180
  370. tapps_agents/experts/knowledge/accessibility/accessible-forms.md +331 -331
  371. tapps_agents/experts/knowledge/accessibility/aria-patterns.md +344 -344
  372. tapps_agents/experts/knowledge/accessibility/color-contrast.md +285 -285
  373. tapps_agents/experts/knowledge/accessibility/keyboard-navigation.md +332 -332
  374. tapps_agents/experts/knowledge/accessibility/screen-readers.md +282 -282
  375. tapps_agents/experts/knowledge/accessibility/semantic-html.md +355 -355
  376. tapps_agents/experts/knowledge/accessibility/testing-accessibility.md +369 -369
  377. tapps_agents/experts/knowledge/accessibility/wcag-2.1.md +296 -296
  378. tapps_agents/experts/knowledge/accessibility/wcag-2.2.md +211 -211
  379. tapps_agents/experts/knowledge/agent-learning/best-practices.md +715 -715
  380. tapps_agents/experts/knowledge/agent-learning/pattern-extraction.md +282 -282
  381. tapps_agents/experts/knowledge/agent-learning/prompt-optimization.md +320 -320
  382. tapps_agents/experts/knowledge/ai-frameworks/model-optimization.md +90 -90
  383. tapps_agents/experts/knowledge/ai-frameworks/openvino-patterns.md +260 -260
  384. tapps_agents/experts/knowledge/api-design-integration/api-gateway-patterns.md +309 -309
  385. tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +521 -521
  386. tapps_agents/experts/knowledge/api-design-integration/api-versioning.md +421 -421
  387. tapps_agents/experts/knowledge/api-design-integration/async-protocol-patterns.md +61 -61
  388. tapps_agents/experts/knowledge/api-design-integration/contract-testing.md +221 -221
  389. tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +489 -489
  390. tapps_agents/experts/knowledge/api-design-integration/fastapi-patterns.md +360 -360
  391. tapps_agents/experts/knowledge/api-design-integration/fastapi-testing.md +262 -262
  392. tapps_agents/experts/knowledge/api-design-integration/graphql-patterns.md +582 -582
  393. tapps_agents/experts/knowledge/api-design-integration/grpc-best-practices.md +499 -499
  394. tapps_agents/experts/knowledge/api-design-integration/mqtt-patterns.md +455 -455
  395. tapps_agents/experts/knowledge/api-design-integration/rate-limiting.md +507 -507
  396. tapps_agents/experts/knowledge/api-design-integration/restful-api-design.md +618 -618
  397. tapps_agents/experts/knowledge/api-design-integration/websocket-patterns.md +480 -480
  398. tapps_agents/experts/knowledge/cloud-infrastructure/cloud-native-patterns.md +175 -175
  399. tapps_agents/experts/knowledge/cloud-infrastructure/container-health-checks.md +261 -261
  400. tapps_agents/experts/knowledge/cloud-infrastructure/containerization.md +222 -222
  401. tapps_agents/experts/knowledge/cloud-infrastructure/cost-optimization.md +122 -122
  402. tapps_agents/experts/knowledge/cloud-infrastructure/disaster-recovery.md +153 -153
  403. tapps_agents/experts/knowledge/cloud-infrastructure/dockerfile-patterns.md +285 -285
  404. tapps_agents/experts/knowledge/cloud-infrastructure/infrastructure-as-code.md +187 -187
  405. tapps_agents/experts/knowledge/cloud-infrastructure/kubernetes-patterns.md +253 -253
  406. tapps_agents/experts/knowledge/cloud-infrastructure/multi-cloud-strategies.md +155 -155
  407. tapps_agents/experts/knowledge/cloud-infrastructure/serverless-architecture.md +200 -200
  408. tapps_agents/experts/knowledge/code-quality-analysis/README.md +16 -16
  409. tapps_agents/experts/knowledge/code-quality-analysis/code-metrics.md +137 -137
  410. tapps_agents/experts/knowledge/code-quality-analysis/complexity-analysis.md +181 -181
  411. tapps_agents/experts/knowledge/code-quality-analysis/technical-debt-patterns.md +191 -191
  412. tapps_agents/experts/knowledge/data-privacy-compliance/anonymization.md +313 -313
  413. tapps_agents/experts/knowledge/data-privacy-compliance/ccpa.md +255 -255
  414. tapps_agents/experts/knowledge/data-privacy-compliance/consent-management.md +282 -282
  415. tapps_agents/experts/knowledge/data-privacy-compliance/data-minimization.md +275 -275
  416. tapps_agents/experts/knowledge/data-privacy-compliance/data-retention.md +297 -297
  417. tapps_agents/experts/knowledge/data-privacy-compliance/data-subject-rights.md +383 -383
  418. tapps_agents/experts/knowledge/data-privacy-compliance/encryption-privacy.md +285 -285
  419. tapps_agents/experts/knowledge/data-privacy-compliance/gdpr.md +344 -344
  420. tapps_agents/experts/knowledge/data-privacy-compliance/hipaa.md +385 -385
  421. tapps_agents/experts/knowledge/data-privacy-compliance/privacy-by-design.md +280 -280
  422. tapps_agents/experts/knowledge/database-data-management/acid-vs-cap.md +164 -164
  423. tapps_agents/experts/knowledge/database-data-management/backup-and-recovery.md +182 -182
  424. tapps_agents/experts/knowledge/database-data-management/data-modeling.md +172 -172
  425. tapps_agents/experts/knowledge/database-data-management/database-design.md +187 -187
  426. tapps_agents/experts/knowledge/database-data-management/flux-query-optimization.md +342 -342
  427. tapps_agents/experts/knowledge/database-data-management/influxdb-connection-patterns.md +432 -432
  428. tapps_agents/experts/knowledge/database-data-management/influxdb-patterns.md +442 -442
  429. tapps_agents/experts/knowledge/database-data-management/migration-strategies.md +216 -216
  430. tapps_agents/experts/knowledge/database-data-management/nosql-patterns.md +259 -259
  431. tapps_agents/experts/knowledge/database-data-management/scalability-patterns.md +184 -184
  432. tapps_agents/experts/knowledge/database-data-management/sql-optimization.md +175 -175
  433. tapps_agents/experts/knowledge/database-data-management/time-series-modeling.md +444 -444
  434. tapps_agents/experts/knowledge/development-workflow/README.md +16 -16
  435. tapps_agents/experts/knowledge/development-workflow/automation-best-practices.md +216 -216
  436. tapps_agents/experts/knowledge/development-workflow/build-strategies.md +198 -198
  437. tapps_agents/experts/knowledge/development-workflow/deployment-patterns.md +205 -205
  438. tapps_agents/experts/knowledge/development-workflow/git-workflows.md +205 -205
  439. tapps_agents/experts/knowledge/documentation-knowledge-management/README.md +16 -16
  440. tapps_agents/experts/knowledge/documentation-knowledge-management/api-documentation-patterns.md +231 -231
  441. tapps_agents/experts/knowledge/documentation-knowledge-management/documentation-standards.md +191 -191
  442. tapps_agents/experts/knowledge/documentation-knowledge-management/knowledge-management.md +171 -171
  443. tapps_agents/experts/knowledge/documentation-knowledge-management/technical-writing-guide.md +192 -192
  444. tapps_agents/experts/knowledge/observability-monitoring/alerting-patterns.md +461 -461
  445. tapps_agents/experts/knowledge/observability-monitoring/apm-tools.md +459 -459
  446. tapps_agents/experts/knowledge/observability-monitoring/distributed-tracing.md +367 -367
  447. tapps_agents/experts/knowledge/observability-monitoring/logging-strategies.md +478 -478
  448. tapps_agents/experts/knowledge/observability-monitoring/metrics-and-monitoring.md +510 -510
  449. tapps_agents/experts/knowledge/observability-monitoring/observability-best-practices.md +492 -492
  450. tapps_agents/experts/knowledge/observability-monitoring/open-telemetry.md +573 -573
  451. tapps_agents/experts/knowledge/observability-monitoring/slo-sli-sla.md +419 -419
  452. tapps_agents/experts/knowledge/performance/anti-patterns.md +284 -284
  453. tapps_agents/experts/knowledge/performance/api-performance.md +256 -256
  454. tapps_agents/experts/knowledge/performance/caching.md +327 -327
  455. tapps_agents/experts/knowledge/performance/database-performance.md +252 -252
  456. tapps_agents/experts/knowledge/performance/optimization-patterns.md +327 -327
  457. tapps_agents/experts/knowledge/performance/profiling.md +297 -297
  458. tapps_agents/experts/knowledge/performance/resource-management.md +293 -293
  459. tapps_agents/experts/knowledge/performance/scalability.md +306 -306
  460. tapps_agents/experts/knowledge/security/owasp-top10.md +209 -209
  461. tapps_agents/experts/knowledge/security/secure-coding-practices.md +207 -207
  462. tapps_agents/experts/knowledge/security/threat-modeling.md +220 -220
  463. tapps_agents/experts/knowledge/security/vulnerability-patterns.md +342 -342
  464. tapps_agents/experts/knowledge/software-architecture/docker-compose-patterns.md +314 -314
  465. tapps_agents/experts/knowledge/software-architecture/microservices-patterns.md +379 -379
  466. tapps_agents/experts/knowledge/software-architecture/service-communication.md +316 -316
  467. tapps_agents/experts/knowledge/testing/best-practices.md +310 -310
  468. tapps_agents/experts/knowledge/testing/coverage-analysis.md +293 -293
  469. tapps_agents/experts/knowledge/testing/mocking.md +256 -256
  470. tapps_agents/experts/knowledge/testing/test-automation.md +276 -276
  471. tapps_agents/experts/knowledge/testing/test-data.md +271 -271
  472. tapps_agents/experts/knowledge/testing/test-design-patterns.md +280 -280
  473. tapps_agents/experts/knowledge/testing/test-maintenance.md +236 -236
  474. tapps_agents/experts/knowledge/testing/test-strategies.md +311 -311
  475. tapps_agents/experts/knowledge/user-experience/information-architecture.md +325 -325
  476. tapps_agents/experts/knowledge/user-experience/interaction-design.md +363 -363
  477. tapps_agents/experts/knowledge/user-experience/prototyping.md +293 -293
  478. tapps_agents/experts/knowledge/user-experience/usability-heuristics.md +337 -337
  479. tapps_agents/experts/knowledge/user-experience/usability-testing.md +311 -311
  480. tapps_agents/experts/knowledge/user-experience/user-journeys.md +296 -296
  481. tapps_agents/experts/knowledge/user-experience/user-research.md +373 -373
  482. tapps_agents/experts/knowledge/user-experience/ux-principles.md +340 -340
  483. tapps_agents/experts/knowledge_freshness.py +321 -321
  484. tapps_agents/experts/knowledge_ingestion.py +438 -438
  485. tapps_agents/experts/knowledge_need_detector.py +93 -93
  486. tapps_agents/experts/knowledge_validator.py +382 -382
  487. tapps_agents/experts/observability.py +440 -440
  488. tapps_agents/experts/passive_notifier.py +238 -238
  489. tapps_agents/experts/proactive_orchestrator.py +32 -32
  490. tapps_agents/experts/rag_chunker.py +205 -205
  491. tapps_agents/experts/rag_embedder.py +152 -152
  492. tapps_agents/experts/rag_evaluation.py +299 -299
  493. tapps_agents/experts/rag_index.py +303 -303
  494. tapps_agents/experts/rag_metrics.py +293 -293
  495. tapps_agents/experts/rag_safety.py +263 -263
  496. tapps_agents/experts/report_generator.py +296 -296
  497. tapps_agents/experts/setup_wizard.py +441 -441
  498. tapps_agents/experts/simple_rag.py +431 -431
  499. tapps_agents/experts/vector_rag.py +354 -354
  500. tapps_agents/experts/weight_distributor.py +304 -304
  501. tapps_agents/health/__init__.py +24 -24
  502. tapps_agents/health/base.py +75 -75
  503. tapps_agents/health/checks/__init__.py +22 -22
  504. tapps_agents/health/checks/automation.py +127 -127
  505. tapps_agents/health/checks/context7_cache.py +210 -210
  506. tapps_agents/health/checks/environment.py +116 -116
  507. tapps_agents/health/checks/execution.py +170 -170
  508. tapps_agents/health/checks/knowledge_base.py +187 -187
  509. tapps_agents/health/checks/outcomes.py +324 -324
  510. tapps_agents/health/collector.py +280 -280
  511. tapps_agents/health/dashboard.py +137 -137
  512. tapps_agents/health/metrics.py +151 -151
  513. tapps_agents/health/orchestrator.py +271 -271
  514. tapps_agents/health/registry.py +166 -166
  515. tapps_agents/hooks/__init__.py +33 -33
  516. tapps_agents/hooks/config.py +140 -140
  517. tapps_agents/hooks/events.py +135 -135
  518. tapps_agents/hooks/executor.py +128 -128
  519. tapps_agents/hooks/manager.py +143 -143
  520. tapps_agents/integration/__init__.py +8 -8
  521. tapps_agents/integration/service_integrator.py +121 -121
  522. tapps_agents/integrations/__init__.py +10 -10
  523. tapps_agents/integrations/clawdbot.py +525 -525
  524. tapps_agents/integrations/memory_bridge.py +356 -356
  525. tapps_agents/mcp/__init__.py +18 -18
  526. tapps_agents/mcp/gateway.py +112 -112
  527. tapps_agents/mcp/servers/__init__.py +13 -13
  528. tapps_agents/mcp/servers/analysis.py +204 -204
  529. tapps_agents/mcp/servers/context7.py +198 -198
  530. tapps_agents/mcp/servers/filesystem.py +218 -218
  531. tapps_agents/mcp/servers/git.py +201 -201
  532. tapps_agents/mcp/tool_registry.py +115 -115
  533. tapps_agents/quality/__init__.py +54 -54
  534. tapps_agents/quality/coverage_analyzer.py +379 -379
  535. tapps_agents/quality/enforcement.py +82 -82
  536. tapps_agents/quality/gates/__init__.py +37 -37
  537. tapps_agents/quality/gates/approval_gate.py +255 -255
  538. tapps_agents/quality/gates/base.py +84 -84
  539. tapps_agents/quality/gates/exceptions.py +43 -43
  540. tapps_agents/quality/gates/policy_gate.py +195 -195
  541. tapps_agents/quality/gates/registry.py +239 -239
  542. tapps_agents/quality/gates/security_gate.py +156 -156
  543. tapps_agents/quality/quality_gates.py +369 -369
  544. tapps_agents/quality/secret_scanner.py +335 -335
  545. tapps_agents/session/__init__.py +19 -19
  546. tapps_agents/session/manager.py +256 -256
  547. tapps_agents/simple_mode/__init__.py +66 -66
  548. tapps_agents/simple_mode/agent_contracts.py +357 -357
  549. tapps_agents/simple_mode/beads_hooks.py +151 -151
  550. tapps_agents/simple_mode/code_snippet_handler.py +382 -382
  551. tapps_agents/simple_mode/documentation_manager.py +395 -395
  552. tapps_agents/simple_mode/documentation_reader.py +187 -187
  553. tapps_agents/simple_mode/file_inference.py +292 -292
  554. tapps_agents/simple_mode/framework_change_detector.py +268 -268
  555. tapps_agents/simple_mode/intent_parser.py +510 -510
  556. tapps_agents/simple_mode/learning_progression.py +358 -358
  557. tapps_agents/simple_mode/nl_handler.py +700 -700
  558. tapps_agents/simple_mode/onboarding.py +253 -253
  559. tapps_agents/simple_mode/orchestrators/__init__.py +38 -38
  560. tapps_agents/simple_mode/orchestrators/base.py +185 -185
  561. tapps_agents/simple_mode/orchestrators/breakdown_orchestrator.py +49 -49
  562. tapps_agents/simple_mode/orchestrators/brownfield_orchestrator.py +135 -135
  563. tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2700 -2667
  564. tapps_agents/simple_mode/orchestrators/deliverable_checklist.py +349 -349
  565. tapps_agents/simple_mode/orchestrators/enhance_orchestrator.py +53 -53
  566. tapps_agents/simple_mode/orchestrators/epic_orchestrator.py +122 -122
  567. tapps_agents/simple_mode/orchestrators/explore_orchestrator.py +184 -184
  568. tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +723 -723
  569. tapps_agents/simple_mode/orchestrators/plan_analysis_orchestrator.py +206 -206
  570. tapps_agents/simple_mode/orchestrators/pr_orchestrator.py +237 -237
  571. tapps_agents/simple_mode/orchestrators/refactor_orchestrator.py +222 -222
  572. tapps_agents/simple_mode/orchestrators/requirements_tracer.py +262 -262
  573. tapps_agents/simple_mode/orchestrators/resume_orchestrator.py +210 -210
  574. tapps_agents/simple_mode/orchestrators/review_orchestrator.py +161 -161
  575. tapps_agents/simple_mode/orchestrators/test_orchestrator.py +82 -82
  576. tapps_agents/simple_mode/output_aggregator.py +340 -340
  577. tapps_agents/simple_mode/result_formatters.py +598 -598
  578. tapps_agents/simple_mode/step_dependencies.py +382 -382
  579. tapps_agents/simple_mode/step_results.py +276 -276
  580. tapps_agents/simple_mode/streaming.py +388 -388
  581. tapps_agents/simple_mode/variations.py +129 -129
  582. tapps_agents/simple_mode/visual_feedback.py +238 -238
  583. tapps_agents/simple_mode/zero_config.py +274 -274
  584. tapps_agents/suggestions/__init__.py +8 -8
  585. tapps_agents/suggestions/inline_suggester.py +52 -52
  586. tapps_agents/templates/__init__.py +8 -8
  587. tapps_agents/templates/microservice_generator.py +274 -274
  588. tapps_agents/utils/env_validator.py +291 -291
  589. tapps_agents/workflow/__init__.py +171 -171
  590. tapps_agents/workflow/acceptance_verifier.py +132 -132
  591. tapps_agents/workflow/agent_handlers/__init__.py +41 -41
  592. tapps_agents/workflow/agent_handlers/analyst_handler.py +75 -75
  593. tapps_agents/workflow/agent_handlers/architect_handler.py +107 -107
  594. tapps_agents/workflow/agent_handlers/base.py +84 -84
  595. tapps_agents/workflow/agent_handlers/debugger_handler.py +100 -100
  596. tapps_agents/workflow/agent_handlers/designer_handler.py +110 -110
  597. tapps_agents/workflow/agent_handlers/documenter_handler.py +94 -94
  598. tapps_agents/workflow/agent_handlers/implementer_handler.py +235 -235
  599. tapps_agents/workflow/agent_handlers/ops_handler.py +62 -62
  600. tapps_agents/workflow/agent_handlers/orchestrator_handler.py +43 -43
  601. tapps_agents/workflow/agent_handlers/planner_handler.py +98 -98
  602. tapps_agents/workflow/agent_handlers/registry.py +119 -119
  603. tapps_agents/workflow/agent_handlers/reviewer_handler.py +119 -119
  604. tapps_agents/workflow/agent_handlers/tester_handler.py +69 -69
  605. tapps_agents/workflow/analytics_accessor.py +337 -337
  606. tapps_agents/workflow/analytics_alerts.py +416 -416
  607. tapps_agents/workflow/analytics_dashboard_cursor.py +281 -281
  608. tapps_agents/workflow/analytics_dual_write.py +103 -103
  609. tapps_agents/workflow/analytics_integration.py +119 -119
  610. tapps_agents/workflow/analytics_query_parser.py +278 -278
  611. tapps_agents/workflow/analytics_visualizer.py +259 -259
  612. tapps_agents/workflow/artifact_helper.py +204 -204
  613. tapps_agents/workflow/audit_logger.py +263 -263
  614. tapps_agents/workflow/auto_execution_config.py +340 -340
  615. tapps_agents/workflow/auto_progression.py +586 -586
  616. tapps_agents/workflow/branch_cleanup.py +349 -349
  617. tapps_agents/workflow/checkpoint.py +256 -256
  618. tapps_agents/workflow/checkpoint_manager.py +178 -178
  619. tapps_agents/workflow/code_artifact.py +179 -179
  620. tapps_agents/workflow/common_enums.py +96 -96
  621. tapps_agents/workflow/confirmation_handler.py +130 -130
  622. tapps_agents/workflow/context_analyzer.py +222 -222
  623. tapps_agents/workflow/context_artifact.py +230 -230
  624. tapps_agents/workflow/cursor_chat.py +94 -94
  625. tapps_agents/workflow/cursor_executor.py +2337 -2196
  626. tapps_agents/workflow/cursor_skill_helper.py +516 -516
  627. tapps_agents/workflow/dependency_resolver.py +244 -244
  628. tapps_agents/workflow/design_artifact.py +156 -156
  629. tapps_agents/workflow/detector.py +751 -751
  630. tapps_agents/workflow/direct_execution_fallback.py +301 -301
  631. tapps_agents/workflow/docs_artifact.py +168 -168
  632. tapps_agents/workflow/enforcer.py +389 -389
  633. tapps_agents/workflow/enhancement_artifact.py +142 -142
  634. tapps_agents/workflow/error_recovery.py +806 -806
  635. tapps_agents/workflow/event_bus.py +183 -183
  636. tapps_agents/workflow/event_log.py +612 -612
  637. tapps_agents/workflow/events.py +63 -63
  638. tapps_agents/workflow/exceptions.py +43 -43
  639. tapps_agents/workflow/execution_graph.py +498 -498
  640. tapps_agents/workflow/execution_plan.py +126 -126
  641. tapps_agents/workflow/file_utils.py +186 -186
  642. tapps_agents/workflow/gate_evaluator.py +182 -182
  643. tapps_agents/workflow/gate_integration.py +200 -200
  644. tapps_agents/workflow/graph_visualizer.py +130 -130
  645. tapps_agents/workflow/health_checker.py +206 -206
  646. tapps_agents/workflow/logging_helper.py +243 -243
  647. tapps_agents/workflow/manifest.py +582 -582
  648. tapps_agents/workflow/marker_writer.py +250 -250
  649. tapps_agents/workflow/message_formatter.py +188 -188
  650. tapps_agents/workflow/messaging.py +325 -325
  651. tapps_agents/workflow/metadata_models.py +91 -91
  652. tapps_agents/workflow/metrics_integration.py +226 -226
  653. tapps_agents/workflow/migration_utils.py +116 -116
  654. tapps_agents/workflow/models.py +148 -111
  655. tapps_agents/workflow/nlp_config.py +198 -198
  656. tapps_agents/workflow/nlp_error_handler.py +207 -207
  657. tapps_agents/workflow/nlp_executor.py +163 -163
  658. tapps_agents/workflow/nlp_parser.py +528 -528
  659. tapps_agents/workflow/observability_dashboard.py +451 -451
  660. tapps_agents/workflow/observer.py +170 -170
  661. tapps_agents/workflow/ops_artifact.py +257 -257
  662. tapps_agents/workflow/output_passing.py +214 -214
  663. tapps_agents/workflow/parallel_executor.py +463 -463
  664. tapps_agents/workflow/planning_artifact.py +179 -179
  665. tapps_agents/workflow/preset_loader.py +285 -285
  666. tapps_agents/workflow/preset_recommender.py +270 -270
  667. tapps_agents/workflow/progress_logger.py +145 -145
  668. tapps_agents/workflow/progress_manager.py +303 -303
  669. tapps_agents/workflow/progress_monitor.py +186 -186
  670. tapps_agents/workflow/progress_updates.py +423 -423
  671. tapps_agents/workflow/quality_artifact.py +158 -158
  672. tapps_agents/workflow/quality_loopback.py +101 -101
  673. tapps_agents/workflow/recommender.py +387 -387
  674. tapps_agents/workflow/remediation_loop.py +166 -166
  675. tapps_agents/workflow/result_aggregator.py +300 -300
  676. tapps_agents/workflow/review_artifact.py +185 -185
  677. tapps_agents/workflow/schema_validator.py +522 -522
  678. tapps_agents/workflow/session_handoff.py +178 -178
  679. tapps_agents/workflow/skill_invoker.py +648 -648
  680. tapps_agents/workflow/state_manager.py +756 -756
  681. tapps_agents/workflow/state_persistence_config.py +331 -331
  682. tapps_agents/workflow/status_monitor.py +449 -449
  683. tapps_agents/workflow/step_checkpoint.py +314 -314
  684. tapps_agents/workflow/step_details.py +201 -201
  685. tapps_agents/workflow/story_models.py +147 -147
  686. tapps_agents/workflow/streaming.py +416 -416
  687. tapps_agents/workflow/suggestion_engine.py +552 -552
  688. tapps_agents/workflow/testing_artifact.py +186 -186
  689. tapps_agents/workflow/timeline.py +158 -158
  690. tapps_agents/workflow/token_integration.py +209 -209
  691. tapps_agents/workflow/validation.py +217 -217
  692. tapps_agents/workflow/visual_feedback.py +391 -391
  693. tapps_agents/workflow/workflow_chain.py +95 -95
  694. tapps_agents/workflow/workflow_summary.py +219 -219
  695. tapps_agents/workflow/worktree_manager.py +724 -724
  696. {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/METADATA +672 -672
  697. tapps_agents-3.6.0.dist-info/RECORD +758 -0
  698. {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/licenses/LICENSE +22 -22
  699. tapps_agents/health/checks/outcomes.backup_20260204_064058.py +0 -324
  700. tapps_agents/health/checks/outcomes.backup_20260204_064256.py +0 -324
  701. tapps_agents/health/checks/outcomes.backup_20260204_064600.py +0 -324
  702. tapps_agents-3.5.40.dist-info/RECORD +0 -760
  703. {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/WHEEL +0 -0
  704. {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/entry_points.txt +0 -0
  705. {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/top_level.txt +0 -0
@@ -1,2196 +1,2337 @@
1
- """
2
- Cursor-Native Workflow Executor.
3
-
4
- This module provides a Cursor-native execution model that uses Cursor Skills
5
- and direct execution for LLM operations.
6
- """
7
-
8
- # @ai-prime-directive: This file implements the Cursor-native workflow executor for Cursor Skills integration.
9
- # This executor is used when running in Cursor mode (TAPPS_AGENTS_MODE=cursor) and invokes Cursor Skills
10
- # for LLM operations instead of direct API calls. Do not modify the Skill invocation pattern without
11
- # updating Cursor Skills integration and tests.
12
-
13
- # @ai-constraints:
14
- # - Must only execute in Cursor mode (is_cursor_mode() must return True)
15
- # - Must use SkillInvoker for all LLM operations - do not make direct API calls
16
- # - Workflow state must be compatible with WorkflowExecutor for cross-mode compatibility
17
- # - Performance: Skill invocation should complete in <5s for typical operations
18
- # - Must maintain backward compatibility with WorkflowExecutor workflow definitions
19
-
20
- # @note[2026-02-03]: Equal platform support policy per ADR-002.
21
- # The framework provides equal support for Claude Desktop, Cursor IDE, and Claude Code CLI.
22
- # Uses handler-first execution (AgentHandlerRegistry) before platform-specific features.
23
- # See docs/architecture/decisions/ADR-002-equal-platform-support.md
24
-
25
- from __future__ import annotations
26
-
27
- import asyncio
28
- import hashlib
29
- import os
30
- from collections.abc import AsyncIterator
31
- from contextlib import asynccontextmanager
32
- from dataclasses import asdict
33
- from datetime import datetime
34
- from pathlib import Path
35
- from typing import Any
36
-
37
- from ..core.project_profile import (
38
- ProjectProfile,
39
- ProjectProfileDetector,
40
- load_project_profile,
41
- save_project_profile,
42
- )
43
- from ..core.runtime_mode import is_cursor_mode
44
- from .auto_progression import AutoProgressionManager, ProgressionAction
45
- from .checkpoint_manager import (
46
- CheckpointConfig,
47
- CheckpointFrequency,
48
- WorkflowCheckpointManager,
49
- )
50
- from .error_recovery import ErrorContext, ErrorRecoveryManager
51
- from .event_bus import FileBasedEventBus
52
- from .events import EventType, WorkflowEvent
53
- from .logging_helper import WorkflowLogger
54
- from .marker_writer import MarkerWriter
55
- from .models import Artifact, StepExecution, Workflow, WorkflowState, WorkflowStep
56
- from .parallel_executor import ParallelStepExecutor
57
- from .progress_manager import ProgressUpdateManager
58
- from .skill_invoker import SkillInvoker
59
- from .state_manager import AdvancedStateManager
60
- from .state_persistence_config import StatePersistenceConfigManager
61
- from .worktree_manager import WorktreeManager
62
-
63
-
64
- class CursorWorkflowExecutor:
65
- """
66
- Cursor-native workflow executor that uses Skills.
67
-
68
- This executor is used when running in Cursor mode (TAPPS_AGENTS_MODE=cursor).
69
- It invokes Cursor Skills for LLM operations.
70
- """
71
-
72
- def __init__(
73
- self,
74
- project_root: Path | None = None,
75
- expert_registry: Any | None = None,
76
- auto_mode: bool = False,
77
- ):
78
- """
79
- Initialize Cursor-native workflow executor.
80
-
81
- Args:
82
- project_root: Root directory for the project
83
- expert_registry: Optional ExpertRegistry instance for expert consultation
84
- auto_mode: Whether to run in fully automated mode (no prompts)
85
- """
86
- if not is_cursor_mode():
87
- raise RuntimeError(
88
- "CursorWorkflowExecutor can only be used in Cursor mode. "
89
- "Use WorkflowExecutor for headless mode."
90
- )
91
-
92
- self.project_root = project_root or Path.cwd()
93
- self.state: WorkflowState | None = None
94
- self.workflow: Workflow | None = None
95
- self.expert_registry = expert_registry
96
- self.auto_mode = auto_mode
97
- self.skill_invoker = SkillInvoker(
98
- project_root=self.project_root, use_api=True
99
- )
100
- self.worktree_manager = WorktreeManager(project_root=self.project_root)
101
- self.project_profile: ProjectProfile | None = None
102
- self.parallel_executor = ParallelStepExecutor(max_parallel=8, default_timeout_seconds=3600.0)
103
- self.logger: WorkflowLogger | None = None # Initialized in start() with workflow_id
104
- self.progress_manager: ProgressUpdateManager | None = None # Initialized in start() with workflow
105
-
106
- # Issue fix: Support for continue-from and skip-steps flags
107
- self.continue_from: str | None = None
108
- self.skip_steps: list[str] = []
109
- self.print_paths: bool = True # Issue fix: Print artifact paths after each step
110
-
111
- # Initialize event bus for event-driven communication (Phase 2)
112
- self.event_bus = FileBasedEventBus(project_root=self.project_root)
113
-
114
- # Initialize auto-progression manager (Epic 10)
115
- auto_progression_enabled = os.getenv("TAPPS_AGENTS_AUTO_PROGRESSION", "true").lower() == "true"
116
- self.auto_progression = AutoProgressionManager(
117
- auto_progression_enabled=auto_progression_enabled,
118
- auto_retry_enabled=True,
119
- max_retries=3,
120
- )
121
-
122
- # Initialize error recovery manager (Epic 14)
123
- error_recovery_enabled = os.getenv("TAPPS_AGENTS_ERROR_RECOVERY", "true").lower() == "true"
124
- self.error_recovery = ErrorRecoveryManager(
125
- enable_auto_retry=error_recovery_enabled,
126
- max_retries=3,
127
- ) if error_recovery_enabled else None
128
-
129
- # Initialize state persistence configuration manager (Epic 12 - Story 12.6)
130
- self.state_config_manager = StatePersistenceConfigManager(project_root=self.project_root)
131
-
132
- # Initialize checkpoint manager (Epic 12)
133
- # Use configuration from state persistence config if available
134
- state_config = self.state_config_manager.config
135
- if state_config and state_config.checkpoint:
136
- checkpoint_frequency = state_config.checkpoint.mode
137
- checkpoint_interval = state_config.checkpoint.interval
138
- checkpoint_enabled = state_config.checkpoint.enabled
139
- else:
140
- # Fall back to environment variables
141
- checkpoint_frequency = os.getenv("TAPPS_AGENTS_CHECKPOINT_FREQUENCY", "every_step")
142
- checkpoint_interval = int(os.getenv("TAPPS_AGENTS_CHECKPOINT_INTERVAL", "1"))
143
- checkpoint_enabled = os.getenv("TAPPS_AGENTS_CHECKPOINT_ENABLED", "true").lower() == "true"
144
-
145
- try:
146
- frequency = CheckpointFrequency(checkpoint_frequency)
147
- except ValueError:
148
- frequency = CheckpointFrequency.EVERY_STEP
149
-
150
- checkpoint_config = CheckpointConfig(
151
- frequency=frequency,
152
- interval=checkpoint_interval,
153
- enabled=checkpoint_enabled,
154
- )
155
- self.checkpoint_manager = WorkflowCheckpointManager(config=checkpoint_config)
156
-
157
- # Initialize state manager
158
- # Use storage location from config
159
- if state_config and state_config.enabled:
160
- state_dir = self.state_config_manager.get_storage_path()
161
- compression = state_config.compression
162
- else:
163
- state_dir = self._state_dir()
164
- compression = False
165
- self.state_manager = AdvancedStateManager(state_dir, compression=compression)
166
-
167
- # Always use direct execution via Skills (Background Agents removed)
168
-
169
- # Initialize marker writer for durable step completion tracking
170
- self.marker_writer = MarkerWriter(project_root=self.project_root)
171
-
172
- def _state_dir(self) -> Path:
173
- """Get state directory path."""
174
- return self.project_root / ".tapps-agents" / "workflow-state"
175
-
176
- def _print_step_artifacts(
177
- self,
178
- step: Any,
179
- artifacts: dict[str, Any],
180
- step_execution: Any,
181
- ) -> None:
182
- """
183
- Print artifact paths after step completion (Issue fix: Hidden workflow state).
184
-
185
- Provides clear visibility into where workflow outputs are saved.
186
- """
187
- from ..core.unicode_safe import safe_print
188
-
189
- duration = step_execution.duration_seconds if step_execution else 0
190
- duration_str = f"{duration:.1f}s" if duration else "N/A"
191
-
192
- safe_print(f"\n[OK] Step '{step.id}' completed ({duration_str})")
193
-
194
- if artifacts:
195
- print(" 📄 Artifacts created:")
196
- for art_name, art_data in artifacts.items():
197
- if isinstance(art_data, dict):
198
- path = art_data.get("path", "")
199
- if path:
200
- print(f" - {path}")
201
- else:
202
- print(f" - {art_name} (in-memory)")
203
- else:
204
- print(f" - {art_name}")
205
-
206
- # Also print workflow state location for reference
207
- if self.state:
208
- state_dir = self._state_dir()
209
- print(f" 📁 State: {state_dir / self.state.workflow_id}")
210
-
211
- def _profile_project(self) -> None:
212
- """
213
- Perform project profiling before workflow execution.
214
-
215
- Loads existing profile if available, otherwise detects and saves a new one.
216
- The profile is stored in workflow state and passed to all Skills via context.
217
- """
218
- # Try to load existing profile first
219
- self.project_profile = load_project_profile(project_root=self.project_root)
220
-
221
- # If no profile exists, detect and save it
222
- if not self.project_profile:
223
- detector = ProjectProfileDetector(project_root=self.project_root)
224
- self.project_profile = detector.detect_profile()
225
- save_project_profile(profile=self.project_profile, project_root=self.project_root)
226
-
227
- async def start(
228
- self,
229
- workflow: Workflow,
230
- user_prompt: str | None = None,
231
- ) -> WorkflowState:
232
- """
233
- Start a new workflow execution.
234
-
235
- Also executes state cleanup if configured for "on_startup" schedule.
236
-
237
- Args:
238
- workflow: Workflow to execute
239
- user_prompt: Optional user prompt for the workflow
240
-
241
- Returns:
242
- Initial workflow state
243
- """
244
- # Execute cleanup on startup if configured (Epic 12 - Story 12.6)
245
- if self.state_config_manager.config and self.state_config_manager.config.cleanup:
246
- if self.state_config_manager.config.cleanup.cleanup_schedule == "on_startup":
247
- cleanup_result = self.state_config_manager.execute_cleanup()
248
- if self.logger:
249
- self.logger.info(
250
- f"State cleanup on startup: {cleanup_result}",
251
- cleanup_result=cleanup_result,
252
- )
253
-
254
- self.workflow = workflow
255
-
256
- # Check workflow metadata for auto-execution override (per-workflow config)
257
- # Always use direct execution via Skills (Background Agents removed)
258
-
259
- # Use consistent workflow_id format: {workflow.id}-{timestamp}
260
- # Include microseconds to ensure uniqueness for parallel workflows (BUG-001 fix)
261
- workflow_id = f"{workflow.id}-{datetime.now().strftime('%Y%m%d-%H%M%S-%f')}"
262
-
263
- # Initialize logger with workflow_id for correlation
264
- self.logger = WorkflowLogger(workflow_id=workflow_id)
265
-
266
- # Perform project profiling before workflow execution
267
- self._profile_project()
268
-
269
- self.state = WorkflowState(
270
- workflow_id=workflow_id,
271
- started_at=datetime.now(),
272
- current_step=workflow.steps[0].id if workflow.steps else None,
273
- status="running",
274
- variables={
275
- "user_prompt": user_prompt or "",
276
- "project_profile": self.project_profile.to_dict() if self.project_profile else None,
277
- "workflow_name": workflow.name, # Store in variables for reference
278
- },
279
- )
280
-
281
- # Beads: create workflow issue when enabled (store for close in run finally)
282
- try:
283
- from ..core.config import load_config
284
- from ..beads import require_beads
285
- from ..simple_mode.beads_hooks import create_workflow_issue
286
-
287
- config = load_config(self.project_root / ".tapps-agents" / "config.yaml")
288
- require_beads(config, self.project_root)
289
- state_vars = self.state.variables or {}
290
- # On resume: reuse id from .beads_issue_id file (same layout as *build)
291
- state_dir = self._state_dir()
292
- wf_dir = state_dir / workflow_id
293
- beads_file = wf_dir / ".beads_issue_id"
294
- if beads_file.exists():
295
- try:
296
- bid = beads_file.read_text(encoding="utf-8").strip() or None
297
- if bid:
298
- state_vars["_beads_issue_id"] = bid
299
- self.state.variables = state_vars
300
- except OSError:
301
- pass
302
- if "_beads_issue_id" not in state_vars:
303
- bid = create_workflow_issue(
304
- self.project_root,
305
- config,
306
- workflow.name,
307
- user_prompt or state_vars.get("target_file", "") or "",
308
- )
309
- if bid:
310
- state_vars["_beads_issue_id"] = bid
311
- self.state.variables = state_vars
312
- try:
313
- wf_dir.mkdir(parents=True, exist_ok=True)
314
- beads_file.write_text(bid, encoding="utf-8")
315
- except OSError:
316
- pass
317
- except Exception as e:
318
- from ..beads import BeadsRequiredError
319
-
320
- if isinstance(e, BeadsRequiredError):
321
- raise
322
- pass # log-and-continue: do not fail start for other beads errors
323
-
324
- # Generate and save execution plan (Epic 6 - Story 6.7)
325
- try:
326
- from .execution_plan import generate_execution_plan, save_execution_plan
327
- execution_plan = generate_execution_plan(workflow)
328
- state_dir = self._state_dir()
329
- plan_path = save_execution_plan(execution_plan, state_dir, workflow_id)
330
- if self.logger:
331
- self.logger.info(
332
- f"Execution plan generated: {plan_path}",
333
- execution_plan_path=str(plan_path),
334
- )
335
- except Exception as e:
336
- # Don't fail workflow start if execution plan generation fails
337
- if self.logger:
338
- self.logger.warning(f"Failed to generate execution plan: {e}")
339
-
340
- self.logger.info(
341
- "Workflow started",
342
- workflow_name=workflow.name,
343
- workflow_version=workflow.version,
344
- step_count=len(workflow.steps),
345
- )
346
-
347
- # Publish workflow started event (Phase 2)
348
- await self.event_bus.publish(
349
- WorkflowEvent(
350
- event_type=EventType.WORKFLOW_STARTED,
351
- workflow_id=workflow_id,
352
- step_id=None,
353
- data={
354
- "workflow_name": workflow.name,
355
- "workflow_version": workflow.version,
356
- "step_count": len(workflow.steps),
357
- "user_prompt": user_prompt or "",
358
- },
359
- timestamp=datetime.now(),
360
- correlation_id=workflow_id,
361
- )
362
- )
363
-
364
- # Initialize progress update manager
365
- self.progress_manager = ProgressUpdateManager(
366
- workflow=workflow,
367
- state=self.state,
368
- project_root=self.project_root,
369
- enable_updates=True,
370
- )
371
- # Connect event bus to status monitor (Phase 2)
372
- if self.progress_manager.status_monitor:
373
- self.progress_manager.status_monitor.event_bus = self.event_bus
374
- # Start progress monitoring (non-blocking)
375
- import asyncio
376
- try:
377
- asyncio.get_running_loop()
378
- asyncio.create_task(self.progress_manager.start())
379
- except RuntimeError:
380
- # No running event loop - progress manager will start when event loop is available
381
- pass
382
-
383
- self.save_state()
384
-
385
- # Generate task manifest (Epic 7)
386
- self._generate_manifest()
387
-
388
- return self.state
389
-
390
- def save_state(self) -> None:
391
- """Save workflow state to disk."""
392
- if not self.state:
393
- return
394
-
395
- def _make_json_serializable(obj: Any) -> Any:
396
- """Recursively convert objects to JSON-serializable format."""
397
- # Handle ProjectProfile objects
398
- if hasattr(obj, "to_dict") and hasattr(obj, "compliance_requirements"):
399
- try:
400
- from ..core.project_profile import ProjectProfile
401
- if isinstance(obj, ProjectProfile):
402
- return obj.to_dict()
403
- except (ImportError, AttributeError):
404
- pass
405
-
406
- # Handle ComplianceRequirement objects
407
- if hasattr(obj, "name") and hasattr(obj, "confidence") and hasattr(obj, "indicators"):
408
- try:
409
- from ..core.project_profile import ComplianceRequirement
410
- if isinstance(obj, ComplianceRequirement):
411
- return asdict(obj)
412
- except (ImportError, AttributeError):
413
- pass
414
-
415
- # Handle dictionaries recursively
416
- if isinstance(obj, dict):
417
- return {k: _make_json_serializable(v) for k, v in obj.items()}
418
-
419
- # Handle lists recursively
420
- if isinstance(obj, list):
421
- return [_make_json_serializable(item) for item in obj]
422
-
423
- # Handle other non-serializable types
424
- try:
425
- import json
426
- json.dumps(obj)
427
- return obj
428
- except (TypeError, ValueError):
429
- # For non-serializable types, convert to string as fallback
430
- return str(obj)
431
-
432
- state_file = self._state_dir() / f"{self.state.workflow_id}.json"
433
- state_file.parent.mkdir(parents=True, exist_ok=True)
434
-
435
- # Convert variables to JSON-serializable format
436
- variables = self.state.variables or {}
437
- serializable_variables = _make_json_serializable(variables)
438
-
439
- # Convert to dict for JSON serialization
440
- state_dict = {
441
- "workflow_id": self.state.workflow_id,
442
- "status": self.state.status,
443
- "current_step": self.state.current_step,
444
- "started_at": self.state.started_at.isoformat() if self.state.started_at else None,
445
- "completed_steps": self.state.completed_steps,
446
- "skipped_steps": self.state.skipped_steps,
447
- "variables": serializable_variables,
448
- "artifacts": {
449
- name: {
450
- "name": a.name,
451
- "path": a.path,
452
- "status": a.status,
453
- "created_by": a.created_by,
454
- "created_at": a.created_at.isoformat() if a.created_at else None,
455
- "metadata": a.metadata,
456
- }
457
- for name, a in self.state.artifacts.items()
458
- },
459
- "step_executions": [
460
- {
461
- "step_id": se.step_id,
462
- "agent": se.agent,
463
- "action": se.action,
464
- "started_at": se.started_at.isoformat() if se.started_at else None,
465
- "completed_at": se.completed_at.isoformat() if se.completed_at else None,
466
- "duration_seconds": se.duration_seconds,
467
- "status": se.status,
468
- "error": se.error,
469
- }
470
- for se in self.state.step_executions
471
- ],
472
- "error": self.state.error,
473
- }
474
-
475
- from .file_utils import atomic_write_json
476
-
477
- atomic_write_json(state_file, state_dict, indent=2)
478
-
479
- # Also save to history
480
- history_dir = state_file.parent / "history"
481
- history_dir.mkdir(exist_ok=True)
482
- history_file = history_dir / state_file.name
483
- atomic_write_json(history_file, state_dict, indent=2)
484
-
485
- # Generate task manifest (Epic 7)
486
- self._generate_manifest()
487
-
488
- def _generate_manifest(self) -> None:
489
- """
490
- Generate and save task manifest (Epic 7).
491
-
492
- Generates manifest on workflow start, step completion, and state save.
493
- """
494
- if not self.workflow or not self.state:
495
- return
496
-
497
- try:
498
- from .manifest import (
499
- generate_manifest,
500
- save_manifest,
501
- sync_manifest_to_project_root,
502
- )
503
-
504
- # Generate manifest
505
- manifest_content = generate_manifest(self.workflow, self.state)
506
-
507
- # Save to state directory
508
- state_dir = self._state_dir()
509
- manifest_path = save_manifest(manifest_content, state_dir, self.state.workflow_id)
510
-
511
- # Optional: Sync to project root if configured
512
- sync_enabled = os.getenv("TAPPS_AGENTS_MANIFEST_SYNC", "false").lower() == "true"
513
- if sync_enabled:
514
- sync_path = sync_manifest_to_project_root(manifest_content, self.project_root)
515
- if self.logger:
516
- self.logger.debug(
517
- "Task manifest synced to project root",
518
- manifest_path=str(manifest_path),
519
- sync_path=str(sync_path),
520
- )
521
- elif self.logger:
522
- self.logger.debug(
523
- "Task manifest generated",
524
- manifest_path=str(manifest_path),
525
- )
526
- except Exception as e:
527
- # Don't fail workflow if manifest generation fails
528
- if self.logger:
529
- self.logger.warning(
530
- "Failed to generate task manifest",
531
- error=str(e),
532
- )
533
-
534
- async def run(
535
- self,
536
- workflow: Workflow | None = None,
537
- target_file: str | None = None,
538
- max_steps: int = 100,
539
- ) -> WorkflowState:
540
- """
541
- Run workflow to completion with timeout protection.
542
-
543
- Args:
544
- workflow: Workflow to execute (if not already loaded)
545
- target_file: Optional target file path
546
- max_steps: Maximum number of steps to execute
547
-
548
- Returns:
549
- Final workflow state
550
- """
551
- import asyncio
552
- from datetime import datetime
553
-
554
- from tapps_agents.core.config import load_config
555
-
556
- config = load_config()
557
- # Use 2x step timeout for overall workflow timeout (default: 2 hours)
558
- workflow_timeout = getattr(config.workflow, 'timeout_seconds', 3600.0) * 2
559
-
560
- async def _run_workflow_inner() -> WorkflowState:
561
- """Inner function to wrap actual execution for timeout protection."""
562
- # Initialize execution
563
- target_path = await self._initialize_run(workflow, target_file)
564
-
565
- # Log workflow start
566
- start_time = datetime.now()
567
- if self.logger:
568
- self.logger.info(
569
- "Starting workflow execution",
570
- extra={
571
- "workflow_id": self.state.workflow_id if self.state else None,
572
- "workflow_name": workflow.name if workflow else (self.workflow.name if self.workflow else None),
573
- "max_steps": max_steps,
574
- "total_steps": len(workflow.steps) if workflow else (len(self.workflow.steps) if self.workflow else 0),
575
- "workflow_timeout": workflow_timeout,
576
- }
577
- )
578
-
579
- # Use parallel execution for independent steps
580
- steps_executed = 0
581
- completed_step_ids = set(self.state.completed_steps)
582
- running_step_ids: set[str] = set()
583
-
584
- while (
585
- self.state
586
- and self.workflow
587
- and self.state.status == "running"
588
- ):
589
- if steps_executed >= max_steps:
590
- self._handle_max_steps_exceeded(max_steps)
591
- break
592
-
593
- # Find steps ready to execute (dependencies met)
594
- ready_steps = self._find_ready_steps(
595
- completed_step_ids, running_step_ids
596
- )
597
-
598
- if not ready_steps:
599
- if self._handle_no_ready_steps(completed_step_ids):
600
- break
601
- continue
602
-
603
- # Execute ready steps in parallel
604
- running_step_ids.update(step.id for step in ready_steps)
605
-
606
- async def execute_step_wrapper(step: WorkflowStep) -> dict[str, Any]:
607
- """Wrapper to adapt _execute_step_for_parallel to parallel executor interface."""
608
- artifacts = await self._execute_step_for_parallel(step=step, target_path=target_path)
609
- return artifacts or {}
610
-
611
- try:
612
- results = await self.parallel_executor.execute_parallel(
613
- steps=ready_steps,
614
- execute_fn=execute_step_wrapper,
615
- state=self.state,
616
- )
617
-
618
- # Process results and update state
619
- should_break = await self._process_parallel_results(
620
- results, completed_step_ids, running_step_ids
621
- )
622
- if should_break:
623
- break
624
-
625
- steps_executed += len(ready_steps)
626
- self.save_state()
627
-
628
- # Generate task manifest after step completion (Epic 7)
629
- self._generate_manifest()
630
-
631
- # Log progress every 10 steps
632
- if steps_executed % 10 == 0 and self.logger:
633
- elapsed = (datetime.now() - start_time).total_seconds()
634
- self.logger.info(
635
- f"Workflow progress: {steps_executed} steps executed in {elapsed:.1f}s",
636
- extra={
637
- "steps_executed": steps_executed,
638
- "completed_steps": len(completed_step_ids),
639
- "total_steps": len(self.workflow.steps),
640
- "elapsed_seconds": elapsed,
641
- }
642
- )
643
-
644
- except Exception as e:
645
- self._handle_execution_error(e)
646
- break
647
-
648
- return await self._finalize_run(completed_step_ids)
649
-
650
- # Wrap execution with timeout
651
- try:
652
- return await asyncio.wait_for(
653
- _run_workflow_inner(),
654
- timeout=workflow_timeout
655
- )
656
- except TimeoutError:
657
- if self.state:
658
- self.state.status = "failed"
659
- self.state.error = f"Workflow timeout after {workflow_timeout}s"
660
- self.save_state()
661
- if self.logger:
662
- self.logger.error(
663
- f"Workflow execution exceeded {workflow_timeout}s timeout",
664
- extra={
665
- "workflow_id": self.state.workflow_id,
666
- "timeout_seconds": workflow_timeout,
667
- }
668
- )
669
- raise TimeoutError(
670
- f"Workflow execution exceeded {workflow_timeout}s timeout. "
671
- f"Increase timeout in config (workflow.timeout_seconds) or check for blocking operations."
672
- ) from None
673
- finally:
674
- variables = (getattr(self.state, "variables", None) or {}) if self.state else {}
675
- beads_issue_id = variables.get("_beads_issue_id")
676
- if beads_issue_id is None and self.state:
677
- wf_id = getattr(self.state, "workflow_id", None)
678
- if wf_id:
679
- beads_file = self._state_dir() / wf_id / ".beads_issue_id"
680
- if beads_file.exists():
681
- try:
682
- beads_issue_id = beads_file.read_text(
683
- encoding="utf-8"
684
- ).strip() or None
685
- except OSError:
686
- pass
687
- from ..simple_mode.beads_hooks import close_issue
688
- close_issue(self.project_root, beads_issue_id)
689
-
690
- async def _initialize_run(
691
- self,
692
- workflow: Workflow | None,
693
- target_file: str | None,
694
- ) -> Path | None:
695
- """Initialize workflow execution with validation and return target path."""
696
- if workflow:
697
- self.workflow = workflow
698
- if not self.workflow:
699
- raise ValueError(
700
- "No workflow loaded. Call start() or pass workflow."
701
- )
702
-
703
- # Validate workflow has steps
704
- if not self.workflow.steps:
705
- raise ValueError("Workflow has no steps to execute")
706
-
707
- # Ensure we have a state
708
- if not self.state or not self.state.workflow_id.startswith(f"{self.workflow.id}-"):
709
- await self.start(workflow=self.workflow)
710
-
711
- # Validate first step can be executed (no dependencies)
712
- first_step = self.workflow.steps[0]
713
- if not first_step.requires: # No dependencies
714
- # First step should always be ready
715
- if self.logger:
716
- self.logger.info(
717
- f"First step {first_step.id} has no dependencies - ready to execute",
718
- extra={
719
- "step_id": first_step.id,
720
- "agent": first_step.agent,
721
- "action": first_step.action,
722
- }
723
- )
724
-
725
- # Establish target file
726
- target_path: Path | None = None
727
- if target_file:
728
- target_path = (
729
- (self.project_root / target_file)
730
- if not Path(target_file).is_absolute()
731
- else Path(target_file)
732
- )
733
- else:
734
- target_path = self._default_target_file()
735
-
736
- if target_path and self.state:
737
- self.state.variables["target_file"] = str(target_path)
738
-
739
- return target_path
740
-
741
- def _handle_max_steps_exceeded(self, max_steps: int) -> None:
742
- """Handle max steps exceeded."""
743
- self.state.status = "failed"
744
- self.state.error = f"Max steps exceeded ({max_steps}). Aborting."
745
- self.save_state()
746
-
747
- def get_workflow_health(self) -> dict[str, Any]:
748
- """
749
- Get workflow health diagnostics.
750
-
751
- Returns:
752
- Dictionary with workflow health information including:
753
- - status: Current workflow status
754
- - elapsed_seconds: Time since workflow started
755
- - completed_steps: Number of completed steps
756
- - total_steps: Total number of steps
757
- - progress_percent: Percentage of steps completed
758
- - time_since_last_step: Seconds since last step completed
759
- - is_stuck: Whether workflow appears to be stuck (no progress in 5 minutes)
760
- - current_step: Current step ID
761
- - error: Error message if any
762
- """
763
- if not self.state:
764
- return {"status": "not_started", "message": "Workflow not started"}
765
-
766
- elapsed = (
767
- (datetime.now() - self.state.started_at).total_seconds()
768
- if self.state.started_at else 0
769
- )
770
- completed = len(self.state.completed_steps)
771
- total = len(self.workflow.steps) if self.workflow else 0
772
-
773
- # Check if stuck (no progress in last 5 minutes)
774
- last_step_time = None
775
- if self.state.step_executions:
776
- completed_times = [
777
- se.completed_at for se in self.state.step_executions
778
- if se.completed_at
779
- ]
780
- if completed_times:
781
- last_step_time = max(completed_times)
782
-
783
- if not last_step_time:
784
- last_step_time = self.state.started_at
785
-
786
- time_since_last_step = (
787
- (datetime.now() - last_step_time).total_seconds()
788
- if last_step_time else elapsed
789
- )
790
- is_stuck = time_since_last_step > 300 # 5 minutes
791
-
792
- return {
793
- "status": self.state.status,
794
- "elapsed_seconds": elapsed,
795
- "completed_steps": completed,
796
- "total_steps": total,
797
- "progress_percent": (completed / total * 100) if total > 0 else 0,
798
- "time_since_last_step": time_since_last_step,
799
- "is_stuck": is_stuck,
800
- "current_step": self.state.current_step,
801
- "error": self.state.error,
802
- }
803
-
804
- def _find_ready_steps(
805
- self,
806
- completed_step_ids: set[str],
807
- running_step_ids: set[str],
808
- ) -> list[WorkflowStep]:
809
- """Find steps ready to execute (dependencies met)."""
810
- available_artifacts = set(self.state.artifacts.keys())
811
- return self.parallel_executor.find_ready_steps(
812
- workflow_steps=self.workflow.steps,
813
- completed_step_ids=completed_step_ids,
814
- running_step_ids=running_step_ids,
815
- available_artifacts=available_artifacts,
816
- )
817
-
818
- def _handle_no_ready_steps(self, completed_step_ids: set[str]) -> bool:
819
- """Handle case when no steps are ready with better diagnostics. Returns True if workflow should stop."""
820
- if len(completed_step_ids) >= len(self.workflow.steps):
821
- # Workflow is complete
822
- self.state.status = "completed"
823
- self.state.current_step = None
824
- self.save_state()
825
- return True
826
- else:
827
- # Workflow is blocked - provide diagnostics
828
- available_artifacts = set(self.state.artifacts.keys())
829
- pending_steps = [
830
- s for s in self.workflow.steps
831
- if s.id not in completed_step_ids
832
- ]
833
-
834
- # Check what's blocking
835
- blocking_info = []
836
- for step in pending_steps:
837
- missing = [req for req in (step.requires or []) if req not in available_artifacts]
838
- if missing:
839
- blocking_info.append(f"Step {step.id} ({step.agent}/{step.action}): missing {missing}")
840
-
841
- error_msg = (
842
- f"Workflow blocked: no ready steps and workflow not complete. "
843
- f"Completed: {len(completed_step_ids)}/{len(self.workflow.steps)}. "
844
- f"Blocking issues: {blocking_info if blocking_info else 'Unknown - check step dependencies'}"
845
- )
846
-
847
- self.state.status = "failed"
848
- self.state.error = error_msg
849
- self.save_state()
850
-
851
- # Log detailed diagnostics
852
- if self.logger:
853
- self.logger.error(
854
- "Workflow blocked - no ready steps",
855
- extra={
856
- "completed_steps": list(completed_step_ids),
857
- "pending_steps": [s.id for s in pending_steps],
858
- "available_artifacts": list(available_artifacts),
859
- "blocking_info": blocking_info,
860
- }
861
- )
862
-
863
- return True
864
-
865
- async def _process_parallel_results(
866
- self,
867
- results: list[Any],
868
- completed_step_ids: set[str],
869
- running_step_ids: set[str],
870
- ) -> bool:
871
- """
872
- Process results from parallel execution.
873
- Returns True if workflow should stop (failed or aborted).
874
- """
875
- for result in results:
876
- step_logger = self.logger.with_context(
877
- step_id=result.step.id,
878
- agent=result.step.agent,
879
- ) if self.logger else None
880
-
881
- if result.error:
882
- should_break = await self._handle_step_error(
883
- result, step_logger, completed_step_ids, running_step_ids
884
- )
885
- if should_break:
886
- return True
887
- continue
888
-
889
- # Handle successful step completion
890
- await self._handle_step_success(
891
- result, step_logger, completed_step_ids, running_step_ids
892
- )
893
-
894
- return False
895
-
896
- async def _handle_step_error(
897
- self,
898
- result: Any,
899
- step_logger: Any,
900
- completed_step_ids: set[str],
901
- running_step_ids: set[str],
902
- ) -> bool:
903
- """Handle step error. Returns True if workflow should stop."""
904
- # Publish step failed event (Phase 2)
905
- await self.event_bus.publish(
906
- WorkflowEvent(
907
- event_type=EventType.STEP_FAILED,
908
- workflow_id=self.state.workflow_id,
909
- step_id=result.step.id,
910
- data={
911
- "agent": result.step.agent,
912
- "action": result.step.action,
913
- "error": str(result.error),
914
- "attempts": getattr(result, "attempts", 1),
915
- },
916
- timestamp=datetime.now(),
917
- correlation_id=f"{self.state.workflow_id}:{result.step.id}",
918
- )
919
- )
920
-
921
- # Step failed - use error recovery and auto-progression (Epic 14)
922
- error_context = ErrorContext(
923
- workflow_id=self.state.workflow_id,
924
- step_id=result.step.id,
925
- agent=result.step.agent,
926
- action=result.step.action,
927
- step_number=None,
928
- total_steps=len(self.workflow.steps),
929
- workflow_status=self.state.status,
930
- )
931
-
932
- # Handle error with recovery manager (Epic 14)
933
- recovery_result = None
934
- user_friendly_error = None
935
- if self.error_recovery:
936
- recovery_result = self.error_recovery.handle_error(
937
- error=result.error,
938
- context=error_context,
939
- attempt=getattr(result, "attempts", 1),
940
- )
941
-
942
- # Store user-friendly message (can't modify frozen dataclass)
943
- if recovery_result.get("user_message"):
944
- user_friendly_error = recovery_result["user_message"]
945
-
946
- if self.auto_progression.should_auto_progress():
947
- # Get review result if this was a reviewer step
948
- review_result = None
949
- if result.step.agent == "reviewer":
950
- review_result = self.state.variables.get("reviewer_result")
951
-
952
- decision = self.auto_progression.handle_step_completion(
953
- step=result.step,
954
- state=self.state,
955
- step_execution=result.step_execution,
956
- review_result=review_result,
957
- )
958
-
959
- if decision.action == ProgressionAction.RETRY:
960
- # Retry the step - remove from completed and add back to ready
961
- completed_step_ids.discard(result.step.id)
962
- running_step_ids.discard(result.step.id)
963
- # Apply backoff if specified
964
- if decision.metadata.get("backoff_seconds"):
965
- await asyncio.sleep(decision.metadata["backoff_seconds"])
966
- if step_logger:
967
- step_logger.info(
968
- f"Retrying step {result.step.id} (attempt {decision.retry_count})",
969
- )
970
- return False
971
- elif decision.action == ProgressionAction.SKIP:
972
- # Skip this step
973
- completed_step_ids.add(result.step.id)
974
- running_step_ids.discard(result.step.id)
975
- if result.step.id not in self.state.skipped_steps:
976
- self.state.skipped_steps.append(result.step.id)
977
- if step_logger:
978
- step_logger.warning(
979
- f"Skipping step {result.step.id}: {decision.reason}",
980
- )
981
- return False
982
- elif decision.action == ProgressionAction.ABORT:
983
- # Abort workflow
984
- self.state.status = "failed"
985
- self.state.error = decision.reason
986
- if step_logger:
987
- step_logger.error(
988
- f"Workflow aborted: {decision.reason}",
989
- )
990
-
991
- # Publish workflow failed event (Phase 2)
992
- await self.event_bus.publish(
993
- WorkflowEvent(
994
- event_type=EventType.WORKFLOW_FAILED,
995
- workflow_id=self.state.workflow_id,
996
- step_id=result.step.id,
997
- data={
998
- "error": decision.reason,
999
- "step_id": result.step.id,
1000
- },
1001
- timestamp=datetime.now(),
1002
- correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1003
- )
1004
- )
1005
-
1006
- self.save_state()
1007
- if self.progress_manager:
1008
- await self.progress_manager.send_workflow_failed(decision.reason)
1009
- await self.progress_manager.stop()
1010
- return True
1011
- elif decision.action == ProgressionAction.CONTINUE:
1012
- # Continue despite error (recoverable)
1013
- completed_step_ids.add(result.step.id)
1014
- running_step_ids.discard(result.step.id)
1015
- if step_logger:
1016
- step_logger.warning(
1017
- f"Step {result.step.id} failed but continuing: {decision.reason}",
1018
- )
1019
- return False
1020
-
1021
- # Fallback: WorkflowFailureConfig when auto-progression disabled (plan 3.1)
1022
- error_message = user_friendly_error if user_friendly_error else str(result.error)
1023
- try:
1024
- from ..core.config import load_config
1025
-
1026
- cfg = load_config()
1027
- wf = getattr(cfg, "workflow", None)
1028
- fail_cfg = getattr(wf, "failure", None) if wf else None
1029
- except Exception: # pylint: disable=broad-except
1030
- fail_cfg = None
1031
- on_fail = getattr(fail_cfg, "on_step_fail", "fail") or "fail"
1032
- retry_count = getattr(fail_cfg, "retry_count", 1) or 0
1033
- escalate_pause = getattr(fail_cfg, "escalate_to_pause", True)
1034
-
1035
- raw = self.state.variables.get("_step_retries")
1036
- retries_var = raw if isinstance(raw, dict) else {}
1037
- self.state.variables["_step_retries"] = retries_var
1038
- retries_used = retries_var.get(result.step.id, 0)
1039
-
1040
- if on_fail == "retry" and retries_used < retry_count:
1041
- retries_var[result.step.id] = retries_used + 1
1042
- completed_step_ids.discard(result.step.id)
1043
- running_step_ids.discard(result.step.id)
1044
- if step_logger:
1045
- step_logger.info(f"Retrying step {result.step.id} (attempt {retries_used + 1}/{retry_count})")
1046
- return False
1047
-
1048
- if on_fail == "skip":
1049
- completed_step_ids.add(result.step.id)
1050
- running_step_ids.discard(result.step.id)
1051
- if result.step.id not in self.state.skipped_steps:
1052
- self.state.skipped_steps.append(result.step.id)
1053
- if step_logger:
1054
- step_logger.warning(f"Skipping step {result.step.id}: {error_message}")
1055
- return False
1056
-
1057
- # fail or escalate: stop workflow
1058
- self.state.status = "paused" if (on_fail == "escalate" and escalate_pause) else "failed"
1059
- self.state.error = f"Step {result.step.id} failed: {error_message}"
1060
- suggest = None
1061
- if on_fail == "escalate" and recovery_result and recovery_result.get("suggestions"):
1062
- suggest = [getattr(s, "action", str(s)) for s in recovery_result["suggestions"][:3]]
1063
-
1064
- # Publish workflow failed event (Phase 2)
1065
- await self.event_bus.publish(
1066
- WorkflowEvent(
1067
- event_type=EventType.WORKFLOW_FAILED,
1068
- workflow_id=self.state.workflow_id,
1069
- step_id=result.step.id,
1070
- data={
1071
- "error": error_message,
1072
- "step_id": result.step.id,
1073
- "behavior": on_fail,
1074
- "suggestions": suggest,
1075
- },
1076
- timestamp=datetime.now(),
1077
- correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1078
- )
1079
- )
1080
-
1081
- self.save_state()
1082
-
1083
- # Send failure update
1084
- if self.progress_manager:
1085
- await self.progress_manager.send_workflow_failed(error_message)
1086
- await self.progress_manager.stop()
1087
- return True
1088
-
1089
- async def _handle_step_success(
1090
- self,
1091
- result: Any,
1092
- step_logger: Any,
1093
- completed_step_ids: set[str],
1094
- running_step_ids: set[str],
1095
- ) -> None:
1096
- """Handle successful step completion."""
1097
- # Mark step as completed
1098
- completed_step_ids.add(result.step.id)
1099
- running_step_ids.discard(result.step.id)
1100
-
1101
- # Get review result if this was a reviewer step (for gate evaluation)
1102
- review_result = None
1103
- if result.step.agent == "reviewer":
1104
- review_result = self.state.variables.get("reviewer_result")
1105
-
1106
- # Issue fix: Print artifact paths after each step (Hidden workflow state)
1107
- if self.print_paths and result.artifacts:
1108
- self._print_step_artifacts(result.step, result.artifacts, result.step_execution)
1109
-
1110
- # Publish step completed event (Phase 2)
1111
- await self.event_bus.publish(
1112
- WorkflowEvent(
1113
- event_type=EventType.STEP_COMPLETED,
1114
- workflow_id=self.state.workflow_id,
1115
- step_id=result.step.id,
1116
- data={
1117
- "agent": result.step.agent,
1118
- "action": result.step.action,
1119
- "duration_seconds": result.step_execution.duration_seconds,
1120
- "artifact_count": len(result.artifacts) if result.artifacts else 0,
1121
- },
1122
- timestamp=datetime.now(),
1123
- correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1124
- )
1125
- )
1126
-
1127
- # Publish artifact created events (Phase 2)
1128
- if result.artifacts:
1129
- for artifact_name, artifact_data in result.artifacts.items():
1130
- await self.event_bus.publish(
1131
- WorkflowEvent(
1132
- event_type=EventType.ARTIFACT_CREATED,
1133
- workflow_id=self.state.workflow_id,
1134
- step_id=result.step.id,
1135
- data={
1136
- "artifact_name": artifact_name,
1137
- "artifact_path": artifact_data.get("path", ""),
1138
- "created_by": result.step.id,
1139
- },
1140
- timestamp=datetime.now(),
1141
- correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1142
- )
1143
- )
1144
-
1145
- # Use auto-progression to handle step completion and gate evaluation
1146
- if self.auto_progression.should_auto_progress():
1147
- decision = self.auto_progression.handle_step_completion(
1148
- step=result.step,
1149
- state=self.state,
1150
- step_execution=result.step_execution,
1151
- review_result=review_result,
1152
- )
1153
-
1154
- # Update current step based on gate decision if needed
1155
- if decision.next_step_id:
1156
- self.state.current_step = decision.next_step_id
1157
-
1158
- if step_logger:
1159
- step_logger.info(
1160
- f"Step completed: {decision.reason}",
1161
- action=result.step.action,
1162
- duration_seconds=result.step_execution.duration_seconds,
1163
- artifact_count=len(result.artifacts) if result.artifacts else 0,
1164
- next_step=decision.next_step_id,
1165
- )
1166
- else:
1167
- if step_logger:
1168
- step_logger.info(
1169
- "Step completed",
1170
- action=result.step.action,
1171
- duration_seconds=result.step_execution.duration_seconds,
1172
- artifact_count=len(result.artifacts) if result.artifacts else 0,
1173
- )
1174
-
1175
- # Send step completed update (Epic 11: Include gate result for quality dashboard)
1176
- is_gate_step = result.step.agent == "reviewer" and result.step.gate is not None
1177
- if self.progress_manager:
1178
- # Extract gate result if this was a reviewer step
1179
- gate_result = None
1180
- if result.step.agent == "reviewer" and review_result:
1181
- # Get gate result from state variables (set by auto-progression)
1182
- gate_last = self.state.variables.get("gate_last", {})
1183
- if gate_last:
1184
- gate_result = gate_last
1185
-
1186
- # Publish gate evaluated event (Phase 2)
1187
- await self.event_bus.publish(
1188
- WorkflowEvent(
1189
- event_type=EventType.GATE_EVALUATED,
1190
- workflow_id=self.state.workflow_id,
1191
- step_id=result.step.id,
1192
- data={
1193
- "gate_result": gate_result,
1194
- "passed": gate_result.get("passed", False),
1195
- },
1196
- timestamp=datetime.now(),
1197
- correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1198
- )
1199
- )
1200
-
1201
- await self.progress_manager.send_step_completed(
1202
- step_id=result.step.id,
1203
- agent=result.step.agent,
1204
- action=result.step.action,
1205
- duration=result.step_execution.duration_seconds,
1206
- gate_result=gate_result,
1207
- )
1208
-
1209
- # Epic 12: Automatic checkpointing after step completion
1210
- if self.checkpoint_manager.should_checkpoint(
1211
- step=result.step,
1212
- state=self.state,
1213
- is_gate_step=is_gate_step,
1214
- ):
1215
- # Enhance state with checkpoint metadata before saving
1216
- checkpoint_metadata = self.checkpoint_manager.get_checkpoint_metadata(
1217
- state=self.state,
1218
- step=result.step,
1219
- )
1220
- # Store metadata in state variables for persistence
1221
- if "_checkpoint_metadata" not in self.state.variables:
1222
- self.state.variables["_checkpoint_metadata"] = {}
1223
- self.state.variables["_checkpoint_metadata"].update(checkpoint_metadata)
1224
-
1225
- # Save checkpoint
1226
- self.save_state()
1227
- self.checkpoint_manager.record_checkpoint(result.step.id)
1228
-
1229
- if self.logger:
1230
- self.logger.info(
1231
- f"Checkpoint created after step {result.step.id}",
1232
- checkpoint_metadata=checkpoint_metadata,
1233
- )
1234
-
1235
- # Update artifacts from result
1236
- if result.artifacts and isinstance(result.artifacts, dict):
1237
- for art_name, art_data in result.artifacts.items():
1238
- if isinstance(art_data, dict):
1239
- artifact = Artifact(
1240
- name=art_data.get("name", art_name),
1241
- path=art_data.get("path", ""),
1242
- status="complete",
1243
- created_by=result.step.id,
1244
- created_at=datetime.now(),
1245
- metadata=art_data.get("metadata", {}),
1246
- )
1247
- self.state.artifacts[artifact.name] = artifact
1248
-
1249
- def _handle_execution_error(self, error: Exception) -> None:
1250
- """Handle execution error."""
1251
- self.state.status = "failed"
1252
- self.state.error = str(error)
1253
- if self.logger:
1254
- self.logger.error(
1255
- "Workflow execution failed",
1256
- error=str(error),
1257
- exc_info=True,
1258
- )
1259
- self.save_state()
1260
-
1261
- async def _finalize_run(self, completed_step_ids: set[str]) -> WorkflowState:
1262
- """Finalize workflow execution and return final state."""
1263
- if not self.state:
1264
- raise RuntimeError("Workflow state lost during execution")
1265
-
1266
- # Mark as completed if no error
1267
- if self.state.status == "running":
1268
- self.state.status = "completed"
1269
- if self.logger:
1270
- self.logger.info(
1271
- "Workflow completed",
1272
- completed_steps=len(completed_step_ids),
1273
- total_steps=len(self.workflow.steps) if self.workflow else 0,
1274
- )
1275
-
1276
- # Publish workflow completed event (Phase 2)
1277
- await self.event_bus.publish(
1278
- WorkflowEvent(
1279
- event_type=EventType.WORKFLOW_COMPLETED,
1280
- workflow_id=self.state.workflow_id,
1281
- step_id=None,
1282
- data={
1283
- "completed_steps": len(completed_step_ids),
1284
- "total_steps": len(self.workflow.steps) if self.workflow else 0,
1285
- },
1286
- timestamp=datetime.now(),
1287
- correlation_id=self.state.workflow_id,
1288
- )
1289
- )
1290
-
1291
- self.save_state()
1292
-
1293
- # Send completion summary
1294
- if self.progress_manager:
1295
- await self.progress_manager.send_workflow_completed()
1296
- await self.progress_manager.stop()
1297
-
1298
- # Best-effort cleanup of worktrees created during this run
1299
- try:
1300
- await self.worktree_manager.cleanup_all()
1301
- except Exception:
1302
- pass
1303
-
1304
- # Dual-write workflow completion to analytics (best-effort)
1305
- if self.state.status in ("completed", "failed") and self.workflow:
1306
- try:
1307
- from .analytics_dual_write import record_workflow_execution_to_analytics
1308
-
1309
- duration_sec = 0.0
1310
- if self.state.started_at:
1311
- end = datetime.now()
1312
- duration_sec = (end - self.state.started_at).total_seconds()
1313
- record_workflow_execution_to_analytics(
1314
- project_root=self.project_root,
1315
- workflow_id=self.state.workflow_id,
1316
- workflow_name=self.workflow.name or self.state.workflow_id,
1317
- duration_seconds=duration_sec,
1318
- steps=len(self.workflow.steps),
1319
- success=(self.state.status == "completed"),
1320
- )
1321
- except Exception: # pylint: disable=broad-except
1322
- pass
1323
-
1324
- return self.state
1325
-
1326
- async def _execute_step_for_parallel(
1327
- self, step: WorkflowStep, target_path: Path | None
1328
- ) -> dict[str, dict[str, Any]] | None:
1329
- """
1330
- Execute a single workflow step using Cursor Skills and return artifacts (for parallel execution).
1331
-
1332
- This is similar to _execute_step but returns artifacts instead of updating state.
1333
- State updates (step_execution tracking) are handled by ParallelStepExecutor.
1334
- """
1335
- if not self.state or not self.workflow:
1336
- raise ValueError("Workflow not started")
1337
-
1338
- action = self._normalize_action(step.action)
1339
- agent_name = (step.agent or "").strip().lower()
1340
-
1341
- # Publish step started event (Phase 2)
1342
- await self.event_bus.publish(
1343
- WorkflowEvent(
1344
- event_type=EventType.STEP_STARTED,
1345
- workflow_id=self.state.workflow_id,
1346
- step_id=step.id,
1347
- data={
1348
- "agent": agent_name,
1349
- "action": action,
1350
- "step_id": step.id,
1351
- },
1352
- timestamp=datetime.now(),
1353
- correlation_id=f"{self.state.workflow_id}:{step.id}",
1354
- )
1355
- )
1356
-
1357
- # Handle completion/finalization steps that don't require agent execution
1358
- if agent_name == "orchestrator" and action in ["finalize", "complete"]:
1359
- # Return empty artifacts for completion steps
1360
- return {}
1361
-
1362
- # Track step start time for duration calculation
1363
- step_started_at = datetime.now()
1364
-
1365
- # Use context manager for worktree lifecycle (guaranteed cleanup)
1366
- async with self._worktree_context(step) as worktree_path:
1367
- worktree_name = self._worktree_name_for_step(step.id)
1368
-
1369
- # Try AgentHandlerRegistry first for context-aware execution (BUG-003 fix)
1370
- # Falls back to SkillInvoker if no handler found
1371
- from .agent_handlers import AgentHandlerRegistry
1372
-
1373
- # Helper function to run agents (needed by handlers)
1374
- async def run_agent(agent: str, command: str, **kwargs: Any) -> dict[str, Any]:
1375
- """Run agent by importing and invoking its class."""
1376
- module = __import__(f"tapps_agents.agents.{agent}.agent", fromlist=["*"])
1377
- class_name = f"{agent.title()}Agent"
1378
- agent_cls = getattr(module, class_name)
1379
- instance = agent_cls()
1380
- await instance.activate(self.project_root)
1381
- try:
1382
- return await instance.run(command, **kwargs)
1383
- finally:
1384
- if hasattr(instance, 'close'):
1385
- await instance.close()
1386
-
1387
- # Create handler registry and try to find handler
1388
- registry = AgentHandlerRegistry.create_registry(
1389
- project_root=self.project_root,
1390
- state=self.state,
1391
- workflow=self.workflow,
1392
- run_agent_fn=run_agent,
1393
- executor=self,
1394
- )
1395
-
1396
- handler = registry.find_handler(agent_name, action)
1397
-
1398
- try:
1399
- from ..core.unicode_safe import safe_print
1400
-
1401
- if handler:
1402
- # Use handler for context-aware execution (e.g., ImplementerHandler)
1403
- safe_print(f"\n[EXEC] Executing {agent_name}/{action} via handler...", flush=True)
1404
-
1405
- # Execute handler and get artifacts directly
1406
- # Note: Handler execution happens in main working directory, not worktree
1407
- # Worktree is only used for skill invocation fallback
1408
- created_artifacts_list = await handler.execute(step, action, target_path)
1409
-
1410
- # Convert handler artifacts to dict format
1411
- artifacts_dict: dict[str, dict[str, Any]] = {}
1412
- for art in (created_artifacts_list or []):
1413
- artifacts_dict[art["name"]] = art
1414
-
1415
- # Write success marker
1416
- step_completed_at = datetime.now()
1417
- duration = (step_completed_at - step_started_at).total_seconds()
1418
-
1419
- found_artifact_paths = [art["path"] for art in (created_artifacts_list or [])]
1420
-
1421
- marker_path = self.marker_writer.write_done_marker(
1422
- workflow_id=self.state.workflow_id,
1423
- step_id=step.id,
1424
- agent=agent_name,
1425
- action=action,
1426
- worktree_name=worktree_name,
1427
- worktree_path=str(worktree_path),
1428
- expected_artifacts=step.creates or [],
1429
- found_artifacts=found_artifact_paths,
1430
- duration_seconds=duration,
1431
- started_at=step_started_at,
1432
- completed_at=step_completed_at,
1433
- )
1434
-
1435
- if self.logger:
1436
- self.logger.debug(
1437
- f"Handler execution complete for step {step.id}",
1438
- marker_path=str(marker_path),
1439
- )
1440
-
1441
- return artifacts_dict if artifacts_dict else None
1442
- else:
1443
- # Fall back to SkillInvoker for steps without handlers
1444
- safe_print(f"\n[EXEC] Executing {agent_name}/{action} via skill...", flush=True)
1445
- await self.skill_invoker.invoke_skill(
1446
- agent_name=agent_name,
1447
- action=action,
1448
- step=step,
1449
- target_path=target_path,
1450
- worktree_path=worktree_path,
1451
- state=self.state,
1452
- )
1453
- # Skill invoker handles execution (direct execution or Cursor Skills)
1454
- # Artifacts are extracted after completion
1455
-
1456
- # Extract artifacts from worktree (skill_invoker path only)
1457
- artifacts = await self.worktree_manager.extract_artifacts(
1458
- worktree_path=worktree_path,
1459
- step=step,
1460
- )
1461
-
1462
- # Convert artifacts to dict format
1463
- artifacts_dict: dict[str, dict[str, Any]] = {}
1464
- found_artifact_paths = []
1465
- for artifact in artifacts:
1466
- artifacts_dict[artifact.name] = {
1467
- "name": artifact.name,
1468
- "path": artifact.path,
1469
- "status": artifact.status,
1470
- "created_by": artifact.created_by,
1471
- "created_at": artifact.created_at.isoformat() if artifact.created_at else None,
1472
- "metadata": artifact.metadata or {},
1473
- }
1474
- found_artifact_paths.append(artifact.path)
1475
-
1476
- # Write DONE marker for successful completion
1477
- step_completed_at = datetime.now()
1478
- duration = (step_completed_at - step_started_at).total_seconds()
1479
-
1480
- marker_path = self.marker_writer.write_done_marker(
1481
- workflow_id=self.state.workflow_id,
1482
- step_id=step.id,
1483
- agent=agent_name,
1484
- action=action,
1485
- worktree_name=worktree_name,
1486
- worktree_path=str(worktree_path),
1487
- expected_artifacts=step.creates or [],
1488
- found_artifacts=found_artifact_paths,
1489
- duration_seconds=duration,
1490
- started_at=step_started_at,
1491
- completed_at=step_completed_at,
1492
- )
1493
-
1494
- if self.logger:
1495
- self.logger.debug(
1496
- f"DONE marker written for step {step.id}",
1497
- marker_path=str(marker_path),
1498
- )
1499
-
1500
- # Worktree cleanup is handled by context manager
1501
- return artifacts_dict if artifacts_dict else None
1502
-
1503
- except (TimeoutError, RuntimeError) as e:
1504
- # Write FAILED marker for timeout or execution errors
1505
- step_failed_at = datetime.now()
1506
- duration = (step_failed_at - step_started_at).total_seconds()
1507
- error_type = type(e).__name__
1508
- error_msg = str(e)
1509
-
1510
- # Try to get completion status if available (for missing artifacts)
1511
- found_artifact_paths = []
1512
- try:
1513
- from .cursor_skill_helper import check_skill_completion
1514
- completion_status = check_skill_completion(
1515
- worktree_path=worktree_path,
1516
- expected_artifacts=step.creates or [],
1517
- )
1518
- found_artifact_paths = completion_status.get("found_artifacts", [])
1519
- except Exception:
1520
- pass
1521
-
1522
- marker_path = self.marker_writer.write_failed_marker(
1523
- workflow_id=self.state.workflow_id,
1524
- step_id=step.id,
1525
- agent=agent_name,
1526
- action=action,
1527
- error=error_msg,
1528
- worktree_name=worktree_name,
1529
- worktree_path=str(worktree_path),
1530
- expected_artifacts=step.creates or [],
1531
- found_artifacts=found_artifact_paths,
1532
- duration_seconds=duration,
1533
- started_at=step_started_at,
1534
- failed_at=step_failed_at,
1535
- error_type=error_type,
1536
- metadata={
1537
- "marker_location": f".tapps-agents/workflows/markers/{self.state.workflow_id}/step-{step.id}/FAILED.json",
1538
- },
1539
- )
1540
-
1541
- if self.logger:
1542
- self.logger.warning(
1543
- f"FAILED marker written for step {step.id}",
1544
- marker_path=str(marker_path),
1545
- error=error_msg,
1546
- )
1547
-
1548
- # Include marker location in error message for better troubleshooting
1549
- from ..core.unicode_safe import safe_print
1550
- safe_print(
1551
- f"\n[INFO] Failure marker written to: {marker_path}",
1552
- flush=True,
1553
- )
1554
-
1555
- # Re-raise the exception
1556
- raise
1557
- except Exception as e:
1558
- # Write FAILED marker for unexpected errors
1559
- step_failed_at = datetime.now()
1560
- duration = (step_failed_at - step_started_at).total_seconds()
1561
- error_type = type(e).__name__
1562
- error_msg = str(e)
1563
-
1564
- marker_path = self.marker_writer.write_failed_marker(
1565
- workflow_id=self.state.workflow_id,
1566
- step_id=step.id,
1567
- agent=agent_name,
1568
- action=action,
1569
- error=error_msg,
1570
- worktree_name=worktree_name,
1571
- worktree_path=str(worktree_path) if 'worktree_path' in locals() else None,
1572
- expected_artifacts=step.creates or [],
1573
- found_artifacts=[],
1574
- duration_seconds=duration,
1575
- started_at=step_started_at,
1576
- failed_at=step_failed_at,
1577
- error_type=error_type,
1578
- metadata={
1579
- "marker_location": f".tapps-agents/workflows/markers/{self.state.workflow_id}/step-{step.id}/FAILED.json",
1580
- },
1581
- )
1582
-
1583
- if self.logger:
1584
- self.logger.error(
1585
- f"FAILED marker written for step {step.id} (unexpected error)",
1586
- marker_path=str(marker_path),
1587
- error=error_msg,
1588
- exc_info=True,
1589
- )
1590
-
1591
- # Re-raise the exception
1592
- raise
1593
-
1594
- @asynccontextmanager
1595
- async def _worktree_context(
1596
- self, step: WorkflowStep
1597
- ) -> AsyncIterator[Path]:
1598
- """
1599
- Context manager for worktree lifecycle management.
1600
-
1601
- Ensures worktree is properly cleaned up even on cancellation or exceptions.
1602
- This is a 2025 best practice for resource management in async code.
1603
-
1604
- Args:
1605
- step: Workflow step that needs a worktree
1606
-
1607
- Yields:
1608
- Path to the worktree
1609
-
1610
- Example:
1611
- async with self._worktree_context(step) as worktree_path:
1612
- # Use worktree_path here
1613
- # Worktree automatically cleaned up on exit
1614
- """
1615
- worktree_name = self._worktree_name_for_step(step.id)
1616
- worktree_path: Path | None = None
1617
-
1618
- try:
1619
- # Create worktree
1620
- worktree_path = await self.worktree_manager.create_worktree(
1621
- worktree_name=worktree_name
1622
- )
1623
-
1624
- # Copy artifacts from previous steps to worktree
1625
- artifacts_list = list(self.state.artifacts.values())
1626
- await self.worktree_manager.copy_artifacts(
1627
- worktree_path=worktree_path,
1628
- artifacts=artifacts_list,
1629
- )
1630
-
1631
- # Yield worktree path
1632
- yield worktree_path
1633
-
1634
- finally:
1635
- # Always cleanup, even on cancellation or exception
1636
- if worktree_path:
1637
- try:
1638
- # Determine if we should delete the branch based on configuration
1639
- from ..core.config import load_config
1640
- config = load_config()
1641
- should_delete = (
1642
- config.workflow.branch_cleanup.delete_branches_on_cleanup
1643
- if (
1644
- config.workflow.branch_cleanup
1645
- and config.workflow.branch_cleanup.enabled
1646
- )
1647
- else True # Default to True for backward compatibility (same as parameter default)
1648
- )
1649
- await self.worktree_manager.remove_worktree(
1650
- worktree_name, delete_branch=should_delete
1651
- )
1652
- except Exception as e:
1653
- # Log but don't raise - cleanup failures shouldn't break workflow
1654
- if self.logger:
1655
- self.logger.warning(
1656
- f"Failed to cleanup worktree {worktree_name}: {e}",
1657
- step_id=step.id,
1658
- )
1659
-
1660
- def _worktree_name_for_step(self, step_id: str) -> str:
1661
- """
1662
- Deterministic, collision-resistant worktree name for a workflow step.
1663
-
1664
- Keeps names short/safe for Windows while still traceable back to workflow+step.
1665
- """
1666
- if not self.state:
1667
- raise ValueError("Workflow not started")
1668
- raw = f"workflow-{self.state.workflow_id}-step-{step_id}"
1669
- digest = hashlib.sha256(raw.encode("utf-8")).hexdigest()[:8]
1670
- base = f"{raw}-{digest}"
1671
- return WorktreeManager._sanitize_component(base, max_len=80)
1672
-
1673
- def get_current_step(self) -> WorkflowStep | None:
1674
- """Get the current workflow step."""
1675
- if not self.workflow or not self.state:
1676
- return None
1677
-
1678
- for step in self.workflow.steps:
1679
- if step.id == self.state.current_step:
1680
- return step
1681
- return None
1682
-
1683
- def _default_target_file(self) -> Path | None:
1684
- """Get default target file path."""
1685
- # Try common locations
1686
- candidates = [
1687
- self.project_root / "src" / "app.py",
1688
- self.project_root / "app.py",
1689
- self.project_root / "main.py",
1690
- ]
1691
- for candidate in candidates:
1692
- if candidate.exists():
1693
- return candidate
1694
- return None
1695
-
1696
- async def _execute_step(
1697
- self, step: WorkflowStep, target_path: Path | None
1698
- ) -> None:
1699
- """
1700
- Execute a single workflow step using Cursor Skills.
1701
-
1702
- Args:
1703
- step: Workflow step to execute
1704
- target_path: Optional target file path
1705
- """
1706
- if not self.state or not self.workflow:
1707
- raise ValueError("Workflow not started")
1708
-
1709
- action = self._normalize_action(step.action)
1710
- agent_name = (step.agent or "").strip().lower()
1711
-
1712
- # Handle completion/finalization steps that don't require agent execution
1713
- if agent_name == "orchestrator" and action in ["finalize", "complete"]:
1714
- # Mark step as completed without executing an agent
1715
- step_execution = StepExecution(
1716
- step_id=step.id,
1717
- agent=agent_name,
1718
- action=action,
1719
- started_at=datetime.now(),
1720
- completed_at=datetime.now(),
1721
- status="completed",
1722
- )
1723
- self.state.step_executions.append(step_execution)
1724
- self._advance_step()
1725
- self.save_state()
1726
- return
1727
-
1728
- # Create step execution tracking
1729
- step_execution = StepExecution(
1730
- step_id=step.id,
1731
- agent=agent_name,
1732
- action=action,
1733
- started_at=datetime.now(),
1734
- )
1735
- self.state.step_executions.append(step_execution)
1736
-
1737
- try:
1738
- # Create worktree for this step
1739
- worktree_name = self._worktree_name_for_step(step.id)
1740
- worktree_path = await self.worktree_manager.create_worktree(
1741
- worktree_name=worktree_name
1742
- )
1743
-
1744
- # Copy artifacts from previous steps to worktree
1745
- artifacts_list = list(self.state.artifacts.values())
1746
- await self.worktree_manager.copy_artifacts(
1747
- worktree_path=worktree_path,
1748
- artifacts=artifacts_list,
1749
- )
1750
-
1751
- # Invoke Skill via SkillInvoker (direct execution)
1752
- result = await self.skill_invoker.invoke_skill(
1753
- agent_name=agent_name,
1754
- action=action,
1755
- step=step,
1756
- target_path=target_path,
1757
- worktree_path=worktree_path,
1758
- state=self.state,
1759
- )
1760
-
1761
- # Wait for Skill to complete (direct execution)
1762
- # Poll for artifacts or completion marker
1763
- import asyncio
1764
-
1765
- from .cursor_skill_helper import check_skill_completion
1766
-
1767
- max_wait_time = 3600 # 1 hour max wait
1768
- poll_interval = 2 # Check every 2 seconds
1769
- elapsed = 0
1770
-
1771
- print(f"Waiting for {agent_name}/{action} to complete...")
1772
- while elapsed < max_wait_time:
1773
- completion_status = check_skill_completion(
1774
- worktree_path=worktree_path,
1775
- expected_artifacts=step.creates,
1776
- )
1777
-
1778
- if completion_status["completed"]:
1779
- from ..core.unicode_safe import safe_print
1780
- safe_print(f"[OK] {agent_name}/{action} completed - found artifacts: {completion_status['found_artifacts']}")
1781
- break
1782
-
1783
- await asyncio.sleep(poll_interval)
1784
- elapsed += poll_interval
1785
-
1786
- # Print progress every 10 seconds
1787
- if elapsed % 10 == 0:
1788
- print(f" Still waiting... ({elapsed}s elapsed)")
1789
- else:
1790
- raise TimeoutError(
1791
- f"Skill {agent_name}/{action} did not complete within {max_wait_time}s. "
1792
- f"Expected artifacts: {step.creates}, Missing: {completion_status.get('missing_artifacts', [])}"
1793
- )
1794
-
1795
- # Extract artifacts from worktree
1796
- artifacts = await self.worktree_manager.extract_artifacts(
1797
- worktree_path=worktree_path,
1798
- step=step,
1799
- )
1800
-
1801
- # Update state with artifacts
1802
- for artifact in artifacts:
1803
- self.state.artifacts[artifact.name] = artifact
1804
-
1805
- # Story-level step handling (Phase 3: Story-Level Granularity)
1806
- # Verify acceptance criteria BEFORE marking step as completed
1807
- if step.metadata and step.metadata.get("story_id"):
1808
- self._handle_story_completion(step, artifacts, step_execution)
1809
-
1810
- # Update step execution (after story verification)
1811
- step_execution.completed_at = datetime.now()
1812
- step_execution.status = "completed"
1813
- step_execution.result = result
1814
-
1815
- # Remove the worktree on success (keep on failure for debugging)
1816
- try:
1817
- # Determine if we should delete the branch based on configuration
1818
- from ..core.config import load_config
1819
- config = load_config()
1820
- should_delete = (
1821
- config.workflow.branch_cleanup.delete_branches_on_cleanup
1822
- if (
1823
- config.workflow.branch_cleanup
1824
- and config.workflow.branch_cleanup.enabled
1825
- )
1826
- else True # Default to True for backward compatibility
1827
- )
1828
- await self.worktree_manager.remove_worktree(
1829
- worktree_name, delete_branch=should_delete
1830
- )
1831
- except Exception:
1832
- pass
1833
-
1834
- # Advance to next step
1835
- self._advance_step()
1836
-
1837
- except Exception as e:
1838
- step_execution.completed_at = datetime.now()
1839
- step_execution.status = "failed"
1840
- step_execution.error = str(e)
1841
- raise
1842
-
1843
- finally:
1844
- self.save_state()
1845
-
1846
- def _normalize_action(self, action: str) -> str:
1847
- """
1848
- Normalize action name to use underscores (Python convention).
1849
-
1850
- Converts hyphens to underscores so workflow YAMLs can use either format,
1851
- but handlers always receive underscore format (e.g., "write_code").
1852
- """
1853
- return action.replace("-", "_").lower()
1854
-
1855
- def _get_step_params(self, step: WorkflowStep, target_path: Path | None) -> dict[str, Any]:
1856
- """
1857
- Extract parameters for step execution.
1858
-
1859
- Args:
1860
- step: Workflow step
1861
- target_path: Optional target file path
1862
-
1863
- Returns:
1864
- Dictionary of parameters for command building
1865
- """
1866
- params: dict[str, Any] = {}
1867
-
1868
- # Add target file if provided
1869
- if target_path:
1870
- try:
1871
- # Try relative path first (most common case)
1872
- resolved_target = Path(target_path).resolve()
1873
- resolved_root = self.project_root.resolve()
1874
-
1875
- # Use is_relative_to if available (Python 3.9+)
1876
- try:
1877
- if resolved_target.is_relative_to(resolved_root):
1878
- params["target_file"] = str(resolved_target.relative_to(resolved_root))
1879
- else:
1880
- # Path is outside project root - use path normalizer
1881
- from ...core.path_normalizer import normalize_for_cli
1882
- params["target_file"] = normalize_for_cli(target_path, self.project_root)
1883
- except AttributeError:
1884
- # Python < 3.9 - use try/except
1885
- try:
1886
- params["target_file"] = str(resolved_target.relative_to(resolved_root))
1887
- except ValueError:
1888
- # Path is outside project root - use path normalizer
1889
- from ...core.path_normalizer import normalize_for_cli
1890
- params["target_file"] = normalize_for_cli(target_path, self.project_root)
1891
- except Exception as e:
1892
- # Fallback: use path normalizer for any error
1893
- from ...core.path_normalizer import normalize_for_cli
1894
- if self.logger:
1895
- self.logger.warning(f"Path conversion error: {e}. Using path normalizer.")
1896
- params["target_file"] = normalize_for_cli(target_path, self.project_root)
1897
-
1898
- # Add step metadata
1899
- if step.metadata:
1900
- params.update(step.metadata)
1901
-
1902
- # Add workflow variables
1903
- if self.state and self.state.variables:
1904
- # Include relevant variables (avoid exposing everything)
1905
- if "user_prompt" in self.state.variables:
1906
- params["user_prompt"] = self.state.variables["user_prompt"]
1907
- if "target_file" in self.state.variables:
1908
- params["target_file"] = self.state.variables["target_file"]
1909
-
1910
- return params
1911
-
1912
- def _handle_story_completion(
1913
- self, step: WorkflowStep, artifacts: list[Artifact], step_execution: StepExecution
1914
- ) -> None:
1915
- """
1916
- Handle story-level step completion (Phase 3: Story-Level Granularity).
1917
-
1918
- Verifies acceptance criteria, logs to progress.txt, and tracks story completion.
1919
-
1920
- Args:
1921
- step: Completed workflow step with story metadata
1922
- artifacts: Artifacts created by the step
1923
- step_execution: Step execution record to update if criteria fail
1924
- """
1925
- if not step.metadata:
1926
- return
1927
-
1928
- story_id = step.metadata.get("story_id")
1929
- story_title = step.metadata.get("story_title")
1930
- acceptance_criteria = step.metadata.get("acceptance_criteria", [])
1931
-
1932
- if not story_id:
1933
- return # Not a story-level step
1934
-
1935
- # Verify acceptance criteria if provided
1936
- passes = True
1937
- verification_result = None
1938
-
1939
- if acceptance_criteria:
1940
- from .acceptance_verifier import AcceptanceCriteriaVerifier
1941
-
1942
- # Convert artifacts list to dict
1943
- artifacts_dict = {art.name: art for art in artifacts}
1944
-
1945
- # Get code files from artifacts
1946
- code_files = []
1947
- for art in artifacts:
1948
- if art.path:
1949
- art_path = Path(art.path)
1950
- if art_path.exists() and art_path.suffix in [".py", ".js", ".ts", ".tsx", ".jsx", ".java", ".go", ".rs"]:
1951
- code_files.append(art_path)
1952
-
1953
- # Verify criteria
1954
- verifier = AcceptanceCriteriaVerifier()
1955
- verification_result = verifier.verify(
1956
- criteria=acceptance_criteria,
1957
- artifacts=artifacts_dict,
1958
- code_files=code_files if code_files else None,
1959
- )
1960
- passes = verification_result.get("all_passed", True)
1961
-
1962
- # Store verification result in state variables
1963
- if "story_verifications" not in self.state.variables:
1964
- self.state.variables["story_verifications"] = {}
1965
- self.state.variables["story_verifications"][story_id] = verification_result
1966
-
1967
- # Track story completion in state.variables
1968
- if "story_completions" not in self.state.variables:
1969
- self.state.variables["story_completions"] = {}
1970
- self.state.variables["story_completions"][story_id] = passes
1971
-
1972
- # Log to progress.txt if progress logger is available
1973
- try:
1974
- from .progress_logger import ProgressLogger
1975
-
1976
- progress_file = self.project_root / ".tapps-agents" / "progress.txt"
1977
- progress_logger = ProgressLogger(progress_file)
1978
-
1979
- # Extract files changed
1980
- files_changed = [art.path for art in artifacts if art.path]
1981
-
1982
- # Extract learnings from verification result
1983
- learnings = []
1984
- if verification_result and not passes:
1985
- failed_criteria = [
1986
- r["criterion"]
1987
- for r in verification_result.get("results", [])
1988
- if not r.get("passed", False)
1989
- ]
1990
- if failed_criteria:
1991
- learnings.append(f"Acceptance criteria not met: {', '.join(failed_criteria)}")
1992
-
1993
- # Log story completion
1994
- progress_logger.log_story_completion(
1995
- story_id=story_id,
1996
- story_title=story_title or step.id,
1997
- passes=passes,
1998
- files_changed=files_changed if files_changed else None,
1999
- learnings=learnings if learnings else None,
2000
- )
2001
- except Exception:
2002
- # Don't fail workflow if progress logging fails
2003
- import logging
2004
- logger = logging.getLogger(__name__)
2005
- logger.warning("Failed to log story completion to progress.txt", exc_info=True)
2006
-
2007
- # If acceptance criteria not met, mark step as failed and raise exception
2008
- if not passes:
2009
- step_execution.status = "failed"
2010
- step_execution.error = f"Acceptance criteria not met for story {story_id}"
2011
- # Raise exception to prevent advancing to next step
2012
- raise ValueError(f"Story {story_id} failed acceptance criteria verification")
2013
-
2014
- def _advance_step(self) -> None:
2015
- """Advance to the next workflow step."""
2016
- if not self.workflow or not self.state:
2017
- return
2018
-
2019
- # Use auto-progression if enabled
2020
- if self.auto_progression.should_auto_progress():
2021
- current_step = self.get_current_step()
2022
- if current_step:
2023
- # Get progression decision
2024
- step_execution = next(
2025
- (se for se in self.state.step_executions if se.step_id == current_step.id),
2026
- None
2027
- )
2028
- if step_execution:
2029
- review_result = None
2030
- if current_step.agent == "reviewer":
2031
- review_result = self.state.variables.get("reviewer_result")
2032
-
2033
- decision = self.auto_progression.handle_step_completion(
2034
- step=current_step,
2035
- state=self.state,
2036
- step_execution=step_execution,
2037
- review_result=review_result,
2038
- )
2039
-
2040
- next_step_id = self.auto_progression.get_next_step_id(
2041
- step=current_step,
2042
- decision=decision,
2043
- workflow_steps=self.workflow.steps,
2044
- )
2045
-
2046
- if next_step_id:
2047
- self.state.current_step = next_step_id
2048
- else:
2049
- # Workflow complete
2050
- self.state.status = "completed"
2051
- self.state.completed_at = datetime.now()
2052
- self.state.current_step = None
2053
- return
2054
-
2055
- # Fallback to sequential progression
2056
- current_index = None
2057
- for i, step in enumerate(self.workflow.steps):
2058
- if step.id == self.state.current_step:
2059
- current_index = i
2060
- break
2061
-
2062
- if current_index is None:
2063
- self.state.status = "failed"
2064
- self.state.error = f"Current step {self.state.current_step} not found"
2065
- return
2066
-
2067
- # Move to next step
2068
- if current_index + 1 < len(self.workflow.steps):
2069
- self.state.current_step = self.workflow.steps[current_index + 1].id
2070
- else:
2071
- # All steps completed
2072
- self.state.status = "completed"
2073
- self.state.completed_at = datetime.now()
2074
- self.state.current_step = None
2075
-
2076
- def get_progression_status(self) -> dict[str, Any]:
2077
- """
2078
- Get current progression status and visibility information.
2079
-
2080
- Returns:
2081
- Dictionary with progression status
2082
- """
2083
- if not self.workflow or not self.state:
2084
- return {"status": "not_started"}
2085
-
2086
- return self.auto_progression.get_progression_status(
2087
- state=self.state,
2088
- workflow_steps=self.workflow.steps,
2089
- )
2090
-
2091
- def get_progression_history(self, step_id: str | None = None) -> list[dict[str, Any]]:
2092
- """
2093
- Get progression history.
2094
-
2095
- Args:
2096
- step_id: Optional step ID to filter by
2097
-
2098
- Returns:
2099
- List of progression history entries
2100
- """
2101
- history = self.auto_progression.get_progression_history(step_id=step_id)
2102
- return [
2103
- {
2104
- "step_id": h.step_id,
2105
- "timestamp": h.timestamp.isoformat(),
2106
- "action": h.action.value,
2107
- "reason": h.reason,
2108
- "gate_result": h.gate_result,
2109
- "metadata": h.metadata,
2110
- }
2111
- for h in history
2112
- ]
2113
-
2114
- def pause_workflow(self) -> None:
2115
- """
2116
- Pause workflow execution.
2117
-
2118
- Epic 10: Progression Control
2119
- """
2120
- if not self.state:
2121
- raise ValueError("Workflow not started")
2122
-
2123
- if self.state.status == "running":
2124
- self.state.status = "paused"
2125
- self.save_state()
2126
- if self.logger:
2127
- self.logger.info("Workflow paused by user")
2128
- self.auto_progression.record_progression(
2129
- step_id=self.state.current_step or "unknown",
2130
- action=ProgressionAction.PAUSE,
2131
- reason="Workflow paused by user",
2132
- )
2133
-
2134
- def resume_workflow(self) -> None:
2135
- """
2136
- Resume paused workflow execution.
2137
-
2138
- Epic 10: Progression Control
2139
- """
2140
- if not self.state:
2141
- raise ValueError("Workflow not started")
2142
-
2143
- if self.state.status == "paused":
2144
- self.state.status = "running"
2145
- self.save_state()
2146
- if self.logger:
2147
- self.logger.info("Workflow resumed by user")
2148
- self.auto_progression.record_progression(
2149
- step_id=self.state.current_step or "unknown",
2150
- action=ProgressionAction.CONTINUE,
2151
- reason="Workflow resumed by user",
2152
- )
2153
-
2154
- def skip_step(self, step_id: str | None = None) -> None:
2155
- """
2156
- Skip a workflow step.
2157
-
2158
- Args:
2159
- step_id: Step ID to skip (defaults to current step)
2160
-
2161
- Epic 10: Progression Control
2162
- """
2163
- if not self.state or not self.workflow:
2164
- raise ValueError("Workflow not started")
2165
-
2166
- step_id = step_id or self.state.current_step
2167
- if not step_id:
2168
- raise ValueError("No step to skip")
2169
-
2170
- # Find the step
2171
- step = next((s for s in self.workflow.steps if s.id == step_id), None)
2172
- if not step:
2173
- raise ValueError(f"Step {step_id} not found")
2174
-
2175
- # Record skip in progression history
2176
- self.auto_progression.record_progression(
2177
- step_id=step_id,
2178
- action=ProgressionAction.SKIP,
2179
- reason="Step skipped by user",
2180
- )
2181
-
2182
- # Advance to next step
2183
- if step.next:
2184
- self.state.current_step = step.next
2185
- self.save_state()
2186
- if self.logger:
2187
- self.logger.info(f"Step {step_id} skipped, advancing to {step.next}")
2188
- else:
2189
- # No next step - workflow complete
2190
- self.state.status = "completed"
2191
- self.state.completed_at = datetime.now()
2192
- self.state.current_step = None
2193
- self.save_state()
2194
- if self.logger:
2195
- self.logger.info(f"Step {step_id} skipped, workflow completed")
2196
-
1
+ """
2
+ Cursor-Native Workflow Executor.
3
+
4
+ This module provides a Cursor-native execution model that uses Cursor Skills
5
+ and direct execution for LLM operations.
6
+ """
7
+
8
+ # @ai-prime-directive: This file implements the Cursor-native workflow executor for Cursor Skills integration.
9
+ # This executor is used when running in Cursor mode (TAPPS_AGENTS_MODE=cursor) and invokes Cursor Skills
10
+ # for LLM operations instead of direct API calls. Do not modify the Skill invocation pattern without
11
+ # updating Cursor Skills integration and tests.
12
+
13
+ # @ai-constraints:
14
+ # - Must only execute in Cursor mode (is_cursor_mode() must return True)
15
+ # - Must use SkillInvoker for all LLM operations - do not make direct API calls
16
+ # - Workflow state must be compatible with WorkflowExecutor for cross-mode compatibility
17
+ # - Performance: Skill invocation should complete in <5s for typical operations
18
+ # - Must maintain backward compatibility with WorkflowExecutor workflow definitions
19
+
20
+ # @note[2026-02-03]: Equal platform support policy per ADR-002.
21
+ # The framework provides equal support for Claude Desktop, Cursor IDE, and Claude Code CLI.
22
+ # Uses handler-first execution (AgentHandlerRegistry) before platform-specific features.
23
+ # See docs/architecture/decisions/ADR-002-equal-platform-support.md
24
+
25
+ from __future__ import annotations
26
+
27
+ import asyncio
28
+ import hashlib
29
+ import os
30
+ import traceback
31
+ from collections.abc import AsyncIterator
32
+ from contextlib import asynccontextmanager
33
+ from dataclasses import asdict
34
+ from datetime import datetime
35
+ from pathlib import Path
36
+ from typing import Any
37
+
38
+ from ..core.project_profile import (
39
+ ProjectProfile,
40
+ ProjectProfileDetector,
41
+ load_project_profile,
42
+ save_project_profile,
43
+ )
44
+ from ..core.runtime_mode import is_cursor_mode
45
+ from .auto_progression import AutoProgressionManager, ProgressionAction
46
+ from .checkpoint_manager import (
47
+ CheckpointConfig,
48
+ CheckpointFrequency,
49
+ WorkflowCheckpointManager,
50
+ )
51
+ from .error_recovery import ErrorContext, ErrorRecoveryManager
52
+ from .event_bus import FileBasedEventBus
53
+ from .events import EventType, WorkflowEvent
54
+ from .logging_helper import WorkflowLogger
55
+ from .marker_writer import MarkerWriter
56
+ from .models import Artifact, StepExecution, StepResult, Workflow, WorkflowState, WorkflowStep
57
+ from .parallel_executor import ParallelStepExecutor
58
+ from .progress_manager import ProgressUpdateManager
59
+ from .skill_invoker import SkillInvoker
60
+ from .state_manager import AdvancedStateManager
61
+ from .state_persistence_config import StatePersistenceConfigManager
62
+ from .worktree_manager import WorktreeManager
63
+
64
+
65
+ class CursorWorkflowExecutor:
66
+ """
67
+ Cursor-native workflow executor that uses Skills.
68
+
69
+ This executor is used when running in Cursor mode (TAPPS_AGENTS_MODE=cursor).
70
+ It invokes Cursor Skills for LLM operations.
71
+ """
72
+
73
+ def __init__(
74
+ self,
75
+ project_root: Path | None = None,
76
+ expert_registry: Any | None = None,
77
+ auto_mode: bool = False,
78
+ ):
79
+ """
80
+ Initialize Cursor-native workflow executor.
81
+
82
+ Args:
83
+ project_root: Root directory for the project
84
+ expert_registry: Optional ExpertRegistry instance for expert consultation
85
+ auto_mode: Whether to run in fully automated mode (no prompts)
86
+ """
87
+ if not is_cursor_mode():
88
+ raise RuntimeError(
89
+ "CursorWorkflowExecutor can only be used in Cursor mode. "
90
+ "Use WorkflowExecutor for headless mode."
91
+ )
92
+
93
+ self.project_root = project_root or Path.cwd()
94
+ self.state: WorkflowState | None = None
95
+ self.workflow: Workflow | None = None
96
+ self.expert_registry = expert_registry
97
+ self.auto_mode = auto_mode
98
+ self.skill_invoker = SkillInvoker(
99
+ project_root=self.project_root, use_api=True
100
+ )
101
+ self.worktree_manager = WorktreeManager(project_root=self.project_root)
102
+ self.project_profile: ProjectProfile | None = None
103
+ self.parallel_executor = ParallelStepExecutor(max_parallel=8, default_timeout_seconds=3600.0)
104
+ self.logger: WorkflowLogger | None = None # Initialized in start() with workflow_id
105
+ self.progress_manager: ProgressUpdateManager | None = None # Initialized in start() with workflow
106
+
107
+ # Issue fix: Support for continue-from and skip-steps flags
108
+ self.continue_from: str | None = None
109
+ self.skip_steps: list[str] = []
110
+ self.print_paths: bool = True # Issue fix: Print artifact paths after each step
111
+
112
+ # Initialize event bus for event-driven communication (Phase 2)
113
+ self.event_bus = FileBasedEventBus(project_root=self.project_root)
114
+
115
+ # Initialize auto-progression manager (Epic 10)
116
+ auto_progression_enabled = os.getenv("TAPPS_AGENTS_AUTO_PROGRESSION", "true").lower() == "true"
117
+ self.auto_progression = AutoProgressionManager(
118
+ auto_progression_enabled=auto_progression_enabled,
119
+ auto_retry_enabled=True,
120
+ max_retries=3,
121
+ )
122
+
123
+ # Initialize error recovery manager (Epic 14)
124
+ error_recovery_enabled = os.getenv("TAPPS_AGENTS_ERROR_RECOVERY", "true").lower() == "true"
125
+ self.error_recovery = ErrorRecoveryManager(
126
+ enable_auto_retry=error_recovery_enabled,
127
+ max_retries=3,
128
+ ) if error_recovery_enabled else None
129
+
130
+ # Initialize state persistence configuration manager (Epic 12 - Story 12.6)
131
+ self.state_config_manager = StatePersistenceConfigManager(project_root=self.project_root)
132
+
133
+ # Initialize checkpoint manager (Epic 12)
134
+ # Use configuration from state persistence config if available
135
+ state_config = self.state_config_manager.config
136
+ if state_config and state_config.checkpoint:
137
+ checkpoint_frequency = state_config.checkpoint.mode
138
+ checkpoint_interval = state_config.checkpoint.interval
139
+ checkpoint_enabled = state_config.checkpoint.enabled
140
+ else:
141
+ # Fall back to environment variables
142
+ checkpoint_frequency = os.getenv("TAPPS_AGENTS_CHECKPOINT_FREQUENCY", "every_step")
143
+ checkpoint_interval = int(os.getenv("TAPPS_AGENTS_CHECKPOINT_INTERVAL", "1"))
144
+ checkpoint_enabled = os.getenv("TAPPS_AGENTS_CHECKPOINT_ENABLED", "true").lower() == "true"
145
+
146
+ try:
147
+ frequency = CheckpointFrequency(checkpoint_frequency)
148
+ except ValueError:
149
+ frequency = CheckpointFrequency.EVERY_STEP
150
+
151
+ checkpoint_config = CheckpointConfig(
152
+ frequency=frequency,
153
+ interval=checkpoint_interval,
154
+ enabled=checkpoint_enabled,
155
+ )
156
+ self.checkpoint_manager = WorkflowCheckpointManager(config=checkpoint_config)
157
+
158
+ # Initialize state manager
159
+ # Use storage location from config
160
+ if state_config and state_config.enabled:
161
+ state_dir = self.state_config_manager.get_storage_path()
162
+ compression = state_config.compression
163
+ else:
164
+ state_dir = self._state_dir()
165
+ compression = False
166
+ self.state_manager = AdvancedStateManager(state_dir, compression=compression)
167
+
168
+ # Always use direct execution via Skills (Background Agents removed)
169
+
170
+ # Initialize marker writer for durable step completion tracking
171
+ self.marker_writer = MarkerWriter(project_root=self.project_root)
172
+
173
+ def _state_dir(self) -> Path:
174
+ """Get state directory path."""
175
+ return self.project_root / ".tapps-agents" / "workflow-state"
176
+
177
+ def _print_step_artifacts(
178
+ self,
179
+ step: Any,
180
+ artifacts: dict[str, Any],
181
+ step_execution: Any,
182
+ ) -> None:
183
+ """
184
+ Print artifact paths after step completion (Issue fix: Hidden workflow state).
185
+
186
+ Provides clear visibility into where workflow outputs are saved.
187
+ """
188
+ from ..core.unicode_safe import safe_print
189
+
190
+ duration = step_execution.duration_seconds if step_execution else 0
191
+ duration_str = f"{duration:.1f}s" if duration else "N/A"
192
+
193
+ safe_print(f"\n[OK] Step '{step.id}' completed ({duration_str})")
194
+
195
+ if artifacts:
196
+ print(" 📄 Artifacts created:")
197
+ for art_name, art_data in artifacts.items():
198
+ if isinstance(art_data, dict):
199
+ path = art_data.get("path", "")
200
+ if path:
201
+ print(f" - {path}")
202
+ else:
203
+ print(f" - {art_name} (in-memory)")
204
+ else:
205
+ print(f" - {art_name}")
206
+
207
+ # Also print workflow state location for reference
208
+ if self.state:
209
+ state_dir = self._state_dir()
210
+ print(f" 📁 State: {state_dir / self.state.workflow_id}")
211
+
212
+ def _profile_project(self) -> None:
213
+ """
214
+ Perform project profiling before workflow execution.
215
+
216
+ Loads existing profile if available, otherwise detects and saves a new one.
217
+ The profile is stored in workflow state and passed to all Skills via context.
218
+ """
219
+ # Try to load existing profile first
220
+ self.project_profile = load_project_profile(project_root=self.project_root)
221
+
222
+ # If no profile exists, detect and save it
223
+ if not self.project_profile:
224
+ detector = ProjectProfileDetector(project_root=self.project_root)
225
+ self.project_profile = detector.detect_profile()
226
+ save_project_profile(profile=self.project_profile, project_root=self.project_root)
227
+
228
    async def start(
        self,
        workflow: Workflow,
        user_prompt: str | None = None,
    ) -> WorkflowState:
        """
        Start a new workflow execution.

        Also executes state cleanup if configured for "on_startup" schedule.

        Side effects: initializes the workflow logger, profiles the project,
        optionally creates a beads tracking issue, writes an execution plan,
        publishes a WORKFLOW_STARTED event, starts the progress manager,
        persists initial state, and generates the task manifest.

        Args:
            workflow: Workflow to execute
            user_prompt: Optional user prompt for the workflow

        Returns:
            Initial workflow state

        Raises:
            BeadsRequiredError: re-raised when beads integration is required
                but unavailable; other beads errors are swallowed.
        """
        # Execute cleanup on startup if configured (Epic 12 - Story 12.6)
        # NOTE(review): self.logger is still None here (it is created below),
        # so this cleanup-result log line never fires on a fresh start — confirm intent.
        if self.state_config_manager.config and self.state_config_manager.config.cleanup:
            if self.state_config_manager.config.cleanup.cleanup_schedule == "on_startup":
                cleanup_result = self.state_config_manager.execute_cleanup()
                if self.logger:
                    self.logger.info(
                        f"State cleanup on startup: {cleanup_result}",
                        cleanup_result=cleanup_result,
                    )

        self.workflow = workflow

        # Check workflow metadata for auto-execution override (per-workflow config)
        # Always use direct execution via Skills (Background Agents removed)

        # Use consistent workflow_id format: {workflow.id}-{timestamp}
        # Include microseconds to ensure uniqueness for parallel workflows (BUG-001 fix)
        workflow_id = f"{workflow.id}-{datetime.now().strftime('%Y%m%d-%H%M%S-%f')}"

        # Initialize logger with workflow_id for correlation
        self.logger = WorkflowLogger(workflow_id=workflow_id)

        # Perform project profiling before workflow execution
        self._profile_project()

        self.state = WorkflowState(
            workflow_id=workflow_id,
            started_at=datetime.now(),
            current_step=workflow.steps[0].id if workflow.steps else None,
            status="running",
            variables={
                "user_prompt": user_prompt or "",
                "project_profile": self.project_profile.to_dict() if self.project_profile else None,
                "workflow_name": workflow.name,  # Store in variables for reference
            },
        )

        # Beads: create workflow issue when enabled (store for close in run finally)
        try:
            from ..core.config import load_config
            from ..beads import require_beads
            from ..simple_mode.beads_hooks import create_workflow_issue

            config = load_config(self.project_root / ".tapps-agents" / "config.yaml")
            require_beads(config, self.project_root)
            state_vars = self.state.variables or {}
            # On resume: reuse id from .beads_issue_id file (same layout as *build)
            state_dir = self._state_dir()
            wf_dir = state_dir / workflow_id
            beads_file = wf_dir / ".beads_issue_id"
            if beads_file.exists():
                try:
                    bid = beads_file.read_text(encoding="utf-8").strip() or None
                    if bid:
                        state_vars["_beads_issue_id"] = bid
                        self.state.variables = state_vars
                except OSError:
                    # Unreadable marker file: fall through and create a new issue.
                    pass
            if "_beads_issue_id" not in state_vars:
                bid = create_workflow_issue(
                    self.project_root,
                    config,
                    workflow.name,
                    user_prompt or state_vars.get("target_file", "") or "",
                )
                if bid:
                    state_vars["_beads_issue_id"] = bid
                    self.state.variables = state_vars
                    # Persist the issue id so a resumed run reuses it (best effort).
                    try:
                        wf_dir.mkdir(parents=True, exist_ok=True)
                        beads_file.write_text(bid, encoding="utf-8")
                    except OSError:
                        pass
        except Exception as e:
            from ..beads import BeadsRequiredError

            # Only a hard "beads required" failure aborts start.
            if isinstance(e, BeadsRequiredError):
                raise
            pass  # log-and-continue: do not fail start for other beads errors

        # Generate and save execution plan (Epic 6 - Story 6.7)
        try:
            from .execution_plan import generate_execution_plan, save_execution_plan
            execution_plan = generate_execution_plan(workflow)
            state_dir = self._state_dir()
            plan_path = save_execution_plan(execution_plan, state_dir, workflow_id)
            if self.logger:
                self.logger.info(
                    f"Execution plan generated: {plan_path}",
                    execution_plan_path=str(plan_path),
                )
        except Exception as e:
            # Don't fail workflow start if execution plan generation fails
            if self.logger:
                self.logger.warning(f"Failed to generate execution plan: {e}")

        self.logger.info(
            "Workflow started",
            workflow_name=workflow.name,
            workflow_version=workflow.version,
            step_count=len(workflow.steps),
        )

        # Publish workflow started event (Phase 2)
        await self.event_bus.publish(
            WorkflowEvent(
                event_type=EventType.WORKFLOW_STARTED,
                workflow_id=workflow_id,
                step_id=None,
                data={
                    "workflow_name": workflow.name,
                    "workflow_version": workflow.version,
                    "step_count": len(workflow.steps),
                    "user_prompt": user_prompt or "",
                },
                timestamp=datetime.now(),
                correlation_id=workflow_id,
            )
        )

        # Initialize progress update manager
        self.progress_manager = ProgressUpdateManager(
            workflow=workflow,
            state=self.state,
            project_root=self.project_root,
            enable_updates=True,
        )
        # Connect event bus to status monitor (Phase 2)
        if self.progress_manager.status_monitor:
            self.progress_manager.status_monitor.event_bus = self.event_bus
        # Start progress monitoring (non-blocking)
        import asyncio
        try:
            asyncio.get_running_loop()
            asyncio.create_task(self.progress_manager.start())
        except RuntimeError:
            # No running event loop - progress manager will start when event loop is available
            pass

        self.save_state()

        # Generate task manifest (Epic 7)
        self._generate_manifest()

        return self.state
390
+
391
    def save_state(self) -> None:
        """
        Save workflow state to disk.

        Writes ``<state_dir>/<workflow_id>.json`` atomically, mirrors the same
        document into ``<state_dir>/history/``, and regenerates the task
        manifest. No-op when no state exists yet.
        """
        if not self.state:
            return

        def _make_json_serializable(obj: Any) -> Any:
            """Recursively convert objects to JSON-serializable format."""
            # Handle ProjectProfile objects
            # Duck-type check first so the project_profile import only happens
            # for plausible candidates; import failures are tolerated.
            if hasattr(obj, "to_dict") and hasattr(obj, "compliance_requirements"):
                try:
                    from ..core.project_profile import ProjectProfile
                    if isinstance(obj, ProjectProfile):
                        return obj.to_dict()
                except (ImportError, AttributeError):
                    pass

            # Handle ComplianceRequirement objects
            if hasattr(obj, "name") and hasattr(obj, "confidence") and hasattr(obj, "indicators"):
                try:
                    from ..core.project_profile import ComplianceRequirement
                    if isinstance(obj, ComplianceRequirement):
                        return asdict(obj)
                except (ImportError, AttributeError):
                    pass

            # Handle dictionaries recursively
            if isinstance(obj, dict):
                return {k: _make_json_serializable(v) for k, v in obj.items()}

            # Handle lists recursively
            if isinstance(obj, list):
                return [_make_json_serializable(item) for item in obj]

            # Handle other non-serializable types
            # Probe with json.dumps; anything that fails is stringified so a
            # single odd value can never abort the whole state save.
            try:
                import json
                json.dumps(obj)
                return obj
            except (TypeError, ValueError):
                # For non-serializable types, convert to string as fallback
                return str(obj)

        state_file = self._state_dir() / f"{self.state.workflow_id}.json"
        state_file.parent.mkdir(parents=True, exist_ok=True)

        # Convert variables to JSON-serializable format
        variables = self.state.variables or {}
        serializable_variables = _make_json_serializable(variables)

        # Convert to dict for JSON serialization
        # Datetimes are ISO-8601 strings; None is preserved for unset timestamps.
        state_dict = {
            "workflow_id": self.state.workflow_id,
            "status": self.state.status,
            "current_step": self.state.current_step,
            "started_at": self.state.started_at.isoformat() if self.state.started_at else None,
            "completed_steps": self.state.completed_steps,
            "skipped_steps": self.state.skipped_steps,
            "variables": serializable_variables,
            "artifacts": {
                name: {
                    "name": a.name,
                    "path": a.path,
                    "status": a.status,
                    "created_by": a.created_by,
                    "created_at": a.created_at.isoformat() if a.created_at else None,
                    "metadata": a.metadata,
                }
                for name, a in self.state.artifacts.items()
            },
            "step_executions": [
                {
                    "step_id": se.step_id,
                    "agent": se.agent,
                    "action": se.action,
                    "started_at": se.started_at.isoformat() if se.started_at else None,
                    "completed_at": se.completed_at.isoformat() if se.completed_at else None,
                    "duration_seconds": se.duration_seconds,
                    "status": se.status,
                    "error": se.error,
                }
                for se in self.state.step_executions
            ],
            "error": self.state.error,
        }

        from .file_utils import atomic_write_json

        # Atomic write prevents readers from seeing a half-written state file.
        atomic_write_json(state_file, state_dict, indent=2)

        # Also save to history
        history_dir = state_file.parent / "history"
        history_dir.mkdir(exist_ok=True)
        history_file = history_dir / state_file.name
        atomic_write_json(history_file, state_dict, indent=2)

        # Generate task manifest (Epic 7)
        self._generate_manifest()
488
+
489
+ def _generate_manifest(self) -> None:
490
+ """
491
+ Generate and save task manifest (Epic 7).
492
+
493
+ Generates manifest on workflow start, step completion, and state save.
494
+ """
495
+ if not self.workflow or not self.state:
496
+ return
497
+
498
+ try:
499
+ from .manifest import (
500
+ generate_manifest,
501
+ save_manifest,
502
+ sync_manifest_to_project_root,
503
+ )
504
+
505
+ # Generate manifest
506
+ manifest_content = generate_manifest(self.workflow, self.state)
507
+
508
+ # Save to state directory
509
+ state_dir = self._state_dir()
510
+ manifest_path = save_manifest(manifest_content, state_dir, self.state.workflow_id)
511
+
512
+ # Optional: Sync to project root if configured
513
+ sync_enabled = os.getenv("TAPPS_AGENTS_MANIFEST_SYNC", "false").lower() == "true"
514
+ if sync_enabled:
515
+ sync_path = sync_manifest_to_project_root(manifest_content, self.project_root)
516
+ if self.logger:
517
+ self.logger.debug(
518
+ "Task manifest synced to project root",
519
+ manifest_path=str(manifest_path),
520
+ sync_path=str(sync_path),
521
+ )
522
+ elif self.logger:
523
+ self.logger.debug(
524
+ "Task manifest generated",
525
+ manifest_path=str(manifest_path),
526
+ )
527
+ except Exception as e:
528
+ # Don't fail workflow if manifest generation fails
529
+ if self.logger:
530
+ self.logger.warning(
531
+ "Failed to generate task manifest",
532
+ error=str(e),
533
+ )
534
+
535
    async def run(
        self,
        workflow: Workflow | None = None,
        target_file: str | None = None,
        max_steps: int = 100,
    ) -> WorkflowState:
        """
        Run workflow to completion with timeout protection.

        Args:
            workflow: Workflow to execute (if not already loaded)
            target_file: Optional target file path
            max_steps: Maximum number of steps to execute

        Returns:
            Final workflow state

        Raises:
            TimeoutError: if total execution exceeds twice the configured
                step timeout (workflow.timeout_seconds).
        """
        import asyncio
        from datetime import datetime

        from tapps_agents.core.config import load_config

        config = load_config()
        # Use 2x step timeout for overall workflow timeout (default: 2 hours)
        workflow_timeout = getattr(config.workflow, 'timeout_seconds', 3600.0) * 2

        async def _run_workflow_inner() -> WorkflowState:
            """Inner function to wrap actual execution for timeout protection."""
            # Initialize execution
            target_path = await self._initialize_run(workflow, target_file)

            # Log workflow start
            start_time = datetime.now()
            if self.logger:
                self.logger.info(
                    "Starting workflow execution",
                    extra={
                        "workflow_id": self.state.workflow_id if self.state else None,
                        "workflow_name": workflow.name if workflow else (self.workflow.name if self.workflow else None),
                        "max_steps": max_steps,
                        "total_steps": len(workflow.steps) if workflow else (len(self.workflow.steps) if self.workflow else 0),
                        "workflow_timeout": workflow_timeout,
                    }
                )

            # Use parallel execution for independent steps
            steps_executed = 0
            completed_step_ids = set(self.state.completed_steps)
            running_step_ids: set[str] = set()

            # Main loop: each iteration runs one batch of dependency-ready
            # steps in parallel, until completion/failure or max_steps.
            while (
                self.state
                and self.workflow
                and self.state.status == "running"
            ):
                if steps_executed >= max_steps:
                    self._handle_max_steps_exceeded(max_steps)
                    break

                # Find steps ready to execute (dependencies met)
                ready_steps = self._find_ready_steps(
                    completed_step_ids, running_step_ids
                )

                if not ready_steps:
                    if self._handle_no_ready_steps(completed_step_ids):
                        break
                    continue

                # Execute ready steps in parallel
                running_step_ids.update(step.id for step in ready_steps)

                # Store completed steps with their results for dependency validation (BUG-003B)
                completed_step_results: dict[str, StepResult] = {}

                async def execute_step_wrapper(step: WorkflowStep) -> dict[str, Any]:
                    """Wrapper to adapt _execute_step_for_parallel to parallel executor interface (BUG-003B fix)."""
                    # Validate dependencies before execution (BUG-003B)
                    can_execute, skip_reason = self._can_execute_step(step, completed_step_results)

                    if not can_execute:
                        # Create skipped StepResult
                        now = datetime.now()
                        skipped_result = StepResult(
                            step_id=step.id,
                            status="skipped",
                            success=False,
                            duration=0.0,
                            started_at=now,
                            completed_at=now,
                            skip_reason=skip_reason,
                            artifacts=[],
                        )
                        completed_step_results[step.id] = skipped_result

                        # Print skip message
                        from ..core.unicode_safe import safe_print
                        safe_print(f"\n⏭️ Skipping step '{step.id}': {skip_reason}\n")

                        # Return empty artifacts (step was skipped)
                        return {}

                    # Execute step
                    step_result = await self._execute_step_for_parallel(step=step, target_path=target_path)
                    completed_step_results[step.id] = step_result

                    # Check if step failed (BUG-003B)
                    if not step_result.success:
                        # Check if step is required
                        is_required = step.condition == "required"

                        if is_required:
                            # Halt workflow for required step failure
                            from ..core.unicode_safe import safe_print
                            safe_print(
                                f"\n❌ Workflow halted: Required step '{step.id}' failed\n"
                                f"Error: {step_result.error}\n"
                            )

                            # Update workflow status
                            if self.state:
                                self.state.status = "blocked"
                                self.state.error = step_result.error

                            # Raise error to stop execution
                            raise RuntimeError(step_result.error or "Step failed")

                    # Convert StepResult artifacts (list of names) back to dict format for compatibility
                    artifacts_dict: dict[str, dict[str, Any]] = {}
                    for artifact_name in step_result.artifacts:
                        artifacts_dict[artifact_name] = {
                            "name": artifact_name,
                            "path": artifact_name,
                            "status": "complete",
                            "created_by": step.id,
                            "created_at": step_result.completed_at.isoformat(),
                        }

                    return artifacts_dict

                try:
                    results = await self.parallel_executor.execute_parallel(
                        steps=ready_steps,
                        execute_fn=execute_step_wrapper,
                        state=self.state,
                    )

                    # Process results and update state
                    should_break = await self._process_parallel_results(
                        results, completed_step_ids, running_step_ids
                    )
                    if should_break:
                        break

                    steps_executed += len(ready_steps)
                    self.save_state()

                    # Generate task manifest after step completion (Epic 7)
                    self._generate_manifest()

                    # Log progress every 10 steps
                    if steps_executed % 10 == 0 and self.logger:
                        elapsed = (datetime.now() - start_time).total_seconds()
                        self.logger.info(
                            f"Workflow progress: {steps_executed} steps executed in {elapsed:.1f}s",
                            extra={
                                "steps_executed": steps_executed,
                                "completed_steps": len(completed_step_ids),
                                "total_steps": len(self.workflow.steps),
                                "elapsed_seconds": elapsed,
                            }
                        )

                except Exception as e:
                    self._handle_execution_error(e)
                    break

            return await self._finalize_run(completed_step_ids)

        # Wrap execution with timeout
        try:
            return await asyncio.wait_for(
                _run_workflow_inner(),
                timeout=workflow_timeout
            )
        except TimeoutError:
            if self.state:
                self.state.status = "failed"
                self.state.error = f"Workflow timeout after {workflow_timeout}s"
                self.save_state()
            if self.logger:
                self.logger.error(
                    f"Workflow execution exceeded {workflow_timeout}s timeout",
                    extra={
                        "workflow_id": self.state.workflow_id,
                        "timeout_seconds": workflow_timeout,
                    }
                )
            raise TimeoutError(
                f"Workflow execution exceeded {workflow_timeout}s timeout. "
                f"Increase timeout in config (workflow.timeout_seconds) or check for blocking operations."
            ) from None
        finally:
            # Always attempt to close the beads issue (if one was created in
            # start()); fall back to the persisted .beads_issue_id marker when
            # the id is not in state variables (e.g. resumed run).
            variables = (getattr(self.state, "variables", None) or {}) if self.state else {}
            beads_issue_id = variables.get("_beads_issue_id")
            if beads_issue_id is None and self.state:
                wf_id = getattr(self.state, "workflow_id", None)
                if wf_id:
                    beads_file = self._state_dir() / wf_id / ".beads_issue_id"
                    if beads_file.exists():
                        try:
                            beads_issue_id = beads_file.read_text(
                                encoding="utf-8"
                            ).strip() or None
                        except OSError:
                            pass
            from ..simple_mode.beads_hooks import close_issue
            close_issue(self.project_root, beads_issue_id)
753
+
754
+ async def _initialize_run(
755
+ self,
756
+ workflow: Workflow | None,
757
+ target_file: str | None,
758
+ ) -> Path | None:
759
+ """Initialize workflow execution with validation and return target path."""
760
+ if workflow:
761
+ self.workflow = workflow
762
+ if not self.workflow:
763
+ raise ValueError(
764
+ "No workflow loaded. Call start() or pass workflow."
765
+ )
766
+
767
+ # Validate workflow has steps
768
+ if not self.workflow.steps:
769
+ raise ValueError("Workflow has no steps to execute")
770
+
771
+ # Ensure we have a state
772
+ if not self.state or not self.state.workflow_id.startswith(f"{self.workflow.id}-"):
773
+ await self.start(workflow=self.workflow)
774
+
775
+ # Validate first step can be executed (no dependencies)
776
+ first_step = self.workflow.steps[0]
777
+ if not first_step.requires: # No dependencies
778
+ # First step should always be ready
779
+ if self.logger:
780
+ self.logger.info(
781
+ f"First step {first_step.id} has no dependencies - ready to execute",
782
+ extra={
783
+ "step_id": first_step.id,
784
+ "agent": first_step.agent,
785
+ "action": first_step.action,
786
+ }
787
+ )
788
+
789
+ # Establish target file
790
+ target_path: Path | None = None
791
+ if target_file:
792
+ target_path = (
793
+ (self.project_root / target_file)
794
+ if not Path(target_file).is_absolute()
795
+ else Path(target_file)
796
+ )
797
+ else:
798
+ target_path = self._default_target_file()
799
+
800
+ if target_path and self.state:
801
+ self.state.variables["target_file"] = str(target_path)
802
+
803
+ return target_path
804
+
805
+ def _handle_max_steps_exceeded(self, max_steps: int) -> None:
806
+ """Handle max steps exceeded."""
807
+ self.state.status = "failed"
808
+ self.state.error = f"Max steps exceeded ({max_steps}). Aborting."
809
+ self.save_state()
810
+
811
+ def get_workflow_health(self) -> dict[str, Any]:
812
+ """
813
+ Get workflow health diagnostics.
814
+
815
+ Returns:
816
+ Dictionary with workflow health information including:
817
+ - status: Current workflow status
818
+ - elapsed_seconds: Time since workflow started
819
+ - completed_steps: Number of completed steps
820
+ - total_steps: Total number of steps
821
+ - progress_percent: Percentage of steps completed
822
+ - time_since_last_step: Seconds since last step completed
823
+ - is_stuck: Whether workflow appears to be stuck (no progress in 5 minutes)
824
+ - current_step: Current step ID
825
+ - error: Error message if any
826
+ """
827
+ if not self.state:
828
+ return {"status": "not_started", "message": "Workflow not started"}
829
+
830
+ elapsed = (
831
+ (datetime.now() - self.state.started_at).total_seconds()
832
+ if self.state.started_at else 0
833
+ )
834
+ completed = len(self.state.completed_steps)
835
+ total = len(self.workflow.steps) if self.workflow else 0
836
+
837
+ # Check if stuck (no progress in last 5 minutes)
838
+ last_step_time = None
839
+ if self.state.step_executions:
840
+ completed_times = [
841
+ se.completed_at for se in self.state.step_executions
842
+ if se.completed_at
843
+ ]
844
+ if completed_times:
845
+ last_step_time = max(completed_times)
846
+
847
+ if not last_step_time:
848
+ last_step_time = self.state.started_at
849
+
850
+ time_since_last_step = (
851
+ (datetime.now() - last_step_time).total_seconds()
852
+ if last_step_time else elapsed
853
+ )
854
+ is_stuck = time_since_last_step > 300 # 5 minutes
855
+
856
+ return {
857
+ "status": self.state.status,
858
+ "elapsed_seconds": elapsed,
859
+ "completed_steps": completed,
860
+ "total_steps": total,
861
+ "progress_percent": (completed / total * 100) if total > 0 else 0,
862
+ "time_since_last_step": time_since_last_step,
863
+ "is_stuck": is_stuck,
864
+ "current_step": self.state.current_step,
865
+ "error": self.state.error,
866
+ }
867
+
868
+ def _find_ready_steps(
869
+ self,
870
+ completed_step_ids: set[str],
871
+ running_step_ids: set[str],
872
+ ) -> list[WorkflowStep]:
873
+ """Find steps ready to execute (dependencies met)."""
874
+ available_artifacts = set(self.state.artifacts.keys())
875
+ return self.parallel_executor.find_ready_steps(
876
+ workflow_steps=self.workflow.steps,
877
+ completed_step_ids=completed_step_ids,
878
+ running_step_ids=running_step_ids,
879
+ available_artifacts=available_artifacts,
880
+ )
881
+
882
+ def _handle_no_ready_steps(self, completed_step_ids: set[str]) -> bool:
883
+ """Handle case when no steps are ready with better diagnostics. Returns True if workflow should stop."""
884
+ if len(completed_step_ids) >= len(self.workflow.steps):
885
+ # Workflow is complete
886
+ self.state.status = "completed"
887
+ self.state.current_step = None
888
+ self.save_state()
889
+ return True
890
+ else:
891
+ # Workflow is blocked - provide diagnostics
892
+ available_artifacts = set(self.state.artifacts.keys())
893
+ pending_steps = [
894
+ s for s in self.workflow.steps
895
+ if s.id not in completed_step_ids
896
+ ]
897
+
898
+ # Check what's blocking
899
+ blocking_info = []
900
+ for step in pending_steps:
901
+ missing = [req for req in (step.requires or []) if req not in available_artifacts]
902
+ if missing:
903
+ blocking_info.append(f"Step {step.id} ({step.agent}/{step.action}): missing {missing}")
904
+
905
+ error_msg = (
906
+ f"Workflow blocked: no ready steps and workflow not complete. "
907
+ f"Completed: {len(completed_step_ids)}/{len(self.workflow.steps)}. "
908
+ f"Blocking issues: {blocking_info if blocking_info else 'Unknown - check step dependencies'}"
909
+ )
910
+
911
+ self.state.status = "failed"
912
+ self.state.error = error_msg
913
+ self.save_state()
914
+
915
+ # Log detailed diagnostics
916
+ if self.logger:
917
+ self.logger.error(
918
+ "Workflow blocked - no ready steps",
919
+ extra={
920
+ "completed_steps": list(completed_step_ids),
921
+ "pending_steps": [s.id for s in pending_steps],
922
+ "available_artifacts": list(available_artifacts),
923
+ "blocking_info": blocking_info,
924
+ }
925
+ )
926
+
927
+ return True
928
+
929
+ async def _process_parallel_results(
930
+ self,
931
+ results: list[Any],
932
+ completed_step_ids: set[str],
933
+ running_step_ids: set[str],
934
+ ) -> bool:
935
+ """
936
+ Process results from parallel execution.
937
+ Returns True if workflow should stop (failed or aborted).
938
+ """
939
+ for result in results:
940
+ step_logger = self.logger.with_context(
941
+ step_id=result.step.id,
942
+ agent=result.step.agent,
943
+ ) if self.logger else None
944
+
945
+ if result.error:
946
+ should_break = await self._handle_step_error(
947
+ result, step_logger, completed_step_ids, running_step_ids
948
+ )
949
+ if should_break:
950
+ return True
951
+ continue
952
+
953
+ # Handle successful step completion
954
+ await self._handle_step_success(
955
+ result, step_logger, completed_step_ids, running_step_ids
956
+ )
957
+
958
+ return False
959
+
960
+ async def _handle_step_error(
961
+ self,
962
+ result: Any,
963
+ step_logger: Any,
964
+ completed_step_ids: set[str],
965
+ running_step_ids: set[str],
966
+ ) -> bool:
967
+ """Handle step error. Returns True if workflow should stop."""
968
+ # Publish step failed event (Phase 2)
969
+ await self.event_bus.publish(
970
+ WorkflowEvent(
971
+ event_type=EventType.STEP_FAILED,
972
+ workflow_id=self.state.workflow_id,
973
+ step_id=result.step.id,
974
+ data={
975
+ "agent": result.step.agent,
976
+ "action": result.step.action,
977
+ "error": str(result.error),
978
+ "attempts": getattr(result, "attempts", 1),
979
+ },
980
+ timestamp=datetime.now(),
981
+ correlation_id=f"{self.state.workflow_id}:{result.step.id}",
982
+ )
983
+ )
984
+
985
+ # Step failed - use error recovery and auto-progression (Epic 14)
986
+ error_context = ErrorContext(
987
+ workflow_id=self.state.workflow_id,
988
+ step_id=result.step.id,
989
+ agent=result.step.agent,
990
+ action=result.step.action,
991
+ step_number=None,
992
+ total_steps=len(self.workflow.steps),
993
+ workflow_status=self.state.status,
994
+ )
995
+
996
+ # Handle error with recovery manager (Epic 14)
997
+ recovery_result = None
998
+ user_friendly_error = None
999
+ if self.error_recovery:
1000
+ recovery_result = self.error_recovery.handle_error(
1001
+ error=result.error,
1002
+ context=error_context,
1003
+ attempt=getattr(result, "attempts", 1),
1004
+ )
1005
+
1006
+ # Store user-friendly message (can't modify frozen dataclass)
1007
+ if recovery_result.get("user_message"):
1008
+ user_friendly_error = recovery_result["user_message"]
1009
+
1010
+ if self.auto_progression.should_auto_progress():
1011
+ # Get review result if this was a reviewer step
1012
+ review_result = None
1013
+ if result.step.agent == "reviewer":
1014
+ review_result = self.state.variables.get("reviewer_result")
1015
+
1016
+ decision = self.auto_progression.handle_step_completion(
1017
+ step=result.step,
1018
+ state=self.state,
1019
+ step_execution=result.step_execution,
1020
+ review_result=review_result,
1021
+ )
1022
+
1023
+ if decision.action == ProgressionAction.RETRY:
1024
+ # Retry the step - remove from completed and add back to ready
1025
+ completed_step_ids.discard(result.step.id)
1026
+ running_step_ids.discard(result.step.id)
1027
+ # Apply backoff if specified
1028
+ if decision.metadata.get("backoff_seconds"):
1029
+ await asyncio.sleep(decision.metadata["backoff_seconds"])
1030
+ if step_logger:
1031
+ step_logger.info(
1032
+ f"Retrying step {result.step.id} (attempt {decision.retry_count})",
1033
+ )
1034
+ return False
1035
+ elif decision.action == ProgressionAction.SKIP:
1036
+ # Skip this step
1037
+ completed_step_ids.add(result.step.id)
1038
+ running_step_ids.discard(result.step.id)
1039
+ if result.step.id not in self.state.skipped_steps:
1040
+ self.state.skipped_steps.append(result.step.id)
1041
+ if step_logger:
1042
+ step_logger.warning(
1043
+ f"Skipping step {result.step.id}: {decision.reason}",
1044
+ )
1045
+ return False
1046
+ elif decision.action == ProgressionAction.ABORT:
1047
+ # Abort workflow
1048
+ self.state.status = "failed"
1049
+ self.state.error = decision.reason
1050
+ if step_logger:
1051
+ step_logger.error(
1052
+ f"Workflow aborted: {decision.reason}",
1053
+ )
1054
+
1055
+ # Publish workflow failed event (Phase 2)
1056
+ await self.event_bus.publish(
1057
+ WorkflowEvent(
1058
+ event_type=EventType.WORKFLOW_FAILED,
1059
+ workflow_id=self.state.workflow_id,
1060
+ step_id=result.step.id,
1061
+ data={
1062
+ "error": decision.reason,
1063
+ "step_id": result.step.id,
1064
+ },
1065
+ timestamp=datetime.now(),
1066
+ correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1067
+ )
1068
+ )
1069
+
1070
+ self.save_state()
1071
+ if self.progress_manager:
1072
+ await self.progress_manager.send_workflow_failed(decision.reason)
1073
+ await self.progress_manager.stop()
1074
+ return True
1075
+ elif decision.action == ProgressionAction.CONTINUE:
1076
+ # Continue despite error (recoverable)
1077
+ completed_step_ids.add(result.step.id)
1078
+ running_step_ids.discard(result.step.id)
1079
+ if step_logger:
1080
+ step_logger.warning(
1081
+ f"Step {result.step.id} failed but continuing: {decision.reason}",
1082
+ )
1083
+ return False
1084
+
1085
+ # Fallback: WorkflowFailureConfig when auto-progression disabled (plan 3.1)
1086
+ error_message = user_friendly_error if user_friendly_error else str(result.error)
1087
+ try:
1088
+ from ..core.config import load_config
1089
+
1090
+ cfg = load_config()
1091
+ wf = getattr(cfg, "workflow", None)
1092
+ fail_cfg = getattr(wf, "failure", None) if wf else None
1093
+ except Exception: # pylint: disable=broad-except
1094
+ fail_cfg = None
1095
+ on_fail = getattr(fail_cfg, "on_step_fail", "fail") or "fail"
1096
+ retry_count = getattr(fail_cfg, "retry_count", 1) or 0
1097
+ escalate_pause = getattr(fail_cfg, "escalate_to_pause", True)
1098
+
1099
+ raw = self.state.variables.get("_step_retries")
1100
+ retries_var = raw if isinstance(raw, dict) else {}
1101
+ self.state.variables["_step_retries"] = retries_var
1102
+ retries_used = retries_var.get(result.step.id, 0)
1103
+
1104
+ if on_fail == "retry" and retries_used < retry_count:
1105
+ retries_var[result.step.id] = retries_used + 1
1106
+ completed_step_ids.discard(result.step.id)
1107
+ running_step_ids.discard(result.step.id)
1108
+ if step_logger:
1109
+ step_logger.info(f"Retrying step {result.step.id} (attempt {retries_used + 1}/{retry_count})")
1110
+ return False
1111
+
1112
+ if on_fail == "skip":
1113
+ completed_step_ids.add(result.step.id)
1114
+ running_step_ids.discard(result.step.id)
1115
+ if result.step.id not in self.state.skipped_steps:
1116
+ self.state.skipped_steps.append(result.step.id)
1117
+ if step_logger:
1118
+ step_logger.warning(f"Skipping step {result.step.id}: {error_message}")
1119
+ return False
1120
+
1121
+ # fail or escalate: stop workflow
1122
+ self.state.status = "paused" if (on_fail == "escalate" and escalate_pause) else "failed"
1123
+ self.state.error = f"Step {result.step.id} failed: {error_message}"
1124
+ suggest = None
1125
+ if on_fail == "escalate" and recovery_result and recovery_result.get("suggestions"):
1126
+ suggest = [getattr(s, "action", str(s)) for s in recovery_result["suggestions"][:3]]
1127
+
1128
+ # Publish workflow failed event (Phase 2)
1129
+ await self.event_bus.publish(
1130
+ WorkflowEvent(
1131
+ event_type=EventType.WORKFLOW_FAILED,
1132
+ workflow_id=self.state.workflow_id,
1133
+ step_id=result.step.id,
1134
+ data={
1135
+ "error": error_message,
1136
+ "step_id": result.step.id,
1137
+ "behavior": on_fail,
1138
+ "suggestions": suggest,
1139
+ },
1140
+ timestamp=datetime.now(),
1141
+ correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1142
+ )
1143
+ )
1144
+
1145
+ self.save_state()
1146
+
1147
+ # Send failure update
1148
+ if self.progress_manager:
1149
+ await self.progress_manager.send_workflow_failed(error_message)
1150
+ await self.progress_manager.stop()
1151
+ return True
1152
+
1153
+ async def _handle_step_success(
1154
+ self,
1155
+ result: Any,
1156
+ step_logger: Any,
1157
+ completed_step_ids: set[str],
1158
+ running_step_ids: set[str],
1159
+ ) -> None:
1160
+ """Handle successful step completion."""
1161
+ # Mark step as completed
1162
+ completed_step_ids.add(result.step.id)
1163
+ running_step_ids.discard(result.step.id)
1164
+
1165
+ # Get review result if this was a reviewer step (for gate evaluation)
1166
+ review_result = None
1167
+ if result.step.agent == "reviewer":
1168
+ review_result = self.state.variables.get("reviewer_result")
1169
+
1170
+ # Issue fix: Print artifact paths after each step (Hidden workflow state)
1171
+ if self.print_paths and result.artifacts:
1172
+ self._print_step_artifacts(result.step, result.artifacts, result.step_execution)
1173
+
1174
+ # Publish step completed event (Phase 2)
1175
+ await self.event_bus.publish(
1176
+ WorkflowEvent(
1177
+ event_type=EventType.STEP_COMPLETED,
1178
+ workflow_id=self.state.workflow_id,
1179
+ step_id=result.step.id,
1180
+ data={
1181
+ "agent": result.step.agent,
1182
+ "action": result.step.action,
1183
+ "duration_seconds": result.step_execution.duration_seconds,
1184
+ "artifact_count": len(result.artifacts) if result.artifacts else 0,
1185
+ },
1186
+ timestamp=datetime.now(),
1187
+ correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1188
+ )
1189
+ )
1190
+
1191
+ # Publish artifact created events (Phase 2)
1192
+ if result.artifacts:
1193
+ for artifact_name, artifact_data in result.artifacts.items():
1194
+ await self.event_bus.publish(
1195
+ WorkflowEvent(
1196
+ event_type=EventType.ARTIFACT_CREATED,
1197
+ workflow_id=self.state.workflow_id,
1198
+ step_id=result.step.id,
1199
+ data={
1200
+ "artifact_name": artifact_name,
1201
+ "artifact_path": artifact_data.get("path", ""),
1202
+ "created_by": result.step.id,
1203
+ },
1204
+ timestamp=datetime.now(),
1205
+ correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1206
+ )
1207
+ )
1208
+
1209
+ # Use auto-progression to handle step completion and gate evaluation
1210
+ if self.auto_progression.should_auto_progress():
1211
+ decision = self.auto_progression.handle_step_completion(
1212
+ step=result.step,
1213
+ state=self.state,
1214
+ step_execution=result.step_execution,
1215
+ review_result=review_result,
1216
+ )
1217
+
1218
+ # Update current step based on gate decision if needed
1219
+ if decision.next_step_id:
1220
+ self.state.current_step = decision.next_step_id
1221
+
1222
+ if step_logger:
1223
+ step_logger.info(
1224
+ f"Step completed: {decision.reason}",
1225
+ action=result.step.action,
1226
+ duration_seconds=result.step_execution.duration_seconds,
1227
+ artifact_count=len(result.artifacts) if result.artifacts else 0,
1228
+ next_step=decision.next_step_id,
1229
+ )
1230
+ else:
1231
+ if step_logger:
1232
+ step_logger.info(
1233
+ "Step completed",
1234
+ action=result.step.action,
1235
+ duration_seconds=result.step_execution.duration_seconds,
1236
+ artifact_count=len(result.artifacts) if result.artifacts else 0,
1237
+ )
1238
+
1239
+ # Send step completed update (Epic 11: Include gate result for quality dashboard)
1240
+ is_gate_step = result.step.agent == "reviewer" and result.step.gate is not None
1241
+ if self.progress_manager:
1242
+ # Extract gate result if this was a reviewer step
1243
+ gate_result = None
1244
+ if result.step.agent == "reviewer" and review_result:
1245
+ # Get gate result from state variables (set by auto-progression)
1246
+ gate_last = self.state.variables.get("gate_last", {})
1247
+ if gate_last:
1248
+ gate_result = gate_last
1249
+
1250
+ # Publish gate evaluated event (Phase 2)
1251
+ await self.event_bus.publish(
1252
+ WorkflowEvent(
1253
+ event_type=EventType.GATE_EVALUATED,
1254
+ workflow_id=self.state.workflow_id,
1255
+ step_id=result.step.id,
1256
+ data={
1257
+ "gate_result": gate_result,
1258
+ "passed": gate_result.get("passed", False),
1259
+ },
1260
+ timestamp=datetime.now(),
1261
+ correlation_id=f"{self.state.workflow_id}:{result.step.id}",
1262
+ )
1263
+ )
1264
+
1265
+ await self.progress_manager.send_step_completed(
1266
+ step_id=result.step.id,
1267
+ agent=result.step.agent,
1268
+ action=result.step.action,
1269
+ duration=result.step_execution.duration_seconds,
1270
+ gate_result=gate_result,
1271
+ )
1272
+
1273
+ # Epic 12: Automatic checkpointing after step completion
1274
+ if self.checkpoint_manager.should_checkpoint(
1275
+ step=result.step,
1276
+ state=self.state,
1277
+ is_gate_step=is_gate_step,
1278
+ ):
1279
+ # Enhance state with checkpoint metadata before saving
1280
+ checkpoint_metadata = self.checkpoint_manager.get_checkpoint_metadata(
1281
+ state=self.state,
1282
+ step=result.step,
1283
+ )
1284
+ # Store metadata in state variables for persistence
1285
+ if "_checkpoint_metadata" not in self.state.variables:
1286
+ self.state.variables["_checkpoint_metadata"] = {}
1287
+ self.state.variables["_checkpoint_metadata"].update(checkpoint_metadata)
1288
+
1289
+ # Save checkpoint
1290
+ self.save_state()
1291
+ self.checkpoint_manager.record_checkpoint(result.step.id)
1292
+
1293
+ if self.logger:
1294
+ self.logger.info(
1295
+ f"Checkpoint created after step {result.step.id}",
1296
+ checkpoint_metadata=checkpoint_metadata,
1297
+ )
1298
+
1299
+ # Update artifacts from result
1300
+ if result.artifacts and isinstance(result.artifacts, dict):
1301
+ for art_name, art_data in result.artifacts.items():
1302
+ if isinstance(art_data, dict):
1303
+ artifact = Artifact(
1304
+ name=art_data.get("name", art_name),
1305
+ path=art_data.get("path", ""),
1306
+ status="complete",
1307
+ created_by=result.step.id,
1308
+ created_at=datetime.now(),
1309
+ metadata=art_data.get("metadata", {}),
1310
+ )
1311
+ self.state.artifacts[artifact.name] = artifact
1312
+
1313
+ def _handle_execution_error(self, error: Exception) -> None:
1314
+ """Handle execution error."""
1315
+ self.state.status = "failed"
1316
+ self.state.error = str(error)
1317
+ if self.logger:
1318
+ self.logger.error(
1319
+ "Workflow execution failed",
1320
+ error=str(error),
1321
+ exc_info=True,
1322
+ )
1323
+ self.save_state()
1324
+
1325
    async def _finalize_run(self, completed_step_ids: set[str]) -> WorkflowState:
        """Finalize workflow execution and return final state.

        Performs end-of-run bookkeeping: promotes a still-"running" workflow
        to "completed", publishes WORKFLOW_COMPLETED, persists state, stops
        progress reporting, cleans up worktrees, and best-effort dual-writes
        execution metrics to analytics.

        Args:
            completed_step_ids: IDs of the steps that finished during this run.

        Returns:
            The final WorkflowState.

        Raises:
            RuntimeError: If the workflow state was lost during execution.
        """
        if not self.state:
            raise RuntimeError("Workflow state lost during execution")

        # Mark as completed if no error (failure paths set a terminal status
        # before reaching here, so "running" means a clean finish).
        if self.state.status == "running":
            self.state.status = "completed"
            if self.logger:
                self.logger.info(
                    "Workflow completed",
                    completed_steps=len(completed_step_ids),
                    total_steps=len(self.workflow.steps) if self.workflow else 0,
                )

        # Publish workflow completed event (Phase 2)
        # NOTE(review): published unconditionally, even if status is "failed"
        # or "paused" — confirm whether subscribers expect that.
        await self.event_bus.publish(
            WorkflowEvent(
                event_type=EventType.WORKFLOW_COMPLETED,
                workflow_id=self.state.workflow_id,
                step_id=None,
                data={
                    "completed_steps": len(completed_step_ids),
                    "total_steps": len(self.workflow.steps) if self.workflow else 0,
                },
                timestamp=datetime.now(),
                correlation_id=self.state.workflow_id,
            )
        )

        self.save_state()

        # Send completion summary
        if self.progress_manager:
            await self.progress_manager.send_workflow_completed()
            await self.progress_manager.stop()

        # Best-effort cleanup of worktrees created during this run
        try:
            await self.worktree_manager.cleanup_all()
        except Exception:
            pass

        # Dual-write workflow completion to analytics (best-effort: an
        # analytics failure must never fail the workflow itself)
        if self.state.status in ("completed", "failed") and self.workflow:
            try:
                from .analytics_dual_write import record_workflow_execution_to_analytics

                duration_sec = 0.0
                if self.state.started_at:
                    end = datetime.now()
                    duration_sec = (end - self.state.started_at).total_seconds()
                record_workflow_execution_to_analytics(
                    project_root=self.project_root,
                    workflow_id=self.state.workflow_id,
                    workflow_name=self.workflow.name or self.state.workflow_id,
                    duration_seconds=duration_sec,
                    steps=len(self.workflow.steps),
                    success=(self.state.status == "completed"),
                )
            except Exception:  # pylint: disable=broad-except
                pass

        return self.state
1389
+
1390
    async def _execute_step_for_parallel(
        self, step: WorkflowStep, target_path: Path | None
    ) -> StepResult:
        """
        Execute a single workflow step using Cursor Skills and return result (BUG-003B fix).

        This method now returns StepResult with proper error handling:
        - success=True + artifacts on success
        - success=False + error details on failure (no exception raised)

        State updates (step_execution tracking) are handled by ParallelStepExecutor.

        Args:
            step: Workflow step to execute.
            target_path: Optional target file the agent should operate on.

        Returns:
            StepResult describing the outcome; this method never raises for
            step failures (only for a not-started workflow).

        Raises:
            ValueError: If the workflow has not been started.
        """
        if not self.state or not self.workflow:
            raise ValueError("Workflow not started")

        # Normalize action to underscore form; agent names compared lowercase.
        action = self._normalize_action(step.action)
        agent_name = (step.agent or "").strip().lower()

        # Publish step started event (Phase 2)
        await self.event_bus.publish(
            WorkflowEvent(
                event_type=EventType.STEP_STARTED,
                workflow_id=self.state.workflow_id,
                step_id=step.id,
                data={
                    "agent": agent_name,
                    "action": action,
                    "step_id": step.id,
                },
                timestamp=datetime.now(),
                correlation_id=f"{self.state.workflow_id}:{step.id}",
            )
        )

        # Handle completion/finalization steps that don't require agent execution
        if agent_name == "orchestrator" and action in ["finalize", "complete"]:
            # Return successful result for completion steps (no artifacts)
            now = datetime.now()
            return StepResult(
                step_id=step.id,
                status="completed",
                success=True,
                duration=0.0,
                started_at=now,
                completed_at=now,
                artifacts=[],
            )

        # Track step start time for duration calculation
        step_started_at = datetime.now()

        # Use context manager for worktree lifecycle (guaranteed cleanup)
        async with self._worktree_context(step) as worktree_path:
            worktree_name = self._worktree_name_for_step(step.id)

            # Try AgentHandlerRegistry first for context-aware execution (BUG-003 fix)
            # Falls back to SkillInvoker if no handler found
            from .agent_handlers import AgentHandlerRegistry

            # Helper function to run agents (needed by handlers)
            async def run_agent(agent: str, command: str, **kwargs: Any) -> dict[str, Any]:
                """Run agent by importing and invoking its class."""
                # Dynamic import: tapps_agents.agents.<agent>.agent.<Agent>Agent
                module = __import__(f"tapps_agents.agents.{agent}.agent", fromlist=["*"])
                class_name = f"{agent.title()}Agent"
                agent_cls = getattr(module, class_name)
                instance = agent_cls()
                await instance.activate(self.project_root)
                try:
                    return await instance.run(command, **kwargs)
                finally:
                    # Best-effort resource release; not all agents define close().
                    if hasattr(instance, 'close'):
                        await instance.close()

            # Create handler registry and try to find handler
            registry = AgentHandlerRegistry.create_registry(
                project_root=self.project_root,
                state=self.state,
                workflow=self.workflow,
                run_agent_fn=run_agent,
                executor=self,
            )

            handler = registry.find_handler(agent_name, action)

            try:
                from ..core.unicode_safe import safe_print

                if handler:
                    # Use handler for context-aware execution (e.g., ImplementerHandler)
                    safe_print(f"\n[EXEC] Executing {agent_name}/{action} via handler...", flush=True)

                    # Execute handler and get artifacts directly
                    # Note: Handler execution happens in main working directory, not worktree
                    # Worktree is only used for skill invocation fallback
                    created_artifacts_list = await handler.execute(step, action, target_path)

                    # Write success marker
                    step_completed_at = datetime.now()
                    duration = (step_completed_at - step_started_at).total_seconds()

                    # Handler returns a list of dicts with "path"/"name" keys.
                    found_artifact_paths = [art["path"] for art in (created_artifacts_list or [])]
                    artifact_names = [art["name"] for art in (created_artifacts_list or [])]

                    marker_path = self.marker_writer.write_done_marker(
                        workflow_id=self.state.workflow_id,
                        step_id=step.id,
                        agent=agent_name,
                        action=action,
                        worktree_name=worktree_name,
                        worktree_path=str(worktree_path),
                        expected_artifacts=step.creates or [],
                        found_artifacts=found_artifact_paths,
                        duration_seconds=duration,
                        started_at=step_started_at,
                        completed_at=step_completed_at,
                    )

                    if self.logger:
                        self.logger.debug(
                            f"Handler execution complete for step {step.id}",
                            marker_path=str(marker_path),
                        )

                    # Return successful StepResult (BUG-003B fix)
                    return StepResult(
                        step_id=step.id,
                        status="completed",
                        success=True,
                        duration=duration,
                        started_at=step_started_at,
                        completed_at=step_completed_at,
                        artifacts=artifact_names,
                    )
                else:
                    # Fall back to SkillInvoker for steps without handlers
                    safe_print(f"\n[EXEC] Executing {agent_name}/{action} via skill...", flush=True)
                    await self.skill_invoker.invoke_skill(
                        agent_name=agent_name,
                        action=action,
                        step=step,
                        target_path=target_path,
                        worktree_path=worktree_path,
                        state=self.state,
                    )
                    # Skill invoker handles execution (direct execution or Cursor Skills)
                    # Artifacts are extracted after completion

                    # Extract artifacts from worktree (skill_invoker path only)
                    artifacts = await self.worktree_manager.extract_artifacts(
                        worktree_path=worktree_path,
                        step=step,
                    )

                    # Extract artifact paths and names
                    found_artifact_paths = []
                    artifact_names = []
                    for artifact in artifacts:
                        found_artifact_paths.append(artifact.path)
                        artifact_names.append(artifact.name)

                    # Write DONE marker for successful completion
                    step_completed_at = datetime.now()
                    duration = (step_completed_at - step_started_at).total_seconds()

                    marker_path = self.marker_writer.write_done_marker(
                        workflow_id=self.state.workflow_id,
                        step_id=step.id,
                        agent=agent_name,
                        action=action,
                        worktree_name=worktree_name,
                        worktree_path=str(worktree_path),
                        expected_artifacts=step.creates or [],
                        found_artifacts=found_artifact_paths,
                        duration_seconds=duration,
                        started_at=step_started_at,
                        completed_at=step_completed_at,
                    )

                    if self.logger:
                        self.logger.debug(
                            f"DONE marker written for step {step.id}",
                            marker_path=str(marker_path),
                        )

                    # Return successful StepResult (BUG-003B fix)
                    # Worktree cleanup is handled by context manager
                    return StepResult(
                        step_id=step.id,
                        status="completed",
                        success=True,
                        duration=duration,
                        started_at=step_started_at,
                        completed_at=step_completed_at,
                        artifacts=artifact_names,
                    )

            except (TimeoutError, RuntimeError) as e:
                # Write FAILED marker for timeout or execution errors
                step_failed_at = datetime.now()
                duration = (step_failed_at - step_started_at).total_seconds()
                error_type = type(e).__name__
                error_msg = str(e)
                error_tb = traceback.format_exc()

                # Try to get completion status if available (for missing artifacts)
                found_artifact_paths = []
                try:
                    from .cursor_skill_helper import check_skill_completion
                    completion_status = check_skill_completion(
                        worktree_path=worktree_path,
                        expected_artifacts=step.creates or [],
                    )
                    found_artifact_paths = completion_status.get("found_artifacts", [])
                except Exception:
                    pass

                marker_path = self.marker_writer.write_failed_marker(
                    workflow_id=self.state.workflow_id,
                    step_id=step.id,
                    agent=agent_name,
                    action=action,
                    error=error_msg,
                    worktree_name=worktree_name,
                    worktree_path=str(worktree_path),
                    expected_artifacts=step.creates or [],
                    found_artifacts=found_artifact_paths,
                    duration_seconds=duration,
                    started_at=step_started_at,
                    failed_at=step_failed_at,
                    error_type=error_type,
                    metadata={
                        "marker_location": f".tapps-agents/workflows/markers/{self.state.workflow_id}/step-{step.id}/FAILED.json",
                    },
                )

                if self.logger:
                    self.logger.warning(
                        f"FAILED marker written for step {step.id}",
                        marker_path=str(marker_path),
                        error=error_msg,
                    )

                # Include marker location in error message for better troubleshooting
                from ..core.unicode_safe import safe_print
                safe_print(
                    f"\n[INFO] Failure marker written to: {marker_path}",
                    flush=True,
                )

                # Return failed StepResult (BUG-003B fix - don't raise)
                return StepResult(
                    step_id=step.id,
                    status="failed",
                    success=False,
                    duration=duration,
                    started_at=step_started_at,
                    completed_at=step_failed_at,
                    error=error_msg,
                    error_traceback=error_tb,
                    artifacts=[],
                )
            except Exception as e:
                # Write FAILED marker for unexpected errors
                step_failed_at = datetime.now()
                duration = (step_failed_at - step_started_at).total_seconds()
                error_type = type(e).__name__
                error_msg = str(e)
                error_tb = traceback.format_exc()

                marker_path = self.marker_writer.write_failed_marker(
                    workflow_id=self.state.workflow_id,
                    step_id=step.id,
                    agent=agent_name,
                    action=action,
                    error=error_msg,
                    worktree_name=worktree_name,
                    # Guard against failures before worktree creation completed.
                    worktree_path=str(worktree_path) if 'worktree_path' in locals() else None,
                    expected_artifacts=step.creates or [],
                    found_artifacts=[],
                    duration_seconds=duration,
                    started_at=step_started_at,
                    failed_at=step_failed_at,
                    error_type=error_type,
                    metadata={
                        "marker_location": f".tapps-agents/workflows/markers/{self.state.workflow_id}/step-{step.id}/FAILED.json",
                    },
                )

                if self.logger:
                    self.logger.error(
                        f"FAILED marker written for step {step.id} (unexpected error)",
                        marker_path=str(marker_path),
                        error=error_msg,
                        exc_info=True,
                    )

                # Return failed StepResult (BUG-003B fix - don't raise)
                return StepResult(
                    step_id=step.id,
                    status="failed",
                    success=False,
                    duration=duration,
                    started_at=step_started_at,
                    completed_at=step_failed_at,
                    error=error_msg,
                    error_traceback=error_tb,
                    artifacts=[],
                )
1697
+ )
1698
+
1699
    @asynccontextmanager
    async def _worktree_context(
        self, step: WorkflowStep
    ) -> AsyncIterator[Path]:
        """
        Context manager for worktree lifecycle management.

        Ensures worktree is properly cleaned up even on cancellation or exceptions.
        This is a 2025 best practice for resource management in async code.

        Args:
            step: Workflow step that needs a worktree

        Yields:
            Path to the worktree

        Example:
            async with self._worktree_context(step) as worktree_path:
                # Use worktree_path here
                # Worktree automatically cleaned up on exit
        """
        worktree_name = self._worktree_name_for_step(step.id)
        worktree_path: Path | None = None

        try:
            # Create worktree
            worktree_path = await self.worktree_manager.create_worktree(
                worktree_name=worktree_name
            )

            # Copy artifacts from previous steps to worktree so the step sees
            # the outputs it depends on.
            artifacts_list = list(self.state.artifacts.values())
            await self.worktree_manager.copy_artifacts(
                worktree_path=worktree_path,
                artifacts=artifacts_list,
            )

            # Yield worktree path
            yield worktree_path

        finally:
            # Always cleanup, even on cancellation or exception.
            # worktree_path is None if create_worktree itself failed — nothing to remove then.
            if worktree_path:
                try:
                    # Determine if we should delete the branch based on configuration
                    from ..core.config import load_config
                    config = load_config()
                    should_delete = (
                        config.workflow.branch_cleanup.delete_branches_on_cleanup
                        if (
                            config.workflow.branch_cleanup
                            and config.workflow.branch_cleanup.enabled
                        )
                        else True  # Default to True for backward compatibility (same as parameter default)
                    )
                    await self.worktree_manager.remove_worktree(
                        worktree_name, delete_branch=should_delete
                    )
                except Exception as e:
                    # Log but don't raise - cleanup failures shouldn't break workflow
                    if self.logger:
                        self.logger.warning(
                            f"Failed to cleanup worktree {worktree_name}: {e}",
                            step_id=step.id,
                        )
1764
+
1765
+ def _worktree_name_for_step(self, step_id: str) -> str:
1766
+ """
1767
+ Deterministic, collision-resistant worktree name for a workflow step.
1768
+
1769
+ Keeps names short/safe for Windows while still traceable back to workflow+step.
1770
+ """
1771
+ if not self.state:
1772
+ raise ValueError("Workflow not started")
1773
+ raw = f"workflow-{self.state.workflow_id}-step-{step_id}"
1774
+ digest = hashlib.sha256(raw.encode("utf-8")).hexdigest()[:8]
1775
+ base = f"{raw}-{digest}"
1776
+ return WorktreeManager._sanitize_component(base, max_len=80)
1777
+
1778
+ def get_current_step(self) -> WorkflowStep | None:
1779
+ """Get the current workflow step."""
1780
+ if not self.workflow or not self.state:
1781
+ return None
1782
+
1783
+ for step in self.workflow.steps:
1784
+ if step.id == self.state.current_step:
1785
+ return step
1786
+ return None
1787
+
1788
+ def _default_target_file(self) -> Path | None:
1789
+ """Get default target file path."""
1790
+ # Try common locations
1791
+ candidates = [
1792
+ self.project_root / "src" / "app.py",
1793
+ self.project_root / "app.py",
1794
+ self.project_root / "main.py",
1795
+ ]
1796
+ for candidate in candidates:
1797
+ if candidate.exists():
1798
+ return candidate
1799
+ return None
1800
+
1801
    async def _execute_step(
        self, step: WorkflowStep, target_path: Path | None
    ) -> None:
        """
        Execute a single workflow step using Cursor Skills.

        Creates a worktree, invokes the skill, polls for completion markers,
        extracts artifacts, and advances the workflow. Raises on failure
        (unlike the parallel variant, which returns a failed StepResult).

        Args:
            step: Workflow step to execute
            target_path: Optional target file path

        Raises:
            ValueError: If the workflow has not been started.
            TimeoutError: If the skill does not complete within the max wait.
        """
        if not self.state or not self.workflow:
            raise ValueError("Workflow not started")

        action = self._normalize_action(step.action)
        agent_name = (step.agent or "").strip().lower()

        # Handle completion/finalization steps that don't require agent execution
        if agent_name == "orchestrator" and action in ["finalize", "complete"]:
            # Mark step as completed without executing an agent
            step_execution = StepExecution(
                step_id=step.id,
                agent=agent_name,
                action=action,
                started_at=datetime.now(),
                completed_at=datetime.now(),
                status="completed",
            )
            self.state.step_executions.append(step_execution)
            self._advance_step()
            self.save_state()
            return

        # Create step execution tracking
        step_execution = StepExecution(
            step_id=step.id,
            agent=agent_name,
            action=action,
            started_at=datetime.now(),
        )
        self.state.step_executions.append(step_execution)

        try:
            # Create worktree for this step
            worktree_name = self._worktree_name_for_step(step.id)
            worktree_path = await self.worktree_manager.create_worktree(
                worktree_name=worktree_name
            )

            # Copy artifacts from previous steps to worktree
            artifacts_list = list(self.state.artifacts.values())
            await self.worktree_manager.copy_artifacts(
                worktree_path=worktree_path,
                artifacts=artifacts_list,
            )

            # Invoke Skill via SkillInvoker (direct execution)
            result = await self.skill_invoker.invoke_skill(
                agent_name=agent_name,
                action=action,
                step=step,
                target_path=target_path,
                worktree_path=worktree_path,
                state=self.state,
            )

            # Wait for Skill to complete (direct execution)
            # Poll for artifacts or completion marker
            import asyncio

            from .cursor_skill_helper import check_skill_completion

            max_wait_time = 3600  # 1 hour max wait
            poll_interval = 2  # Check every 2 seconds
            elapsed = 0

            print(f"Waiting for {agent_name}/{action} to complete...")
            while elapsed < max_wait_time:
                completion_status = check_skill_completion(
                    worktree_path=worktree_path,
                    expected_artifacts=step.creates,
                )

                if completion_status["completed"]:
                    from ..core.unicode_safe import safe_print
                    safe_print(f"[OK] {agent_name}/{action} completed - found artifacts: {completion_status['found_artifacts']}")
                    break

                await asyncio.sleep(poll_interval)
                elapsed += poll_interval

                # Print progress every 10 seconds
                if elapsed % 10 == 0:
                    print(f" Still waiting... ({elapsed}s elapsed)")
            else:
                # while/else: only reached if the loop exhausted without break.
                raise TimeoutError(
                    f"Skill {agent_name}/{action} did not complete within {max_wait_time}s. "
                    f"Expected artifacts: {step.creates}, Missing: {completion_status.get('missing_artifacts', [])}"
                )

            # Extract artifacts from worktree
            artifacts = await self.worktree_manager.extract_artifacts(
                worktree_path=worktree_path,
                step=step,
            )

            # Update state with artifacts
            for artifact in artifacts:
                self.state.artifacts[artifact.name] = artifact

            # Story-level step handling (Phase 3: Story-Level Granularity)
            # Verify acceptance criteria BEFORE marking step as completed
            if step.metadata and step.metadata.get("story_id"):
                self._handle_story_completion(step, artifacts, step_execution)

            # Update step execution (after story verification)
            step_execution.completed_at = datetime.now()
            step_execution.status = "completed"
            step_execution.result = result

            # Remove the worktree on success (keep on failure for debugging)
            try:
                # Determine if we should delete the branch based on configuration
                from ..core.config import load_config
                config = load_config()
                should_delete = (
                    config.workflow.branch_cleanup.delete_branches_on_cleanup
                    if (
                        config.workflow.branch_cleanup
                        and config.workflow.branch_cleanup.enabled
                    )
                    else True  # Default to True for backward compatibility
                )
                await self.worktree_manager.remove_worktree(
                    worktree_name, delete_branch=should_delete
                )
            except Exception:
                pass

            # Advance to next step
            self._advance_step()

        except Exception as e:
            # Record the failure on the execution record, then re-raise so the
            # caller can decide how to handle it.
            step_execution.completed_at = datetime.now()
            step_execution.status = "failed"
            step_execution.error = str(e)
            raise

        finally:
            # Persist state on every path (success, failure, timeout).
            self.save_state()
1950
+
1951
+ def _can_execute_step(
1952
+ self,
1953
+ step: WorkflowStep,
1954
+ completed_steps: dict[str, StepResult]
1955
+ ) -> tuple[bool, str]:
1956
+ """
1957
+ Check if step can execute based on dependencies (BUG-003B fix).
1958
+
1959
+ Validates that all required dependencies have been executed and succeeded.
1960
+ If any dependency is missing or failed, the step cannot execute.
1961
+
1962
+ Args:
1963
+ step: Step to check
1964
+ completed_steps: Results of previously executed steps
1965
+
1966
+ Returns:
1967
+ (can_execute, skip_reason) tuple:
1968
+ - (True, "") if all dependencies met
1969
+ - (False, reason) if dependencies not met
1970
+
1971
+ Example:
1972
+ can_run, reason = self._can_execute_step(step, completed_steps)
1973
+ if not can_run:
1974
+ # Skip step with reason
1975
+ skip_result = StepResult(status="skipped", skip_reason=reason, ...)
1976
+ """
1977
+ for dep in step.requires or []:
1978
+ if dep not in completed_steps:
1979
+ return False, f"Dependency '{dep}' not executed"
1980
+
1981
+ dep_result = completed_steps[dep]
1982
+ if not dep_result.success:
1983
+ return False, f"Dependency '{dep}' failed: {dep_result.error}"
1984
+
1985
+ return True, ""
1986
+
1987
+ def _normalize_action(self, action: str) -> str:
1988
+ """
1989
+ Normalize action name to use underscores (Python convention).
1990
+
1991
+ Converts hyphens to underscores so workflow YAMLs can use either format,
1992
+ but handlers always receive underscore format (e.g., "write_code").
1993
+ """
1994
+ return action.replace("-", "_").lower()
1995
+
1996
+ def _get_step_params(self, step: WorkflowStep, target_path: Path | None) -> dict[str, Any]:
1997
+ """
1998
+ Extract parameters for step execution.
1999
+
2000
+ Args:
2001
+ step: Workflow step
2002
+ target_path: Optional target file path
2003
+
2004
+ Returns:
2005
+ Dictionary of parameters for command building
2006
+ """
2007
+ params: dict[str, Any] = {}
2008
+
2009
+ # Add target file if provided
2010
+ if target_path:
2011
+ try:
2012
+ # Try relative path first (most common case)
2013
+ resolved_target = Path(target_path).resolve()
2014
+ resolved_root = self.project_root.resolve()
2015
+
2016
+ # Use is_relative_to if available (Python 3.9+)
2017
+ try:
2018
+ if resolved_target.is_relative_to(resolved_root):
2019
+ params["target_file"] = str(resolved_target.relative_to(resolved_root))
2020
+ else:
2021
+ # Path is outside project root - use path normalizer
2022
+ from ...core.path_normalizer import normalize_for_cli
2023
+ params["target_file"] = normalize_for_cli(target_path, self.project_root)
2024
+ except AttributeError:
2025
+ # Python < 3.9 - use try/except
2026
+ try:
2027
+ params["target_file"] = str(resolved_target.relative_to(resolved_root))
2028
+ except ValueError:
2029
+ # Path is outside project root - use path normalizer
2030
+ from ...core.path_normalizer import normalize_for_cli
2031
+ params["target_file"] = normalize_for_cli(target_path, self.project_root)
2032
+ except Exception as e:
2033
+ # Fallback: use path normalizer for any error
2034
+ from ...core.path_normalizer import normalize_for_cli
2035
+ if self.logger:
2036
+ self.logger.warning(f"Path conversion error: {e}. Using path normalizer.")
2037
+ params["target_file"] = normalize_for_cli(target_path, self.project_root)
2038
+
2039
+ # Add step metadata
2040
+ if step.metadata:
2041
+ params.update(step.metadata)
2042
+
2043
+ # Add workflow variables
2044
+ if self.state and self.state.variables:
2045
+ # Include relevant variables (avoid exposing everything)
2046
+ if "user_prompt" in self.state.variables:
2047
+ params["user_prompt"] = self.state.variables["user_prompt"]
2048
+ if "target_file" in self.state.variables:
2049
+ params["target_file"] = self.state.variables["target_file"]
2050
+
2051
+ return params
2052
+
2053
+ def _handle_story_completion(
2054
+ self, step: WorkflowStep, artifacts: list[Artifact], step_execution: StepExecution
2055
+ ) -> None:
2056
+ """
2057
+ Handle story-level step completion (Phase 3: Story-Level Granularity).
2058
+
2059
+ Verifies acceptance criteria, logs to progress.txt, and tracks story completion.
2060
+
2061
+ Args:
2062
+ step: Completed workflow step with story metadata
2063
+ artifacts: Artifacts created by the step
2064
+ step_execution: Step execution record to update if criteria fail
2065
+ """
2066
+ if not step.metadata:
2067
+ return
2068
+
2069
+ story_id = step.metadata.get("story_id")
2070
+ story_title = step.metadata.get("story_title")
2071
+ acceptance_criteria = step.metadata.get("acceptance_criteria", [])
2072
+
2073
+ if not story_id:
2074
+ return # Not a story-level step
2075
+
2076
+ # Verify acceptance criteria if provided
2077
+ passes = True
2078
+ verification_result = None
2079
+
2080
+ if acceptance_criteria:
2081
+ from .acceptance_verifier import AcceptanceCriteriaVerifier
2082
+
2083
+ # Convert artifacts list to dict
2084
+ artifacts_dict = {art.name: art for art in artifacts}
2085
+
2086
+ # Get code files from artifacts
2087
+ code_files = []
2088
+ for art in artifacts:
2089
+ if art.path:
2090
+ art_path = Path(art.path)
2091
+ if art_path.exists() and art_path.suffix in [".py", ".js", ".ts", ".tsx", ".jsx", ".java", ".go", ".rs"]:
2092
+ code_files.append(art_path)
2093
+
2094
+ # Verify criteria
2095
+ verifier = AcceptanceCriteriaVerifier()
2096
+ verification_result = verifier.verify(
2097
+ criteria=acceptance_criteria,
2098
+ artifacts=artifacts_dict,
2099
+ code_files=code_files if code_files else None,
2100
+ )
2101
+ passes = verification_result.get("all_passed", True)
2102
+
2103
+ # Store verification result in state variables
2104
+ if "story_verifications" not in self.state.variables:
2105
+ self.state.variables["story_verifications"] = {}
2106
+ self.state.variables["story_verifications"][story_id] = verification_result
2107
+
2108
+ # Track story completion in state.variables
2109
+ if "story_completions" not in self.state.variables:
2110
+ self.state.variables["story_completions"] = {}
2111
+ self.state.variables["story_completions"][story_id] = passes
2112
+
2113
+ # Log to progress.txt if progress logger is available
2114
+ try:
2115
+ from .progress_logger import ProgressLogger
2116
+
2117
+ progress_file = self.project_root / ".tapps-agents" / "progress.txt"
2118
+ progress_logger = ProgressLogger(progress_file)
2119
+
2120
+ # Extract files changed
2121
+ files_changed = [art.path for art in artifacts if art.path]
2122
+
2123
+ # Extract learnings from verification result
2124
+ learnings = []
2125
+ if verification_result and not passes:
2126
+ failed_criteria = [
2127
+ r["criterion"]
2128
+ for r in verification_result.get("results", [])
2129
+ if not r.get("passed", False)
2130
+ ]
2131
+ if failed_criteria:
2132
+ learnings.append(f"Acceptance criteria not met: {', '.join(failed_criteria)}")
2133
+
2134
+ # Log story completion
2135
+ progress_logger.log_story_completion(
2136
+ story_id=story_id,
2137
+ story_title=story_title or step.id,
2138
+ passes=passes,
2139
+ files_changed=files_changed if files_changed else None,
2140
+ learnings=learnings if learnings else None,
2141
+ )
2142
+ except Exception:
2143
+ # Don't fail workflow if progress logging fails
2144
+ import logging
2145
+ logger = logging.getLogger(__name__)
2146
+ logger.warning("Failed to log story completion to progress.txt", exc_info=True)
2147
+
2148
+ # If acceptance criteria not met, mark step as failed and raise exception
2149
+ if not passes:
2150
+ step_execution.status = "failed"
2151
+ step_execution.error = f"Acceptance criteria not met for story {story_id}"
2152
+ # Raise exception to prevent advancing to next step
2153
+ raise ValueError(f"Story {story_id} failed acceptance criteria verification")
2154
+
2155
    def _advance_step(self) -> None:
        """Advance to the next workflow step.

        Prefers the auto-progression engine's decision when enabled; otherwise
        falls back to simple sequential order through ``self.workflow.steps``.
        Marks the workflow completed (and clears ``current_step``) when there
        is no next step.
        """
        if not self.workflow or not self.state:
            return

        # Use auto-progression if enabled
        if self.auto_progression.should_auto_progress():
            current_step = self.get_current_step()
            if current_step:
                # Get progression decision
                step_execution = next(
                    (se for se in self.state.step_executions if se.step_id == current_step.id),
                    None
                )
                if step_execution:
                    review_result = None
                    if current_step.agent == "reviewer":
                        # Reviewer steps feed their stored result into the gate decision
                        review_result = self.state.variables.get("reviewer_result")

                    decision = self.auto_progression.handle_step_completion(
                        step=current_step,
                        state=self.state,
                        step_execution=step_execution,
                        review_result=review_result,
                    )

                    next_step_id = self.auto_progression.get_next_step_id(
                        step=current_step,
                        decision=decision,
                        workflow_steps=self.workflow.steps,
                    )

                    if next_step_id:
                        self.state.current_step = next_step_id
                    else:
                        # Workflow complete
                        self.state.status = "completed"
                        self.state.completed_at = datetime.now()
                        self.state.current_step = None
                    return
                # NOTE(review): with no execution record for the current step,
                # control falls through to sequential progression below.

        # Fallback to sequential progression
        current_index = None
        for i, step in enumerate(self.workflow.steps):
            if step.id == self.state.current_step:
                current_index = i
                break

        if current_index is None:
            # Current step ID no longer exists in the workflow definition.
            self.state.status = "failed"
            self.state.error = f"Current step {self.state.current_step} not found"
            return

        # Move to next step
        if current_index + 1 < len(self.workflow.steps):
            self.state.current_step = self.workflow.steps[current_index + 1].id
        else:
            # All steps completed
            self.state.status = "completed"
            self.state.completed_at = datetime.now()
            self.state.current_step = None
2216
+
2217
+ def get_progression_status(self) -> dict[str, Any]:
2218
+ """
2219
+ Get current progression status and visibility information.
2220
+
2221
+ Returns:
2222
+ Dictionary with progression status
2223
+ """
2224
+ if not self.workflow or not self.state:
2225
+ return {"status": "not_started"}
2226
+
2227
+ return self.auto_progression.get_progression_status(
2228
+ state=self.state,
2229
+ workflow_steps=self.workflow.steps,
2230
+ )
2231
+
2232
+ def get_progression_history(self, step_id: str | None = None) -> list[dict[str, Any]]:
2233
+ """
2234
+ Get progression history.
2235
+
2236
+ Args:
2237
+ step_id: Optional step ID to filter by
2238
+
2239
+ Returns:
2240
+ List of progression history entries
2241
+ """
2242
+ history = self.auto_progression.get_progression_history(step_id=step_id)
2243
+ return [
2244
+ {
2245
+ "step_id": h.step_id,
2246
+ "timestamp": h.timestamp.isoformat(),
2247
+ "action": h.action.value,
2248
+ "reason": h.reason,
2249
+ "gate_result": h.gate_result,
2250
+ "metadata": h.metadata,
2251
+ }
2252
+ for h in history
2253
+ ]
2254
+
2255
+ def pause_workflow(self) -> None:
2256
+ """
2257
+ Pause workflow execution.
2258
+
2259
+ Epic 10: Progression Control
2260
+ """
2261
+ if not self.state:
2262
+ raise ValueError("Workflow not started")
2263
+
2264
+ if self.state.status == "running":
2265
+ self.state.status = "paused"
2266
+ self.save_state()
2267
+ if self.logger:
2268
+ self.logger.info("Workflow paused by user")
2269
+ self.auto_progression.record_progression(
2270
+ step_id=self.state.current_step or "unknown",
2271
+ action=ProgressionAction.PAUSE,
2272
+ reason="Workflow paused by user",
2273
+ )
2274
+
2275
+ def resume_workflow(self) -> None:
2276
+ """
2277
+ Resume paused workflow execution.
2278
+
2279
+ Epic 10: Progression Control
2280
+ """
2281
+ if not self.state:
2282
+ raise ValueError("Workflow not started")
2283
+
2284
+ if self.state.status == "paused":
2285
+ self.state.status = "running"
2286
+ self.save_state()
2287
+ if self.logger:
2288
+ self.logger.info("Workflow resumed by user")
2289
+ self.auto_progression.record_progression(
2290
+ step_id=self.state.current_step or "unknown",
2291
+ action=ProgressionAction.CONTINUE,
2292
+ reason="Workflow resumed by user",
2293
+ )
2294
+
2295
+ def skip_step(self, step_id: str | None = None) -> None:
2296
+ """
2297
+ Skip a workflow step.
2298
+
2299
+ Args:
2300
+ step_id: Step ID to skip (defaults to current step)
2301
+
2302
+ Epic 10: Progression Control
2303
+ """
2304
+ if not self.state or not self.workflow:
2305
+ raise ValueError("Workflow not started")
2306
+
2307
+ step_id = step_id or self.state.current_step
2308
+ if not step_id:
2309
+ raise ValueError("No step to skip")
2310
+
2311
+ # Find the step
2312
+ step = next((s for s in self.workflow.steps if s.id == step_id), None)
2313
+ if not step:
2314
+ raise ValueError(f"Step {step_id} not found")
2315
+
2316
+ # Record skip in progression history
2317
+ self.auto_progression.record_progression(
2318
+ step_id=step_id,
2319
+ action=ProgressionAction.SKIP,
2320
+ reason="Step skipped by user",
2321
+ )
2322
+
2323
+ # Advance to next step
2324
+ if step.next:
2325
+ self.state.current_step = step.next
2326
+ self.save_state()
2327
+ if self.logger:
2328
+ self.logger.info(f"Step {step_id} skipped, advancing to {step.next}")
2329
+ else:
2330
+ # No next step - workflow complete
2331
+ self.state.status = "completed"
2332
+ self.state.completed_at = datetime.now()
2333
+ self.state.current_step = None
2334
+ self.save_state()
2335
+ if self.logger:
2336
+ self.logger.info(f"Step {step_id} skipped, workflow completed")
2337
+