tapps-agents 3.5.39__py3-none-any.whl → 3.5.41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/enhancer/agent.py +2728 -2728
- tapps_agents/agents/implementer/agent.py +35 -13
- tapps_agents/agents/reviewer/agent.py +43 -10
- tapps_agents/agents/reviewer/scoring.py +59 -68
- tapps_agents/agents/reviewer/tools/__init__.py +24 -0
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -0
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -0
- tapps_agents/beads/__init__.py +11 -0
- tapps_agents/beads/hydration.py +213 -0
- tapps_agents/beads/specs.py +206 -0
- tapps_agents/cli/commands/health.py +19 -3
- tapps_agents/cli/commands/simple_mode.py +842 -676
- tapps_agents/cli/commands/task.py +227 -0
- tapps_agents/cli/commands/top_level.py +13 -0
- tapps_agents/cli/main.py +658 -651
- tapps_agents/cli/parsers/top_level.py +1978 -1881
- tapps_agents/core/config.py +1622 -1622
- tapps_agents/core/init_project.py +3012 -2897
- tapps_agents/epic/markdown_sync.py +105 -0
- tapps_agents/epic/orchestrator.py +1 -2
- tapps_agents/epic/parser.py +427 -423
- tapps_agents/experts/adaptive_domain_detector.py +0 -2
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +15 -15
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +19 -44
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
- tapps_agents/health/checks/outcomes.py +134 -46
- tapps_agents/health/orchestrator.py +12 -4
- tapps_agents/hooks/__init__.py +33 -0
- tapps_agents/hooks/config.py +140 -0
- tapps_agents/hooks/events.py +135 -0
- tapps_agents/hooks/executor.py +128 -0
- tapps_agents/hooks/manager.py +143 -0
- tapps_agents/session/__init__.py +19 -0
- tapps_agents/session/manager.py +256 -0
- tapps_agents/simple_mode/code_snippet_handler.py +382 -0
- tapps_agents/simple_mode/intent_parser.py +29 -4
- tapps_agents/simple_mode/orchestrators/base.py +185 -59
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2667 -2642
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +2 -2
- tapps_agents/simple_mode/workflow_suggester.py +37 -3
- tapps_agents/workflow/agent_handlers/implementer_handler.py +18 -3
- tapps_agents/workflow/cursor_executor.py +2337 -2118
- tapps_agents/workflow/direct_execution_fallback.py +16 -3
- tapps_agents/workflow/message_formatter.py +2 -1
- tapps_agents/workflow/models.py +38 -1
- tapps_agents/workflow/parallel_executor.py +43 -4
- tapps_agents/workflow/parser.py +375 -357
- tapps_agents/workflow/rules_generator.py +337 -337
- tapps_agents/workflow/skill_invoker.py +9 -3
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/METADATA +5 -1
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/RECORD +58 -54
- tapps_agents/agents/analyst/SKILL.md +0 -85
- tapps_agents/agents/architect/SKILL.md +0 -80
- tapps_agents/agents/debugger/SKILL.md +0 -66
- tapps_agents/agents/designer/SKILL.md +0 -78
- tapps_agents/agents/documenter/SKILL.md +0 -95
- tapps_agents/agents/enhancer/SKILL.md +0 -189
- tapps_agents/agents/implementer/SKILL.md +0 -117
- tapps_agents/agents/improver/SKILL.md +0 -55
- tapps_agents/agents/ops/SKILL.md +0 -64
- tapps_agents/agents/orchestrator/SKILL.md +0 -238
- tapps_agents/agents/planner/story_template.md +0 -37
- tapps_agents/agents/reviewer/templates/quality-dashboard.html.j2 +0 -150
- tapps_agents/agents/tester/SKILL.md +0 -71
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/licenses/LICENSE +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/top_level.txt +0 -0
|
@@ -1,2118 +1,2337 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Cursor-Native Workflow Executor.
|
|
3
|
-
|
|
4
|
-
This module provides a Cursor-native execution model that uses Cursor Skills
|
|
5
|
-
and direct execution for LLM operations.
|
|
6
|
-
"""
|
|
7
|
-
|
|
8
|
-
# @ai-prime-directive: This file implements the Cursor-native workflow executor for Cursor Skills integration.
|
|
9
|
-
# This executor is used when running in Cursor mode (TAPPS_AGENTS_MODE=cursor) and invokes Cursor Skills
|
|
10
|
-
# for LLM operations instead of direct API calls. Do not modify the Skill invocation pattern without
|
|
11
|
-
# updating Cursor Skills integration and tests.
|
|
12
|
-
|
|
13
|
-
# @ai-constraints:
|
|
14
|
-
# - Must only execute in Cursor mode (is_cursor_mode() must return True)
|
|
15
|
-
# - Must use SkillInvoker for all LLM operations - do not make direct API calls
|
|
16
|
-
# - Workflow state must be compatible with WorkflowExecutor for cross-mode compatibility
|
|
17
|
-
# - Performance: Skill invocation should complete in <5s for typical operations
|
|
18
|
-
# - Must maintain backward compatibility with WorkflowExecutor workflow definitions
|
|
19
|
-
|
|
20
|
-
# @note[
|
|
21
|
-
# The framework
|
|
22
|
-
#
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
import
|
|
28
|
-
import
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
from
|
|
32
|
-
from
|
|
33
|
-
from
|
|
34
|
-
from
|
|
35
|
-
|
|
36
|
-
from
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
from .
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
from .
|
|
52
|
-
from .
|
|
53
|
-
from .
|
|
54
|
-
from .
|
|
55
|
-
from .
|
|
56
|
-
from .
|
|
57
|
-
from .
|
|
58
|
-
from .
|
|
59
|
-
from .
|
|
60
|
-
from .
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
self.
|
|
94
|
-
self.
|
|
95
|
-
self.
|
|
96
|
-
self.
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
self.
|
|
102
|
-
self.
|
|
103
|
-
self.
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
self.
|
|
109
|
-
|
|
110
|
-
#
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
frequency = CheckpointFrequency
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
#
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
print(f" - {
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
#
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
#
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
#
|
|
265
|
-
self.
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
state_vars
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
"
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
#
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
self.
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
return
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
"
|
|
443
|
-
"
|
|
444
|
-
"
|
|
445
|
-
"
|
|
446
|
-
"
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
"
|
|
452
|
-
"
|
|
453
|
-
"
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
"
|
|
463
|
-
"
|
|
464
|
-
"
|
|
465
|
-
"
|
|
466
|
-
"
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
#
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
#
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
"
|
|
573
|
-
"
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
)
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
#
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
)
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
|
|
1071
|
-
|
|
1072
|
-
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
|
|
1118
|
-
|
|
1119
|
-
|
|
1120
|
-
|
|
1121
|
-
|
|
1122
|
-
|
|
1123
|
-
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
)
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
|
|
1161
|
-
|
|
1162
|
-
|
|
1163
|
-
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
#
|
|
1219
|
-
if
|
|
1220
|
-
self.state.
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
|
|
1256
|
-
|
|
1257
|
-
|
|
1258
|
-
|
|
1259
|
-
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
|
|
1264
|
-
|
|
1265
|
-
|
|
1266
|
-
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
)
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
|
|
1357
|
-
#
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
|
|
1379
|
-
|
|
1380
|
-
|
|
1381
|
-
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1394
|
-
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
|
|
1398
|
-
|
|
1399
|
-
|
|
1400
|
-
|
|
1401
|
-
|
|
1402
|
-
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
|
|
1419
|
-
)
|
|
1420
|
-
|
|
1421
|
-
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
flush=True
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1498
|
-
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1504
|
-
|
|
1505
|
-
|
|
1506
|
-
|
|
1507
|
-
|
|
1508
|
-
|
|
1509
|
-
|
|
1510
|
-
|
|
1511
|
-
|
|
1512
|
-
|
|
1513
|
-
|
|
1514
|
-
|
|
1515
|
-
|
|
1516
|
-
|
|
1517
|
-
|
|
1518
|
-
|
|
1519
|
-
|
|
1520
|
-
|
|
1521
|
-
|
|
1522
|
-
|
|
1523
|
-
|
|
1524
|
-
|
|
1525
|
-
|
|
1526
|
-
|
|
1527
|
-
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1531
|
-
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
|
|
1535
|
-
|
|
1536
|
-
|
|
1537
|
-
|
|
1538
|
-
|
|
1539
|
-
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
|
|
1543
|
-
|
|
1544
|
-
|
|
1545
|
-
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1618
|
-
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
|
|
1622
|
-
|
|
1623
|
-
|
|
1624
|
-
|
|
1625
|
-
|
|
1626
|
-
|
|
1627
|
-
|
|
1628
|
-
|
|
1629
|
-
|
|
1630
|
-
|
|
1631
|
-
|
|
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
|
|
1636
|
-
|
|
1637
|
-
|
|
1638
|
-
|
|
1639
|
-
|
|
1640
|
-
|
|
1641
|
-
|
|
1642
|
-
|
|
1643
|
-
|
|
1644
|
-
|
|
1645
|
-
|
|
1646
|
-
|
|
1647
|
-
|
|
1648
|
-
|
|
1649
|
-
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
|
|
1654
|
-
|
|
1655
|
-
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
|
|
1659
|
-
|
|
1660
|
-
|
|
1661
|
-
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
|
|
1669
|
-
|
|
1670
|
-
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
|
|
1674
|
-
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
|
|
1681
|
-
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1685
|
-
|
|
1686
|
-
|
|
1687
|
-
|
|
1688
|
-
|
|
1689
|
-
|
|
1690
|
-
|
|
1691
|
-
|
|
1692
|
-
|
|
1693
|
-
|
|
1694
|
-
|
|
1695
|
-
|
|
1696
|
-
|
|
1697
|
-
|
|
1698
|
-
|
|
1699
|
-
|
|
1700
|
-
|
|
1701
|
-
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1726
|
-
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
|
|
1738
|
-
|
|
1739
|
-
|
|
1740
|
-
|
|
1741
|
-
|
|
1742
|
-
|
|
1743
|
-
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
|
|
1747
|
-
|
|
1748
|
-
|
|
1749
|
-
|
|
1750
|
-
|
|
1751
|
-
|
|
1752
|
-
|
|
1753
|
-
|
|
1754
|
-
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
|
|
1758
|
-
|
|
1759
|
-
|
|
1760
|
-
|
|
1761
|
-
|
|
1762
|
-
|
|
1763
|
-
|
|
1764
|
-
|
|
1765
|
-
|
|
1766
|
-
|
|
1767
|
-
|
|
1768
|
-
|
|
1769
|
-
|
|
1770
|
-
|
|
1771
|
-
|
|
1772
|
-
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
|
|
1776
|
-
|
|
1777
|
-
|
|
1778
|
-
|
|
1779
|
-
|
|
1780
|
-
|
|
1781
|
-
|
|
1782
|
-
|
|
1783
|
-
|
|
1784
|
-
|
|
1785
|
-
|
|
1786
|
-
|
|
1787
|
-
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
#
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
|
|
1797
|
-
|
|
1798
|
-
|
|
1799
|
-
|
|
1800
|
-
|
|
1801
|
-
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
|
|
1805
|
-
|
|
1806
|
-
|
|
1807
|
-
|
|
1808
|
-
|
|
1809
|
-
|
|
1810
|
-
|
|
1811
|
-
|
|
1812
|
-
|
|
1813
|
-
|
|
1814
|
-
|
|
1815
|
-
|
|
1816
|
-
|
|
1817
|
-
|
|
1818
|
-
|
|
1819
|
-
|
|
1820
|
-
|
|
1821
|
-
|
|
1822
|
-
|
|
1823
|
-
|
|
1824
|
-
|
|
1825
|
-
|
|
1826
|
-
|
|
1827
|
-
|
|
1828
|
-
|
|
1829
|
-
|
|
1830
|
-
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
|
-
|
|
1835
|
-
|
|
1836
|
-
|
|
1837
|
-
|
|
1838
|
-
|
|
1839
|
-
|
|
1840
|
-
|
|
1841
|
-
|
|
1842
|
-
|
|
1843
|
-
|
|
1844
|
-
|
|
1845
|
-
|
|
1846
|
-
|
|
1847
|
-
|
|
1848
|
-
|
|
1849
|
-
|
|
1850
|
-
|
|
1851
|
-
|
|
1852
|
-
|
|
1853
|
-
|
|
1854
|
-
|
|
1855
|
-
|
|
1856
|
-
|
|
1857
|
-
|
|
1858
|
-
|
|
1859
|
-
|
|
1860
|
-
|
|
1861
|
-
|
|
1862
|
-
|
|
1863
|
-
|
|
1864
|
-
|
|
1865
|
-
|
|
1866
|
-
|
|
1867
|
-
#
|
|
1868
|
-
|
|
1869
|
-
|
|
1870
|
-
|
|
1871
|
-
|
|
1872
|
-
|
|
1873
|
-
|
|
1874
|
-
|
|
1875
|
-
|
|
1876
|
-
|
|
1877
|
-
|
|
1878
|
-
|
|
1879
|
-
|
|
1880
|
-
|
|
1881
|
-
|
|
1882
|
-
|
|
1883
|
-
|
|
1884
|
-
|
|
1885
|
-
|
|
1886
|
-
|
|
1887
|
-
|
|
1888
|
-
|
|
1889
|
-
|
|
1890
|
-
|
|
1891
|
-
|
|
1892
|
-
|
|
1893
|
-
|
|
1894
|
-
|
|
1895
|
-
|
|
1896
|
-
|
|
1897
|
-
|
|
1898
|
-
|
|
1899
|
-
|
|
1900
|
-
|
|
1901
|
-
|
|
1902
|
-
|
|
1903
|
-
|
|
1904
|
-
|
|
1905
|
-
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
1910
|
-
|
|
1911
|
-
|
|
1912
|
-
|
|
1913
|
-
|
|
1914
|
-
|
|
1915
|
-
#
|
|
1916
|
-
|
|
1917
|
-
|
|
1918
|
-
|
|
1919
|
-
|
|
1920
|
-
|
|
1921
|
-
|
|
1922
|
-
|
|
1923
|
-
|
|
1924
|
-
|
|
1925
|
-
|
|
1926
|
-
|
|
1927
|
-
|
|
1928
|
-
|
|
1929
|
-
|
|
1930
|
-
|
|
1931
|
-
|
|
1932
|
-
|
|
1933
|
-
|
|
1934
|
-
|
|
1935
|
-
|
|
1936
|
-
|
|
1937
|
-
|
|
1938
|
-
|
|
1939
|
-
|
|
1940
|
-
|
|
1941
|
-
|
|
1942
|
-
|
|
1943
|
-
|
|
1944
|
-
|
|
1945
|
-
|
|
1946
|
-
|
|
1947
|
-
|
|
1948
|
-
|
|
1949
|
-
|
|
1950
|
-
|
|
1951
|
-
|
|
1952
|
-
|
|
1953
|
-
|
|
1954
|
-
|
|
1955
|
-
|
|
1956
|
-
|
|
1957
|
-
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
|
|
1961
|
-
|
|
1962
|
-
|
|
1963
|
-
|
|
1964
|
-
|
|
1965
|
-
|
|
1966
|
-
|
|
1967
|
-
|
|
1968
|
-
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
|
|
1972
|
-
|
|
1973
|
-
|
|
1974
|
-
|
|
1975
|
-
|
|
1976
|
-
|
|
1977
|
-
|
|
1978
|
-
|
|
1979
|
-
|
|
1980
|
-
|
|
1981
|
-
|
|
1982
|
-
|
|
1983
|
-
|
|
1984
|
-
|
|
1985
|
-
|
|
1986
|
-
|
|
1987
|
-
|
|
1988
|
-
|
|
1989
|
-
|
|
1990
|
-
|
|
1991
|
-
|
|
1992
|
-
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
|
|
1997
|
-
|
|
1998
|
-
|
|
1999
|
-
|
|
2000
|
-
|
|
2001
|
-
|
|
2002
|
-
|
|
2003
|
-
|
|
2004
|
-
|
|
2005
|
-
|
|
2006
|
-
|
|
2007
|
-
|
|
2008
|
-
|
|
2009
|
-
|
|
2010
|
-
|
|
2011
|
-
|
|
2012
|
-
|
|
2013
|
-
|
|
2014
|
-
|
|
2015
|
-
|
|
2016
|
-
|
|
2017
|
-
|
|
2018
|
-
|
|
2019
|
-
|
|
2020
|
-
|
|
2021
|
-
|
|
2022
|
-
|
|
2023
|
-
|
|
2024
|
-
|
|
2025
|
-
|
|
2026
|
-
|
|
2027
|
-
|
|
2028
|
-
|
|
2029
|
-
|
|
2030
|
-
|
|
2031
|
-
|
|
2032
|
-
|
|
2033
|
-
|
|
2034
|
-
|
|
2035
|
-
|
|
2036
|
-
|
|
2037
|
-
|
|
2038
|
-
|
|
2039
|
-
|
|
2040
|
-
|
|
2041
|
-
|
|
2042
|
-
|
|
2043
|
-
|
|
2044
|
-
|
|
2045
|
-
|
|
2046
|
-
self.state.
|
|
2047
|
-
|
|
2048
|
-
if self.
|
|
2049
|
-
self.
|
|
2050
|
-
|
|
2051
|
-
|
|
2052
|
-
|
|
2053
|
-
|
|
2054
|
-
|
|
2055
|
-
|
|
2056
|
-
|
|
2057
|
-
|
|
2058
|
-
|
|
2059
|
-
|
|
2060
|
-
|
|
2061
|
-
|
|
2062
|
-
|
|
2063
|
-
|
|
2064
|
-
|
|
2065
|
-
|
|
2066
|
-
|
|
2067
|
-
|
|
2068
|
-
|
|
2069
|
-
|
|
2070
|
-
|
|
2071
|
-
|
|
2072
|
-
|
|
2073
|
-
|
|
2074
|
-
|
|
2075
|
-
|
|
2076
|
-
|
|
2077
|
-
|
|
2078
|
-
|
|
2079
|
-
|
|
2080
|
-
|
|
2081
|
-
|
|
2082
|
-
|
|
2083
|
-
|
|
2084
|
-
|
|
2085
|
-
|
|
2086
|
-
|
|
2087
|
-
|
|
2088
|
-
|
|
2089
|
-
|
|
2090
|
-
|
|
2091
|
-
|
|
2092
|
-
|
|
2093
|
-
|
|
2094
|
-
|
|
2095
|
-
|
|
2096
|
-
|
|
2097
|
-
|
|
2098
|
-
|
|
2099
|
-
|
|
2100
|
-
|
|
2101
|
-
|
|
2102
|
-
|
|
2103
|
-
|
|
2104
|
-
|
|
2105
|
-
|
|
2106
|
-
self.state.
|
|
2107
|
-
|
|
2108
|
-
|
|
2109
|
-
|
|
2110
|
-
|
|
2111
|
-
|
|
2112
|
-
|
|
2113
|
-
|
|
2114
|
-
|
|
2115
|
-
|
|
2116
|
-
|
|
2117
|
-
|
|
2118
|
-
|
|
1
|
+
"""
|
|
2
|
+
Cursor-Native Workflow Executor.
|
|
3
|
+
|
|
4
|
+
This module provides a Cursor-native execution model that uses Cursor Skills
|
|
5
|
+
and direct execution for LLM operations.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
# @ai-prime-directive: This file implements the Cursor-native workflow executor for Cursor Skills integration.
|
|
9
|
+
# This executor is used when running in Cursor mode (TAPPS_AGENTS_MODE=cursor) and invokes Cursor Skills
|
|
10
|
+
# for LLM operations instead of direct API calls. Do not modify the Skill invocation pattern without
|
|
11
|
+
# updating Cursor Skills integration and tests.
|
|
12
|
+
|
|
13
|
+
# @ai-constraints:
|
|
14
|
+
# - Must only execute in Cursor mode (is_cursor_mode() must return True)
|
|
15
|
+
# - Must use SkillInvoker for all LLM operations - do not make direct API calls
|
|
16
|
+
# - Workflow state must be compatible with WorkflowExecutor for cross-mode compatibility
|
|
17
|
+
# - Performance: Skill invocation should complete in <5s for typical operations
|
|
18
|
+
# - Must maintain backward compatibility with WorkflowExecutor workflow definitions
|
|
19
|
+
|
|
20
|
+
# @note[2026-02-03]: Equal platform support policy per ADR-002.
|
|
21
|
+
# The framework provides equal support for Claude Desktop, Cursor IDE, and Claude Code CLI.
|
|
22
|
+
# Uses handler-first execution (AgentHandlerRegistry) before platform-specific features.
|
|
23
|
+
# See docs/architecture/decisions/ADR-002-equal-platform-support.md
|
|
24
|
+
|
|
25
|
+
from __future__ import annotations
|
|
26
|
+
|
|
27
|
+
import asyncio
|
|
28
|
+
import hashlib
|
|
29
|
+
import os
|
|
30
|
+
import traceback
|
|
31
|
+
from collections.abc import AsyncIterator
|
|
32
|
+
from contextlib import asynccontextmanager
|
|
33
|
+
from dataclasses import asdict
|
|
34
|
+
from datetime import datetime
|
|
35
|
+
from pathlib import Path
|
|
36
|
+
from typing import Any
|
|
37
|
+
|
|
38
|
+
from ..core.project_profile import (
|
|
39
|
+
ProjectProfile,
|
|
40
|
+
ProjectProfileDetector,
|
|
41
|
+
load_project_profile,
|
|
42
|
+
save_project_profile,
|
|
43
|
+
)
|
|
44
|
+
from ..core.runtime_mode import is_cursor_mode
|
|
45
|
+
from .auto_progression import AutoProgressionManager, ProgressionAction
|
|
46
|
+
from .checkpoint_manager import (
|
|
47
|
+
CheckpointConfig,
|
|
48
|
+
CheckpointFrequency,
|
|
49
|
+
WorkflowCheckpointManager,
|
|
50
|
+
)
|
|
51
|
+
from .error_recovery import ErrorContext, ErrorRecoveryManager
|
|
52
|
+
from .event_bus import FileBasedEventBus
|
|
53
|
+
from .events import EventType, WorkflowEvent
|
|
54
|
+
from .logging_helper import WorkflowLogger
|
|
55
|
+
from .marker_writer import MarkerWriter
|
|
56
|
+
from .models import Artifact, StepExecution, StepResult, Workflow, WorkflowState, WorkflowStep
|
|
57
|
+
from .parallel_executor import ParallelStepExecutor
|
|
58
|
+
from .progress_manager import ProgressUpdateManager
|
|
59
|
+
from .skill_invoker import SkillInvoker
|
|
60
|
+
from .state_manager import AdvancedStateManager
|
|
61
|
+
from .state_persistence_config import StatePersistenceConfigManager
|
|
62
|
+
from .worktree_manager import WorktreeManager
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class CursorWorkflowExecutor:
|
|
66
|
+
"""
|
|
67
|
+
Cursor-native workflow executor that uses Skills.
|
|
68
|
+
|
|
69
|
+
This executor is used when running in Cursor mode (TAPPS_AGENTS_MODE=cursor).
|
|
70
|
+
It invokes Cursor Skills for LLM operations.
|
|
71
|
+
"""
|
|
72
|
+
|
|
73
|
+
    def __init__(
        self,
        project_root: Path | None = None,
        expert_registry: Any | None = None,
        auto_mode: bool = False,
    ):
        """
        Initialize Cursor-native workflow executor.

        Args:
            project_root: Root directory for the project
            expert_registry: Optional ExpertRegistry instance for expert consultation
            auto_mode: Whether to run in fully automated mode (no prompts)

        Raises:
            RuntimeError: If not running in Cursor mode (``is_cursor_mode()`` is False).
        """
        # Hard guard: this executor only makes sense in Cursor mode; headless
        # runs must use WorkflowExecutor instead (see module prime directive).
        if not is_cursor_mode():
            raise RuntimeError(
                "CursorWorkflowExecutor can only be used in Cursor mode. "
                "Use WorkflowExecutor for headless mode."
            )

        self.project_root = project_root or Path.cwd()
        # state/workflow are populated by start(); None until then.
        self.state: WorkflowState | None = None
        self.workflow: Workflow | None = None
        self.expert_registry = expert_registry
        self.auto_mode = auto_mode
        # All LLM operations go through the SkillInvoker (see @ai-constraints).
        self.skill_invoker = SkillInvoker(
            project_root=self.project_root, use_api=True
        )
        self.worktree_manager = WorktreeManager(project_root=self.project_root)
        self.project_profile: ProjectProfile | None = None
        self.parallel_executor = ParallelStepExecutor(max_parallel=8, default_timeout_seconds=3600.0)
        self.logger: WorkflowLogger | None = None  # Initialized in start() with workflow_id
        self.progress_manager: ProgressUpdateManager | None = None  # Initialized in start() with workflow

        # Issue fix: Support for continue-from and skip-steps flags
        self.continue_from: str | None = None
        self.skip_steps: list[str] = []
        self.print_paths: bool = True  # Issue fix: Print artifact paths after each step

        # Initialize event bus for event-driven communication (Phase 2)
        self.event_bus = FileBasedEventBus(project_root=self.project_root)

        # Initialize auto-progression manager (Epic 10)
        # Defaults to enabled unless explicitly disabled via environment.
        auto_progression_enabled = os.getenv("TAPPS_AGENTS_AUTO_PROGRESSION", "true").lower() == "true"
        self.auto_progression = AutoProgressionManager(
            auto_progression_enabled=auto_progression_enabled,
            auto_retry_enabled=True,
            max_retries=3,
        )

        # Initialize error recovery manager (Epic 14)
        # When disabled, error_recovery is None — callers must check before use.
        error_recovery_enabled = os.getenv("TAPPS_AGENTS_ERROR_RECOVERY", "true").lower() == "true"
        self.error_recovery = ErrorRecoveryManager(
            enable_auto_retry=error_recovery_enabled,
            max_retries=3,
        ) if error_recovery_enabled else None

        # Initialize state persistence configuration manager (Epic 12 - Story 12.6)
        self.state_config_manager = StatePersistenceConfigManager(project_root=self.project_root)

        # Initialize checkpoint manager (Epic 12)
        # Use configuration from state persistence config if available
        state_config = self.state_config_manager.config
        if state_config and state_config.checkpoint:
            checkpoint_frequency = state_config.checkpoint.mode
            checkpoint_interval = state_config.checkpoint.interval
            checkpoint_enabled = state_config.checkpoint.enabled
        else:
            # Fall back to environment variables
            checkpoint_frequency = os.getenv("TAPPS_AGENTS_CHECKPOINT_FREQUENCY", "every_step")
            checkpoint_interval = int(os.getenv("TAPPS_AGENTS_CHECKPOINT_INTERVAL", "1"))
            checkpoint_enabled = os.getenv("TAPPS_AGENTS_CHECKPOINT_ENABLED", "true").lower() == "true"

        # Unknown frequency strings degrade gracefully to EVERY_STEP.
        try:
            frequency = CheckpointFrequency(checkpoint_frequency)
        except ValueError:
            frequency = CheckpointFrequency.EVERY_STEP

        checkpoint_config = CheckpointConfig(
            frequency=frequency,
            interval=checkpoint_interval,
            enabled=checkpoint_enabled,
        )
        self.checkpoint_manager = WorkflowCheckpointManager(config=checkpoint_config)

        # Initialize state manager
        # Use storage location from config; compression only when configured.
        if state_config and state_config.enabled:
            state_dir = self.state_config_manager.get_storage_path()
            compression = state_config.compression
        else:
            state_dir = self._state_dir()
            compression = False
        self.state_manager = AdvancedStateManager(state_dir, compression=compression)

        # Always use direct execution via Skills (Background Agents removed)

        # Initialize marker writer for durable step completion tracking
        self.marker_writer = MarkerWriter(project_root=self.project_root)
|
|
172
|
+
|
|
173
|
+
def _state_dir(self) -> Path:
|
|
174
|
+
"""Get state directory path."""
|
|
175
|
+
return self.project_root / ".tapps-agents" / "workflow-state"
|
|
176
|
+
|
|
177
|
+
def _print_step_artifacts(
|
|
178
|
+
self,
|
|
179
|
+
step: Any,
|
|
180
|
+
artifacts: dict[str, Any],
|
|
181
|
+
step_execution: Any,
|
|
182
|
+
) -> None:
|
|
183
|
+
"""
|
|
184
|
+
Print artifact paths after step completion (Issue fix: Hidden workflow state).
|
|
185
|
+
|
|
186
|
+
Provides clear visibility into where workflow outputs are saved.
|
|
187
|
+
"""
|
|
188
|
+
from ..core.unicode_safe import safe_print
|
|
189
|
+
|
|
190
|
+
duration = step_execution.duration_seconds if step_execution else 0
|
|
191
|
+
duration_str = f"{duration:.1f}s" if duration else "N/A"
|
|
192
|
+
|
|
193
|
+
safe_print(f"\n[OK] Step '{step.id}' completed ({duration_str})")
|
|
194
|
+
|
|
195
|
+
if artifacts:
|
|
196
|
+
print(" 📄 Artifacts created:")
|
|
197
|
+
for art_name, art_data in artifacts.items():
|
|
198
|
+
if isinstance(art_data, dict):
|
|
199
|
+
path = art_data.get("path", "")
|
|
200
|
+
if path:
|
|
201
|
+
print(f" - {path}")
|
|
202
|
+
else:
|
|
203
|
+
print(f" - {art_name} (in-memory)")
|
|
204
|
+
else:
|
|
205
|
+
print(f" - {art_name}")
|
|
206
|
+
|
|
207
|
+
# Also print workflow state location for reference
|
|
208
|
+
if self.state:
|
|
209
|
+
state_dir = self._state_dir()
|
|
210
|
+
print(f" 📁 State: {state_dir / self.state.workflow_id}")
|
|
211
|
+
|
|
212
|
+
def _profile_project(self) -> None:
|
|
213
|
+
"""
|
|
214
|
+
Perform project profiling before workflow execution.
|
|
215
|
+
|
|
216
|
+
Loads existing profile if available, otherwise detects and saves a new one.
|
|
217
|
+
The profile is stored in workflow state and passed to all Skills via context.
|
|
218
|
+
"""
|
|
219
|
+
# Try to load existing profile first
|
|
220
|
+
self.project_profile = load_project_profile(project_root=self.project_root)
|
|
221
|
+
|
|
222
|
+
# If no profile exists, detect and save it
|
|
223
|
+
if not self.project_profile:
|
|
224
|
+
detector = ProjectProfileDetector(project_root=self.project_root)
|
|
225
|
+
self.project_profile = detector.detect_profile()
|
|
226
|
+
save_project_profile(profile=self.project_profile, project_root=self.project_root)
|
|
227
|
+
|
|
228
|
+
    async def start(
        self,
        workflow: Workflow,
        user_prompt: str | None = None,
    ) -> WorkflowState:
        """
        Start a new workflow execution.

        Also executes state cleanup if configured for "on_startup" schedule.

        Side effects (in order): optional state cleanup, logger creation,
        project profiling, WorkflowState creation, optional beads issue
        creation, execution-plan generation, WORKFLOW_STARTED event publish,
        progress-manager startup, state save, and manifest generation.

        Args:
            workflow: Workflow to execute
            user_prompt: Optional user prompt for the workflow

        Returns:
            Initial workflow state
        """
        # Execute cleanup on startup if configured (Epic 12 - Story 12.6)
        if self.state_config_manager.config and self.state_config_manager.config.cleanup:
            if self.state_config_manager.config.cleanup.cleanup_schedule == "on_startup":
                cleanup_result = self.state_config_manager.execute_cleanup()
                # NOTE: self.logger is normally still None here (it is created
                # below), so this log line only fires on a re-entrant start.
                if self.logger:
                    self.logger.info(
                        f"State cleanup on startup: {cleanup_result}",
                        cleanup_result=cleanup_result,
                    )

        self.workflow = workflow

        # Check workflow metadata for auto-execution override (per-workflow config)
        # Always use direct execution via Skills (Background Agents removed)

        # Use consistent workflow_id format: {workflow.id}-{timestamp}
        # Include microseconds to ensure uniqueness for parallel workflows (BUG-001 fix)
        workflow_id = f"{workflow.id}-{datetime.now().strftime('%Y%m%d-%H%M%S-%f')}"

        # Initialize logger with workflow_id for correlation
        self.logger = WorkflowLogger(workflow_id=workflow_id)

        # Perform project profiling before workflow execution
        self._profile_project()

        self.state = WorkflowState(
            workflow_id=workflow_id,
            started_at=datetime.now(),
            current_step=workflow.steps[0].id if workflow.steps else None,
            status="running",
            variables={
                "user_prompt": user_prompt or "",
                "project_profile": self.project_profile.to_dict() if self.project_profile else None,
                "workflow_name": workflow.name,  # Store in variables for reference
            },
        )

        # Beads: create workflow issue when enabled (store for close in run finally)
        try:
            from ..core.config import load_config
            from ..beads import require_beads
            from ..simple_mode.beads_hooks import create_workflow_issue

            config = load_config(self.project_root / ".tapps-agents" / "config.yaml")
            require_beads(config, self.project_root)
            state_vars = self.state.variables or {}
            # On resume: reuse id from .beads_issue_id file (same layout as *build)
            state_dir = self._state_dir()
            wf_dir = state_dir / workflow_id
            beads_file = wf_dir / ".beads_issue_id"
            if beads_file.exists():
                try:
                    bid = beads_file.read_text(encoding="utf-8").strip() or None
                    if bid:
                        state_vars["_beads_issue_id"] = bid
                        self.state.variables = state_vars
                except OSError:
                    # Unreadable marker file: fall through and create a new issue.
                    pass
            if "_beads_issue_id" not in state_vars:
                bid = create_workflow_issue(
                    self.project_root,
                    config,
                    workflow.name,
                    user_prompt or state_vars.get("target_file", "") or "",
                )
                if bid:
                    state_vars["_beads_issue_id"] = bid
                    self.state.variables = state_vars
                    # Best-effort persistence of the issue id so run()'s
                    # finally-block can recover it after a resume.
                    try:
                        wf_dir.mkdir(parents=True, exist_ok=True)
                        beads_file.write_text(bid, encoding="utf-8")
                    except OSError:
                        pass
        except Exception as e:
            from ..beads import BeadsRequiredError

            # Only a hard beads requirement aborts start; everything else is
            # deliberately swallowed (the trailing pass is a no-op).
            if isinstance(e, BeadsRequiredError):
                raise
            pass  # log-and-continue: do not fail start for other beads errors

        # Generate and save execution plan (Epic 6 - Story 6.7)
        try:
            from .execution_plan import generate_execution_plan, save_execution_plan
            execution_plan = generate_execution_plan(workflow)
            state_dir = self._state_dir()
            plan_path = save_execution_plan(execution_plan, state_dir, workflow_id)
            if self.logger:
                self.logger.info(
                    f"Execution plan generated: {plan_path}",
                    execution_plan_path=str(plan_path),
                )
        except Exception as e:
            # Don't fail workflow start if execution plan generation fails
            if self.logger:
                self.logger.warning(f"Failed to generate execution plan: {e}")

        self.logger.info(
            "Workflow started",
            workflow_name=workflow.name,
            workflow_version=workflow.version,
            step_count=len(workflow.steps),
        )

        # Publish workflow started event (Phase 2)
        await self.event_bus.publish(
            WorkflowEvent(
                event_type=EventType.WORKFLOW_STARTED,
                workflow_id=workflow_id,
                step_id=None,
                data={
                    "workflow_name": workflow.name,
                    "workflow_version": workflow.version,
                    "step_count": len(workflow.steps),
                    "user_prompt": user_prompt or "",
                },
                timestamp=datetime.now(),
                correlation_id=workflow_id,
            )
        )

        # Initialize progress update manager
        self.progress_manager = ProgressUpdateManager(
            workflow=workflow,
            state=self.state,
            project_root=self.project_root,
            enable_updates=True,
        )
        # Connect event bus to status monitor (Phase 2)
        if self.progress_manager.status_monitor:
            self.progress_manager.status_monitor.event_bus = self.event_bus
        # Start progress monitoring (non-blocking)
        import asyncio
        try:
            asyncio.get_running_loop()
            asyncio.create_task(self.progress_manager.start())
        except RuntimeError:
            # No running event loop - progress manager will start when event loop is available
            pass

        self.save_state()

        # Generate task manifest (Epic 7)
        self._generate_manifest()

        return self.state
|
|
390
|
+
|
|
391
|
+
    def save_state(self) -> None:
        """
        Save workflow state to disk.

        Writes ``<state_dir>/<workflow_id>.json`` atomically, mirrors it into
        a ``history/`` subdirectory, and regenerates the task manifest.
        No-op when no workflow state exists yet.
        """
        if not self.state:
            return

        def _make_json_serializable(obj: Any) -> Any:
            """Recursively convert objects to JSON-serializable format."""
            # Handle ProjectProfile objects
            # Duck-typing pre-check avoids importing for obviously-wrong objects.
            if hasattr(obj, "to_dict") and hasattr(obj, "compliance_requirements"):
                try:
                    from ..core.project_profile import ProjectProfile
                    if isinstance(obj, ProjectProfile):
                        return obj.to_dict()
                except (ImportError, AttributeError):
                    pass

            # Handle ComplianceRequirement objects
            if hasattr(obj, "name") and hasattr(obj, "confidence") and hasattr(obj, "indicators"):
                try:
                    from ..core.project_profile import ComplianceRequirement
                    if isinstance(obj, ComplianceRequirement):
                        return asdict(obj)
                except (ImportError, AttributeError):
                    pass

            # Handle dictionaries recursively
            if isinstance(obj, dict):
                return {k: _make_json_serializable(v) for k, v in obj.items()}

            # Handle lists recursively
            if isinstance(obj, list):
                return [_make_json_serializable(item) for item in obj]

            # Handle other non-serializable types
            # Probe with json.dumps; anything that fails is stringified.
            try:
                import json
                json.dumps(obj)
                return obj
            except (TypeError, ValueError):
                # For non-serializable types, convert to string as fallback
                return str(obj)

        state_file = self._state_dir() / f"{self.state.workflow_id}.json"
        state_file.parent.mkdir(parents=True, exist_ok=True)

        # Convert variables to JSON-serializable format
        variables = self.state.variables or {}
        serializable_variables = _make_json_serializable(variables)

        # Convert to dict for JSON serialization
        # NOTE: this schema must stay loadable by whatever resumes workflows;
        # do not rename keys casually.
        state_dict = {
            "workflow_id": self.state.workflow_id,
            "status": self.state.status,
            "current_step": self.state.current_step,
            "started_at": self.state.started_at.isoformat() if self.state.started_at else None,
            "completed_steps": self.state.completed_steps,
            "skipped_steps": self.state.skipped_steps,
            "variables": serializable_variables,
            "artifacts": {
                name: {
                    "name": a.name,
                    "path": a.path,
                    "status": a.status,
                    "created_by": a.created_by,
                    "created_at": a.created_at.isoformat() if a.created_at else None,
                    "metadata": a.metadata,
                }
                for name, a in self.state.artifacts.items()
            },
            "step_executions": [
                {
                    "step_id": se.step_id,
                    "agent": se.agent,
                    "action": se.action,
                    "started_at": se.started_at.isoformat() if se.started_at else None,
                    "completed_at": se.completed_at.isoformat() if se.completed_at else None,
                    "duration_seconds": se.duration_seconds,
                    "status": se.status,
                    "error": se.error,
                }
                for se in self.state.step_executions
            ],
            "error": self.state.error,
        }

        from .file_utils import atomic_write_json

        # Atomic writes prevent a crash mid-save from corrupting state.
        atomic_write_json(state_file, state_dict, indent=2)

        # Also save to history
        history_dir = state_file.parent / "history"
        history_dir.mkdir(exist_ok=True)
        history_file = history_dir / state_file.name
        atomic_write_json(history_file, state_dict, indent=2)

        # Generate task manifest (Epic 7)
        self._generate_manifest()
|
|
488
|
+
|
|
489
|
+
def _generate_manifest(self) -> None:
|
|
490
|
+
"""
|
|
491
|
+
Generate and save task manifest (Epic 7).
|
|
492
|
+
|
|
493
|
+
Generates manifest on workflow start, step completion, and state save.
|
|
494
|
+
"""
|
|
495
|
+
if not self.workflow or not self.state:
|
|
496
|
+
return
|
|
497
|
+
|
|
498
|
+
try:
|
|
499
|
+
from .manifest import (
|
|
500
|
+
generate_manifest,
|
|
501
|
+
save_manifest,
|
|
502
|
+
sync_manifest_to_project_root,
|
|
503
|
+
)
|
|
504
|
+
|
|
505
|
+
# Generate manifest
|
|
506
|
+
manifest_content = generate_manifest(self.workflow, self.state)
|
|
507
|
+
|
|
508
|
+
# Save to state directory
|
|
509
|
+
state_dir = self._state_dir()
|
|
510
|
+
manifest_path = save_manifest(manifest_content, state_dir, self.state.workflow_id)
|
|
511
|
+
|
|
512
|
+
# Optional: Sync to project root if configured
|
|
513
|
+
sync_enabled = os.getenv("TAPPS_AGENTS_MANIFEST_SYNC", "false").lower() == "true"
|
|
514
|
+
if sync_enabled:
|
|
515
|
+
sync_path = sync_manifest_to_project_root(manifest_content, self.project_root)
|
|
516
|
+
if self.logger:
|
|
517
|
+
self.logger.debug(
|
|
518
|
+
"Task manifest synced to project root",
|
|
519
|
+
manifest_path=str(manifest_path),
|
|
520
|
+
sync_path=str(sync_path),
|
|
521
|
+
)
|
|
522
|
+
elif self.logger:
|
|
523
|
+
self.logger.debug(
|
|
524
|
+
"Task manifest generated",
|
|
525
|
+
manifest_path=str(manifest_path),
|
|
526
|
+
)
|
|
527
|
+
except Exception as e:
|
|
528
|
+
# Don't fail workflow if manifest generation fails
|
|
529
|
+
if self.logger:
|
|
530
|
+
self.logger.warning(
|
|
531
|
+
"Failed to generate task manifest",
|
|
532
|
+
error=str(e),
|
|
533
|
+
)
|
|
534
|
+
|
|
535
|
+
async def run(
|
|
536
|
+
self,
|
|
537
|
+
workflow: Workflow | None = None,
|
|
538
|
+
target_file: str | None = None,
|
|
539
|
+
max_steps: int = 100,
|
|
540
|
+
) -> WorkflowState:
|
|
541
|
+
"""
|
|
542
|
+
Run workflow to completion with timeout protection.
|
|
543
|
+
|
|
544
|
+
Args:
|
|
545
|
+
workflow: Workflow to execute (if not already loaded)
|
|
546
|
+
target_file: Optional target file path
|
|
547
|
+
max_steps: Maximum number of steps to execute
|
|
548
|
+
|
|
549
|
+
Returns:
|
|
550
|
+
Final workflow state
|
|
551
|
+
"""
|
|
552
|
+
import asyncio
|
|
553
|
+
from datetime import datetime
|
|
554
|
+
|
|
555
|
+
from tapps_agents.core.config import load_config
|
|
556
|
+
|
|
557
|
+
config = load_config()
|
|
558
|
+
# Use 2x step timeout for overall workflow timeout (default: 2 hours)
|
|
559
|
+
workflow_timeout = getattr(config.workflow, 'timeout_seconds', 3600.0) * 2
|
|
560
|
+
|
|
561
|
+
async def _run_workflow_inner() -> WorkflowState:
|
|
562
|
+
"""Inner function to wrap actual execution for timeout protection."""
|
|
563
|
+
# Initialize execution
|
|
564
|
+
target_path = await self._initialize_run(workflow, target_file)
|
|
565
|
+
|
|
566
|
+
# Log workflow start
|
|
567
|
+
start_time = datetime.now()
|
|
568
|
+
if self.logger:
|
|
569
|
+
self.logger.info(
|
|
570
|
+
"Starting workflow execution",
|
|
571
|
+
extra={
|
|
572
|
+
"workflow_id": self.state.workflow_id if self.state else None,
|
|
573
|
+
"workflow_name": workflow.name if workflow else (self.workflow.name if self.workflow else None),
|
|
574
|
+
"max_steps": max_steps,
|
|
575
|
+
"total_steps": len(workflow.steps) if workflow else (len(self.workflow.steps) if self.workflow else 0),
|
|
576
|
+
"workflow_timeout": workflow_timeout,
|
|
577
|
+
}
|
|
578
|
+
)
|
|
579
|
+
|
|
580
|
+
# Use parallel execution for independent steps
|
|
581
|
+
steps_executed = 0
|
|
582
|
+
completed_step_ids = set(self.state.completed_steps)
|
|
583
|
+
running_step_ids: set[str] = set()
|
|
584
|
+
|
|
585
|
+
while (
|
|
586
|
+
self.state
|
|
587
|
+
and self.workflow
|
|
588
|
+
and self.state.status == "running"
|
|
589
|
+
):
|
|
590
|
+
if steps_executed >= max_steps:
|
|
591
|
+
self._handle_max_steps_exceeded(max_steps)
|
|
592
|
+
break
|
|
593
|
+
|
|
594
|
+
# Find steps ready to execute (dependencies met)
|
|
595
|
+
ready_steps = self._find_ready_steps(
|
|
596
|
+
completed_step_ids, running_step_ids
|
|
597
|
+
)
|
|
598
|
+
|
|
599
|
+
if not ready_steps:
|
|
600
|
+
if self._handle_no_ready_steps(completed_step_ids):
|
|
601
|
+
break
|
|
602
|
+
continue
|
|
603
|
+
|
|
604
|
+
# Execute ready steps in parallel
|
|
605
|
+
running_step_ids.update(step.id for step in ready_steps)
|
|
606
|
+
|
|
607
|
+
# Store completed steps with their results for dependency validation (BUG-003B)
|
|
608
|
+
completed_step_results: dict[str, StepResult] = {}
|
|
609
|
+
|
|
610
|
+
async def execute_step_wrapper(step: WorkflowStep) -> dict[str, Any]:
|
|
611
|
+
"""Wrapper to adapt _execute_step_for_parallel to parallel executor interface (BUG-003B fix)."""
|
|
612
|
+
# Validate dependencies before execution (BUG-003B)
|
|
613
|
+
can_execute, skip_reason = self._can_execute_step(step, completed_step_results)
|
|
614
|
+
|
|
615
|
+
if not can_execute:
|
|
616
|
+
# Create skipped StepResult
|
|
617
|
+
now = datetime.now()
|
|
618
|
+
skipped_result = StepResult(
|
|
619
|
+
step_id=step.id,
|
|
620
|
+
status="skipped",
|
|
621
|
+
success=False,
|
|
622
|
+
duration=0.0,
|
|
623
|
+
started_at=now,
|
|
624
|
+
completed_at=now,
|
|
625
|
+
skip_reason=skip_reason,
|
|
626
|
+
artifacts=[],
|
|
627
|
+
)
|
|
628
|
+
completed_step_results[step.id] = skipped_result
|
|
629
|
+
|
|
630
|
+
# Print skip message
|
|
631
|
+
from ..core.unicode_safe import safe_print
|
|
632
|
+
safe_print(f"\n⏭️ Skipping step '{step.id}': {skip_reason}\n")
|
|
633
|
+
|
|
634
|
+
# Return empty artifacts (step was skipped)
|
|
635
|
+
return {}
|
|
636
|
+
|
|
637
|
+
# Execute step
|
|
638
|
+
step_result = await self._execute_step_for_parallel(step=step, target_path=target_path)
|
|
639
|
+
completed_step_results[step.id] = step_result
|
|
640
|
+
|
|
641
|
+
# Check if step failed (BUG-003B)
|
|
642
|
+
if not step_result.success:
|
|
643
|
+
# Check if step is required
|
|
644
|
+
is_required = step.condition == "required"
|
|
645
|
+
|
|
646
|
+
if is_required:
|
|
647
|
+
# Halt workflow for required step failure
|
|
648
|
+
from ..core.unicode_safe import safe_print
|
|
649
|
+
safe_print(
|
|
650
|
+
f"\n❌ Workflow halted: Required step '{step.id}' failed\n"
|
|
651
|
+
f"Error: {step_result.error}\n"
|
|
652
|
+
)
|
|
653
|
+
|
|
654
|
+
# Update workflow status
|
|
655
|
+
if self.state:
|
|
656
|
+
self.state.status = "blocked"
|
|
657
|
+
self.state.error = step_result.error
|
|
658
|
+
|
|
659
|
+
# Raise error to stop execution
|
|
660
|
+
raise RuntimeError(step_result.error or "Step failed")
|
|
661
|
+
|
|
662
|
+
# Convert StepResult artifacts (list of names) back to dict format for compatibility
|
|
663
|
+
artifacts_dict: dict[str, dict[str, Any]] = {}
|
|
664
|
+
for artifact_name in step_result.artifacts:
|
|
665
|
+
artifacts_dict[artifact_name] = {
|
|
666
|
+
"name": artifact_name,
|
|
667
|
+
"path": artifact_name,
|
|
668
|
+
"status": "complete",
|
|
669
|
+
"created_by": step.id,
|
|
670
|
+
"created_at": step_result.completed_at.isoformat(),
|
|
671
|
+
}
|
|
672
|
+
|
|
673
|
+
return artifacts_dict
|
|
674
|
+
|
|
675
|
+
try:
|
|
676
|
+
results = await self.parallel_executor.execute_parallel(
|
|
677
|
+
steps=ready_steps,
|
|
678
|
+
execute_fn=execute_step_wrapper,
|
|
679
|
+
state=self.state,
|
|
680
|
+
)
|
|
681
|
+
|
|
682
|
+
# Process results and update state
|
|
683
|
+
should_break = await self._process_parallel_results(
|
|
684
|
+
results, completed_step_ids, running_step_ids
|
|
685
|
+
)
|
|
686
|
+
if should_break:
|
|
687
|
+
break
|
|
688
|
+
|
|
689
|
+
steps_executed += len(ready_steps)
|
|
690
|
+
self.save_state()
|
|
691
|
+
|
|
692
|
+
# Generate task manifest after step completion (Epic 7)
|
|
693
|
+
self._generate_manifest()
|
|
694
|
+
|
|
695
|
+
# Log progress every 10 steps
|
|
696
|
+
if steps_executed % 10 == 0 and self.logger:
|
|
697
|
+
elapsed = (datetime.now() - start_time).total_seconds()
|
|
698
|
+
self.logger.info(
|
|
699
|
+
f"Workflow progress: {steps_executed} steps executed in {elapsed:.1f}s",
|
|
700
|
+
extra={
|
|
701
|
+
"steps_executed": steps_executed,
|
|
702
|
+
"completed_steps": len(completed_step_ids),
|
|
703
|
+
"total_steps": len(self.workflow.steps),
|
|
704
|
+
"elapsed_seconds": elapsed,
|
|
705
|
+
}
|
|
706
|
+
)
|
|
707
|
+
|
|
708
|
+
except Exception as e:
|
|
709
|
+
self._handle_execution_error(e)
|
|
710
|
+
break
|
|
711
|
+
|
|
712
|
+
return await self._finalize_run(completed_step_ids)
|
|
713
|
+
|
|
714
|
+
# Wrap execution with timeout
|
|
715
|
+
try:
|
|
716
|
+
return await asyncio.wait_for(
|
|
717
|
+
_run_workflow_inner(),
|
|
718
|
+
timeout=workflow_timeout
|
|
719
|
+
)
|
|
720
|
+
except TimeoutError:
|
|
721
|
+
if self.state:
|
|
722
|
+
self.state.status = "failed"
|
|
723
|
+
self.state.error = f"Workflow timeout after {workflow_timeout}s"
|
|
724
|
+
self.save_state()
|
|
725
|
+
if self.logger:
|
|
726
|
+
self.logger.error(
|
|
727
|
+
f"Workflow execution exceeded {workflow_timeout}s timeout",
|
|
728
|
+
extra={
|
|
729
|
+
"workflow_id": self.state.workflow_id,
|
|
730
|
+
"timeout_seconds": workflow_timeout,
|
|
731
|
+
}
|
|
732
|
+
)
|
|
733
|
+
raise TimeoutError(
|
|
734
|
+
f"Workflow execution exceeded {workflow_timeout}s timeout. "
|
|
735
|
+
f"Increase timeout in config (workflow.timeout_seconds) or check for blocking operations."
|
|
736
|
+
) from None
|
|
737
|
+
finally:
|
|
738
|
+
variables = (getattr(self.state, "variables", None) or {}) if self.state else {}
|
|
739
|
+
beads_issue_id = variables.get("_beads_issue_id")
|
|
740
|
+
if beads_issue_id is None and self.state:
|
|
741
|
+
wf_id = getattr(self.state, "workflow_id", None)
|
|
742
|
+
if wf_id:
|
|
743
|
+
beads_file = self._state_dir() / wf_id / ".beads_issue_id"
|
|
744
|
+
if beads_file.exists():
|
|
745
|
+
try:
|
|
746
|
+
beads_issue_id = beads_file.read_text(
|
|
747
|
+
encoding="utf-8"
|
|
748
|
+
).strip() or None
|
|
749
|
+
except OSError:
|
|
750
|
+
pass
|
|
751
|
+
from ..simple_mode.beads_hooks import close_issue
|
|
752
|
+
close_issue(self.project_root, beads_issue_id)
|
|
753
|
+
|
|
754
|
+
    async def _initialize_run(
        self,
        workflow: Workflow | None,
        target_file: str | None,
    ) -> Path | None:
        """
        Initialize workflow execution with validation and return target path.

        Args:
            workflow: Optional workflow to install as the active one; when
                None, the previously loaded workflow is used.
            target_file: Optional target file (absolute or project-relative).

        Returns:
            Resolved target path, or None when no target applies.

        Raises:
            ValueError: If no workflow is loaded or the workflow has no steps.
        """
        if workflow:
            self.workflow = workflow
        if not self.workflow:
            raise ValueError(
                "No workflow loaded. Call start() or pass workflow."
            )

        # Validate workflow has steps
        if not self.workflow.steps:
            raise ValueError("Workflow has no steps to execute")

        # Ensure we have a state
        # The workflow_id prefix check detects a stale state from a different
        # workflow; start() creates a fresh one in that case.
        if not self.state or not self.state.workflow_id.startswith(f"{self.workflow.id}-"):
            await self.start(workflow=self.workflow)

        # Validate first step can be executed (no dependencies)
        # NOTE(review): this branch only logs when the first step has NO
        # dependencies; a first step WITH dependencies passes through silently
        # — presumably handled later by the ready-step scan. Confirm intent.
        first_step = self.workflow.steps[0]
        if not first_step.requires:  # No dependencies
            # First step should always be ready
            if self.logger:
                self.logger.info(
                    f"First step {first_step.id} has no dependencies - ready to execute",
                    extra={
                        "step_id": first_step.id,
                        "agent": first_step.agent,
                        "action": first_step.action,
                    }
                )

        # Establish target file
        # Relative paths are anchored at the project root; absolute paths are
        # used as-is.
        target_path: Path | None = None
        if target_file:
            target_path = (
                (self.project_root / target_file)
                if not Path(target_file).is_absolute()
                else Path(target_file)
            )
        else:
            target_path = self._default_target_file()

        if target_path and self.state:
            self.state.variables["target_file"] = str(target_path)

        return target_path
|
|
804
|
+
|
|
805
|
+
def _handle_max_steps_exceeded(self, max_steps: int) -> None:
    """Mark the workflow as failed because the step budget was exhausted.

    Args:
        max_steps: The configured step limit that was exceeded.
    """
    state = self.state
    state.status = "failed"
    state.error = f"Max steps exceeded ({max_steps}). Aborting."
    self.save_state()
|
|
810
|
+
|
|
811
|
+
def get_workflow_health(self) -> dict[str, Any]:
    """
    Get workflow health diagnostics.

    Returns:
        Dictionary with workflow health information including:
        - status: Current workflow status
        - elapsed_seconds: Time since workflow started
        - completed_steps: Number of completed steps
        - total_steps: Total number of steps
        - progress_percent: Percentage of steps completed
        - time_since_last_step: Seconds since last step completed
        - is_stuck: Whether workflow appears to be stuck (no progress in 5 minutes)
        - current_step: Current step ID
        - error: Error message if any
    """
    if not self.state:
        return {"status": "not_started", "message": "Workflow not started"}

    now = datetime.now()
    started = self.state.started_at
    elapsed = (now - started).total_seconds() if started else 0

    completed = len(self.state.completed_steps)
    total = len(self.workflow.steps) if self.workflow else 0

    # Most recent step-completion timestamp; fall back to workflow start
    # when no step has finished yet.
    finish_times = [
        se.completed_at
        for se in (self.state.step_executions or [])
        if se.completed_at
    ]
    last_step_time = max(finish_times) if finish_times else started

    time_since_last_step = (
        (now - last_step_time).total_seconds() if last_step_time else elapsed
    )
    stuck_threshold = 300  # 5 minutes without progress counts as stuck

    return {
        "status": self.state.status,
        "elapsed_seconds": elapsed,
        "completed_steps": completed,
        "total_steps": total,
        "progress_percent": (completed / total * 100) if total > 0 else 0,
        "time_since_last_step": time_since_last_step,
        "is_stuck": time_since_last_step > stuck_threshold,
        "current_step": self.state.current_step,
        "error": self.state.error,
    }
|
|
867
|
+
|
|
868
|
+
def _find_ready_steps(
    self,
    completed_step_ids: set[str],
    running_step_ids: set[str],
) -> list[WorkflowStep]:
    """Find steps ready to execute (dependencies met).

    Delegates the readiness computation to the parallel executor, handing
    it the set of artifacts produced so far.
    """
    produced = set(self.state.artifacts.keys())
    return self.parallel_executor.find_ready_steps(
        workflow_steps=self.workflow.steps,
        completed_step_ids=completed_step_ids,
        running_step_ids=running_step_ids,
        available_artifacts=produced,
    )
|
|
881
|
+
|
|
882
|
+
def _handle_no_ready_steps(self, completed_step_ids: set[str]) -> bool:
    """Handle case when no steps are ready with better diagnostics. Returns True if workflow should stop.

    Two outcomes:
    - All steps completed -> mark workflow "completed".
    - Steps remain but none are runnable -> mark workflow "failed" with a
      diagnostic message listing each pending step's missing artifacts.

    Always returns True: with no ready steps the scheduling loop cannot
    make progress either way.
    """
    # Guard clause: everything finished -> workflow is complete.
    if len(completed_step_ids) >= len(self.workflow.steps):
        self.state.status = "completed"
        self.state.current_step = None
        self.save_state()
        return True

    # Workflow is blocked - provide diagnostics
    available_artifacts = set(self.state.artifacts.keys())
    pending_steps = [
        s for s in self.workflow.steps
        if s.id not in completed_step_ids
    ]

    # Check what's blocking: for each pending step, list required artifacts
    # that no completed step has produced yet.
    blocking_info = []
    for step in pending_steps:
        missing = [req for req in (step.requires or []) if req not in available_artifacts]
        if missing:
            blocking_info.append(f"Step {step.id} ({step.agent}/{step.action}): missing {missing}")

    error_msg = (
        "Workflow blocked: no ready steps and workflow not complete. "
        f"Completed: {len(completed_step_ids)}/{len(self.workflow.steps)}. "
        f"Blocking issues: {blocking_info if blocking_info else 'Unknown - check step dependencies'}"
    )

    self.state.status = "failed"
    self.state.error = error_msg
    self.save_state()

    # Log detailed diagnostics
    if self.logger:
        self.logger.error(
            "Workflow blocked - no ready steps",
            extra={
                "completed_steps": list(completed_step_ids),
                "pending_steps": [s.id for s in pending_steps],
                "available_artifacts": list(available_artifacts),
                "blocking_info": blocking_info,
            }
        )

    return True
|
|
928
|
+
|
|
929
|
+
async def _process_parallel_results(
    self,
    results: list[Any],
    completed_step_ids: set[str],
    running_step_ids: set[str],
) -> bool:
    """
    Process results from parallel execution.
    Returns True if workflow should stop (failed or aborted).
    """
    for result in results:
        # Bind a per-step logger when a logger is configured.
        step_logger = None
        if self.logger:
            step_logger = self.logger.with_context(
                step_id=result.step.id,
                agent=result.step.agent,
            )

        if result.error:
            stop = await self._handle_step_error(
                result, step_logger, completed_step_ids, running_step_ids
            )
            if stop:
                return True
        else:
            # Handle successful step completion
            await self._handle_step_success(
                result, step_logger, completed_step_ids, running_step_ids
            )

    return False
|
|
959
|
+
|
|
960
|
+
async def _handle_step_error(
    self,
    result: Any,
    step_logger: Any,
    completed_step_ids: set[str],
    running_step_ids: set[str],
) -> bool:
    """Handle step error. Returns True if workflow should stop.

    Resolution order:
      1. Publish a STEP_FAILED event.
      2. Run the error-recovery manager (Epic 14), if configured, to get a
         user-friendly message / suggestions.
      3. If auto-progression is enabled, apply its decision
         (RETRY / SKIP / ABORT / CONTINUE).
      4. Otherwise fall back to the WorkflowFailureConfig policy
         (``on_step_fail``: retry / skip / fail / escalate).

    Mutates ``completed_step_ids`` / ``running_step_ids`` in place so the
    caller's scheduling loop immediately sees retries and skips.
    """
    # Publish step failed event (Phase 2)
    await self.event_bus.publish(
        WorkflowEvent(
            event_type=EventType.STEP_FAILED,
            workflow_id=self.state.workflow_id,
            step_id=result.step.id,
            data={
                "agent": result.step.agent,
                "action": result.step.action,
                "error": str(result.error),
                "attempts": getattr(result, "attempts", 1),
            },
            timestamp=datetime.now(),
            correlation_id=f"{self.state.workflow_id}:{result.step.id}",
        )
    )

    # Step failed - use error recovery and auto-progression (Epic 14)
    error_context = ErrorContext(
        workflow_id=self.state.workflow_id,
        step_id=result.step.id,
        agent=result.step.agent,
        action=result.step.action,
        step_number=None,
        total_steps=len(self.workflow.steps),
        workflow_status=self.state.status,
    )

    # Handle error with recovery manager (Epic 14)
    recovery_result = None
    user_friendly_error = None
    if self.error_recovery:
        recovery_result = self.error_recovery.handle_error(
            error=result.error,
            context=error_context,
            attempt=getattr(result, "attempts", 1),
        )

        # Store user-friendly message (can't modify frozen dataclass)
        if recovery_result.get("user_message"):
            user_friendly_error = recovery_result["user_message"]

    if self.auto_progression.should_auto_progress():
        # Get review result if this was a reviewer step
        review_result = None
        if result.step.agent == "reviewer":
            review_result = self.state.variables.get("reviewer_result")

        decision = self.auto_progression.handle_step_completion(
            step=result.step,
            state=self.state,
            step_execution=result.step_execution,
            review_result=review_result,
        )

        if decision.action == ProgressionAction.RETRY:
            # Retry the step - remove from completed and add back to ready
            completed_step_ids.discard(result.step.id)
            running_step_ids.discard(result.step.id)
            # Apply backoff if specified
            if decision.metadata.get("backoff_seconds"):
                await asyncio.sleep(decision.metadata["backoff_seconds"])
            if step_logger:
                step_logger.info(
                    f"Retrying step {result.step.id} (attempt {decision.retry_count})",
                )
            return False
        elif decision.action == ProgressionAction.SKIP:
            # Skip this step: count it as completed so dependents can run,
            # but record it in skipped_steps for reporting.
            completed_step_ids.add(result.step.id)
            running_step_ids.discard(result.step.id)
            if result.step.id not in self.state.skipped_steps:
                self.state.skipped_steps.append(result.step.id)
            if step_logger:
                step_logger.warning(
                    f"Skipping step {result.step.id}: {decision.reason}",
                )
            return False
        elif decision.action == ProgressionAction.ABORT:
            # Abort workflow
            self.state.status = "failed"
            self.state.error = decision.reason
            if step_logger:
                step_logger.error(
                    f"Workflow aborted: {decision.reason}",
                )

            # Publish workflow failed event (Phase 2)
            await self.event_bus.publish(
                WorkflowEvent(
                    event_type=EventType.WORKFLOW_FAILED,
                    workflow_id=self.state.workflow_id,
                    step_id=result.step.id,
                    data={
                        "error": decision.reason,
                        "step_id": result.step.id,
                    },
                    timestamp=datetime.now(),
                    correlation_id=f"{self.state.workflow_id}:{result.step.id}",
                )
            )

            self.save_state()
            if self.progress_manager:
                await self.progress_manager.send_workflow_failed(decision.reason)
                await self.progress_manager.stop()
            return True
        elif decision.action == ProgressionAction.CONTINUE:
            # Continue despite error (recoverable)
            completed_step_ids.add(result.step.id)
            running_step_ids.discard(result.step.id)
            if step_logger:
                step_logger.warning(
                    f"Step {result.step.id} failed but continuing: {decision.reason}",
                )
            return False

    # Fallback: WorkflowFailureConfig when auto-progression disabled (plan 3.1)
    error_message = user_friendly_error if user_friendly_error else str(result.error)
    try:
        from ..core.config import load_config

        cfg = load_config()
        wf = getattr(cfg, "workflow", None)
        fail_cfg = getattr(wf, "failure", None) if wf else None
    except Exception:  # pylint: disable=broad-except
        fail_cfg = None
    # Policy knobs; getattr defaults keep this working when config is absent.
    on_fail = getattr(fail_cfg, "on_step_fail", "fail") or "fail"
    retry_count = getattr(fail_cfg, "retry_count", 1) or 0
    escalate_pause = getattr(fail_cfg, "escalate_to_pause", True)

    # Per-step retry counters persisted in state variables; re-validate the
    # stored value since state variables are untyped.
    raw = self.state.variables.get("_step_retries")
    retries_var = raw if isinstance(raw, dict) else {}
    self.state.variables["_step_retries"] = retries_var
    retries_used = retries_var.get(result.step.id, 0)

    if on_fail == "retry" and retries_used < retry_count:
        retries_var[result.step.id] = retries_used + 1
        completed_step_ids.discard(result.step.id)
        running_step_ids.discard(result.step.id)
        if step_logger:
            step_logger.info(f"Retrying step {result.step.id} (attempt {retries_used + 1}/{retry_count})")
        return False

    if on_fail == "skip":
        completed_step_ids.add(result.step.id)
        running_step_ids.discard(result.step.id)
        if result.step.id not in self.state.skipped_steps:
            self.state.skipped_steps.append(result.step.id)
        if step_logger:
            step_logger.warning(f"Skipping step {result.step.id}: {error_message}")
        return False

    # fail or escalate: stop workflow.  "escalate" + escalate_to_pause pauses
    # (resumable) instead of hard-failing.
    self.state.status = "paused" if (on_fail == "escalate" and escalate_pause) else "failed"
    self.state.error = f"Step {result.step.id} failed: {error_message}"
    suggest = None
    if on_fail == "escalate" and recovery_result and recovery_result.get("suggestions"):
        # Surface at most 3 recovery suggestions in the failure event.
        suggest = [getattr(s, "action", str(s)) for s in recovery_result["suggestions"][:3]]

    # Publish workflow failed event (Phase 2)
    await self.event_bus.publish(
        WorkflowEvent(
            event_type=EventType.WORKFLOW_FAILED,
            workflow_id=self.state.workflow_id,
            step_id=result.step.id,
            data={
                "error": error_message,
                "step_id": result.step.id,
                "behavior": on_fail,
                "suggestions": suggest,
            },
            timestamp=datetime.now(),
            correlation_id=f"{self.state.workflow_id}:{result.step.id}",
        )
    )

    self.save_state()

    # Send failure update
    if self.progress_manager:
        await self.progress_manager.send_workflow_failed(error_message)
        await self.progress_manager.stop()
    return True
|
|
1152
|
+
|
|
1153
|
+
async def _handle_step_success(
    self,
    result: Any,
    step_logger: Any,
    completed_step_ids: set[str],
    running_step_ids: set[str],
) -> None:
    """Handle successful step completion.

    In order: records completion in the id sets, prints/publishes artifact
    and step events, runs auto-progression gate evaluation, reports to the
    progress manager, checkpoints (Epic 12), and registers produced
    artifacts on the workflow state.
    """
    # Mark step as completed
    completed_step_ids.add(result.step.id)
    running_step_ids.discard(result.step.id)

    # Get review result if this was a reviewer step (for gate evaluation)
    review_result = None
    if result.step.agent == "reviewer":
        review_result = self.state.variables.get("reviewer_result")

    # Issue fix: Print artifact paths after each step (Hidden workflow state)
    if self.print_paths and result.artifacts:
        self._print_step_artifacts(result.step, result.artifacts, result.step_execution)

    # Publish step completed event (Phase 2)
    await self.event_bus.publish(
        WorkflowEvent(
            event_type=EventType.STEP_COMPLETED,
            workflow_id=self.state.workflow_id,
            step_id=result.step.id,
            data={
                "agent": result.step.agent,
                "action": result.step.action,
                "duration_seconds": result.step_execution.duration_seconds,
                "artifact_count": len(result.artifacts) if result.artifacts else 0,
            },
            timestamp=datetime.now(),
            correlation_id=f"{self.state.workflow_id}:{result.step.id}",
        )
    )

    # Publish artifact created events (Phase 2)
    if result.artifacts:
        for artifact_name, artifact_data in result.artifacts.items():
            await self.event_bus.publish(
                WorkflowEvent(
                    event_type=EventType.ARTIFACT_CREATED,
                    workflow_id=self.state.workflow_id,
                    step_id=result.step.id,
                    data={
                        "artifact_name": artifact_name,
                        "artifact_path": artifact_data.get("path", ""),
                        "created_by": result.step.id,
                    },
                    timestamp=datetime.now(),
                    correlation_id=f"{self.state.workflow_id}:{result.step.id}",
                )
            )

    # Use auto-progression to handle step completion and gate evaluation
    if self.auto_progression.should_auto_progress():
        decision = self.auto_progression.handle_step_completion(
            step=result.step,
            state=self.state,
            step_execution=result.step_execution,
            review_result=review_result,
        )

        # Update current step based on gate decision if needed
        if decision.next_step_id:
            self.state.current_step = decision.next_step_id

        if step_logger:
            step_logger.info(
                f"Step completed: {decision.reason}",
                action=result.step.action,
                duration_seconds=result.step_execution.duration_seconds,
                artifact_count=len(result.artifacts) if result.artifacts else 0,
                next_step=decision.next_step_id,
            )
    else:
        if step_logger:
            step_logger.info(
                "Step completed",
                action=result.step.action,
                duration_seconds=result.step_execution.duration_seconds,
                artifact_count=len(result.artifacts) if result.artifacts else 0,
            )

    # Send step completed update (Epic 11: Include gate result for quality dashboard)
    is_gate_step = result.step.agent == "reviewer" and result.step.gate is not None
    if self.progress_manager:
        # Extract gate result if this was a reviewer step
        gate_result = None
        if result.step.agent == "reviewer" and review_result:
            # Get gate result from state variables (set by auto-progression)
            gate_last = self.state.variables.get("gate_last", {})
            if gate_last:
                gate_result = gate_last

                # Publish gate evaluated event (Phase 2)
                # NOTE(review): published only when a gate result exists;
                # gate_result.get(...) would raise on None otherwise.
                await self.event_bus.publish(
                    WorkflowEvent(
                        event_type=EventType.GATE_EVALUATED,
                        workflow_id=self.state.workflow_id,
                        step_id=result.step.id,
                        data={
                            "gate_result": gate_result,
                            "passed": gate_result.get("passed", False),
                        },
                        timestamp=datetime.now(),
                        correlation_id=f"{self.state.workflow_id}:{result.step.id}",
                    )
                )

        await self.progress_manager.send_step_completed(
            step_id=result.step.id,
            agent=result.step.agent,
            action=result.step.action,
            duration=result.step_execution.duration_seconds,
            gate_result=gate_result,
        )

    # Epic 12: Automatic checkpointing after step completion
    if self.checkpoint_manager.should_checkpoint(
        step=result.step,
        state=self.state,
        is_gate_step=is_gate_step,
    ):
        # Enhance state with checkpoint metadata before saving
        checkpoint_metadata = self.checkpoint_manager.get_checkpoint_metadata(
            state=self.state,
            step=result.step,
        )
        # Store metadata in state variables for persistence
        if "_checkpoint_metadata" not in self.state.variables:
            self.state.variables["_checkpoint_metadata"] = {}
        self.state.variables["_checkpoint_metadata"].update(checkpoint_metadata)

        # Save checkpoint
        self.save_state()
        self.checkpoint_manager.record_checkpoint(result.step.id)

        if self.logger:
            self.logger.info(
                f"Checkpoint created after step {result.step.id}",
                checkpoint_metadata=checkpoint_metadata,
            )

    # Update artifacts from result: register each produced artifact on the
    # workflow state, keyed by artifact name, so dependent steps can find it.
    if result.artifacts and isinstance(result.artifacts, dict):
        for art_name, art_data in result.artifacts.items():
            if isinstance(art_data, dict):
                artifact = Artifact(
                    name=art_data.get("name", art_name),
                    path=art_data.get("path", ""),
                    status="complete",
                    created_by=result.step.id,
                    created_at=datetime.now(),
                    metadata=art_data.get("metadata", {}),
                )
                self.state.artifacts[artifact.name] = artifact
|
|
1312
|
+
|
|
1313
|
+
def _handle_execution_error(self, error: Exception) -> None:
    """Record an unexpected execution failure on the workflow state.

    Marks the workflow failed, captures the error text, logs with a
    traceback when a logger is configured, and persists the state.
    """
    self.state.error = str(error)
    self.state.status = "failed"
    if self.logger:
        self.logger.error(
            "Workflow execution failed",
            error=str(error),
            exc_info=True,
        )
    self.save_state()
|
|
1324
|
+
|
|
1325
|
+
async def _finalize_run(self, completed_step_ids: set[str]) -> WorkflowState:
    """Finalize workflow execution and return final state.

    Transitions a still-"running" workflow to "completed", publishes the
    completion event, persists state, notifies the progress manager,
    cleans up worktrees, and dual-writes an analytics record (all
    post-completion work is best-effort).

    Raises:
        RuntimeError: If the workflow state was lost during execution.
    """
    if not self.state:
        raise RuntimeError("Workflow state lost during execution")

    # Mark as completed if no error (a failed/paused status set earlier
    # is left untouched).
    if self.state.status == "running":
        self.state.status = "completed"
        if self.logger:
            self.logger.info(
                "Workflow completed",
                completed_steps=len(completed_step_ids),
                total_steps=len(self.workflow.steps) if self.workflow else 0,
            )

        # Publish workflow completed event (Phase 2)
        # NOTE(review): assumed to fire only on the running->completed
        # transition (original nesting was ambiguous) — confirm intent.
        await self.event_bus.publish(
            WorkflowEvent(
                event_type=EventType.WORKFLOW_COMPLETED,
                workflow_id=self.state.workflow_id,
                step_id=None,
                data={
                    "completed_steps": len(completed_step_ids),
                    "total_steps": len(self.workflow.steps) if self.workflow else 0,
                },
                timestamp=datetime.now(),
                correlation_id=self.state.workflow_id,
            )
        )

    self.save_state()

    # Send completion summary
    if self.progress_manager:
        await self.progress_manager.send_workflow_completed()
        await self.progress_manager.stop()

    # Best-effort cleanup of worktrees created during this run
    try:
        await self.worktree_manager.cleanup_all()
    except Exception:
        pass

    # Dual-write workflow completion to analytics (best-effort; import is
    # local so analytics stays an optional dependency of finalization)
    if self.state.status in ("completed", "failed") and self.workflow:
        try:
            from .analytics_dual_write import record_workflow_execution_to_analytics

            duration_sec = 0.0
            if self.state.started_at:
                end = datetime.now()
                duration_sec = (end - self.state.started_at).total_seconds()
            record_workflow_execution_to_analytics(
                project_root=self.project_root,
                workflow_id=self.state.workflow_id,
                workflow_name=self.workflow.name or self.state.workflow_id,
                duration_seconds=duration_sec,
                steps=len(self.workflow.steps),
                success=(self.state.status == "completed"),
            )
        except Exception:  # pylint: disable=broad-except
            pass

    return self.state
|
|
1389
|
+
|
|
1390
|
+
async def _execute_step_for_parallel(
|
|
1391
|
+
self, step: WorkflowStep, target_path: Path | None
|
|
1392
|
+
) -> StepResult:
|
|
1393
|
+
"""
|
|
1394
|
+
Execute a single workflow step using Cursor Skills and return result (BUG-003B fix).
|
|
1395
|
+
|
|
1396
|
+
This method now returns StepResult with proper error handling:
|
|
1397
|
+
- success=True + artifacts on success
|
|
1398
|
+
- success=False + error details on failure (no exception raised)
|
|
1399
|
+
|
|
1400
|
+
State updates (step_execution tracking) are handled by ParallelStepExecutor.
|
|
1401
|
+
"""
|
|
1402
|
+
if not self.state or not self.workflow:
|
|
1403
|
+
raise ValueError("Workflow not started")
|
|
1404
|
+
|
|
1405
|
+
action = self._normalize_action(step.action)
|
|
1406
|
+
agent_name = (step.agent or "").strip().lower()
|
|
1407
|
+
|
|
1408
|
+
# Publish step started event (Phase 2)
|
|
1409
|
+
await self.event_bus.publish(
|
|
1410
|
+
WorkflowEvent(
|
|
1411
|
+
event_type=EventType.STEP_STARTED,
|
|
1412
|
+
workflow_id=self.state.workflow_id,
|
|
1413
|
+
step_id=step.id,
|
|
1414
|
+
data={
|
|
1415
|
+
"agent": agent_name,
|
|
1416
|
+
"action": action,
|
|
1417
|
+
"step_id": step.id,
|
|
1418
|
+
},
|
|
1419
|
+
timestamp=datetime.now(),
|
|
1420
|
+
correlation_id=f"{self.state.workflow_id}:{step.id}",
|
|
1421
|
+
)
|
|
1422
|
+
)
|
|
1423
|
+
|
|
1424
|
+
# Handle completion/finalization steps that don't require agent execution
|
|
1425
|
+
if agent_name == "orchestrator" and action in ["finalize", "complete"]:
|
|
1426
|
+
# Return successful result for completion steps (no artifacts)
|
|
1427
|
+
now = datetime.now()
|
|
1428
|
+
return StepResult(
|
|
1429
|
+
step_id=step.id,
|
|
1430
|
+
status="completed",
|
|
1431
|
+
success=True,
|
|
1432
|
+
duration=0.0,
|
|
1433
|
+
started_at=now,
|
|
1434
|
+
completed_at=now,
|
|
1435
|
+
artifacts=[],
|
|
1436
|
+
)
|
|
1437
|
+
|
|
1438
|
+
# Track step start time for duration calculation
|
|
1439
|
+
step_started_at = datetime.now()
|
|
1440
|
+
|
|
1441
|
+
# Use context manager for worktree lifecycle (guaranteed cleanup)
|
|
1442
|
+
async with self._worktree_context(step) as worktree_path:
|
|
1443
|
+
worktree_name = self._worktree_name_for_step(step.id)
|
|
1444
|
+
|
|
1445
|
+
# Try AgentHandlerRegistry first for context-aware execution (BUG-003 fix)
|
|
1446
|
+
# Falls back to SkillInvoker if no handler found
|
|
1447
|
+
from .agent_handlers import AgentHandlerRegistry
|
|
1448
|
+
|
|
1449
|
+
# Helper function to run agents (needed by handlers)
|
|
1450
|
+
async def run_agent(agent: str, command: str, **kwargs: Any) -> dict[str, Any]:
|
|
1451
|
+
"""Run agent by importing and invoking its class."""
|
|
1452
|
+
module = __import__(f"tapps_agents.agents.{agent}.agent", fromlist=["*"])
|
|
1453
|
+
class_name = f"{agent.title()}Agent"
|
|
1454
|
+
agent_cls = getattr(module, class_name)
|
|
1455
|
+
instance = agent_cls()
|
|
1456
|
+
await instance.activate(self.project_root)
|
|
1457
|
+
try:
|
|
1458
|
+
return await instance.run(command, **kwargs)
|
|
1459
|
+
finally:
|
|
1460
|
+
if hasattr(instance, 'close'):
|
|
1461
|
+
await instance.close()
|
|
1462
|
+
|
|
1463
|
+
# Create handler registry and try to find handler
|
|
1464
|
+
registry = AgentHandlerRegistry.create_registry(
|
|
1465
|
+
project_root=self.project_root,
|
|
1466
|
+
state=self.state,
|
|
1467
|
+
workflow=self.workflow,
|
|
1468
|
+
run_agent_fn=run_agent,
|
|
1469
|
+
executor=self,
|
|
1470
|
+
)
|
|
1471
|
+
|
|
1472
|
+
handler = registry.find_handler(agent_name, action)
|
|
1473
|
+
|
|
1474
|
+
try:
|
|
1475
|
+
from ..core.unicode_safe import safe_print
|
|
1476
|
+
|
|
1477
|
+
if handler:
|
|
1478
|
+
# Use handler for context-aware execution (e.g., ImplementerHandler)
|
|
1479
|
+
safe_print(f"\n[EXEC] Executing {agent_name}/{action} via handler...", flush=True)
|
|
1480
|
+
|
|
1481
|
+
# Execute handler and get artifacts directly
|
|
1482
|
+
# Note: Handler execution happens in main working directory, not worktree
|
|
1483
|
+
# Worktree is only used for skill invocation fallback
|
|
1484
|
+
created_artifacts_list = await handler.execute(step, action, target_path)
|
|
1485
|
+
|
|
1486
|
+
# Write success marker
|
|
1487
|
+
step_completed_at = datetime.now()
|
|
1488
|
+
duration = (step_completed_at - step_started_at).total_seconds()
|
|
1489
|
+
|
|
1490
|
+
found_artifact_paths = [art["path"] for art in (created_artifacts_list or [])]
|
|
1491
|
+
artifact_names = [art["name"] for art in (created_artifacts_list or [])]
|
|
1492
|
+
|
|
1493
|
+
marker_path = self.marker_writer.write_done_marker(
|
|
1494
|
+
workflow_id=self.state.workflow_id,
|
|
1495
|
+
step_id=step.id,
|
|
1496
|
+
agent=agent_name,
|
|
1497
|
+
action=action,
|
|
1498
|
+
worktree_name=worktree_name,
|
|
1499
|
+
worktree_path=str(worktree_path),
|
|
1500
|
+
expected_artifacts=step.creates or [],
|
|
1501
|
+
found_artifacts=found_artifact_paths,
|
|
1502
|
+
duration_seconds=duration,
|
|
1503
|
+
started_at=step_started_at,
|
|
1504
|
+
completed_at=step_completed_at,
|
|
1505
|
+
)
|
|
1506
|
+
|
|
1507
|
+
if self.logger:
|
|
1508
|
+
self.logger.debug(
|
|
1509
|
+
f"Handler execution complete for step {step.id}",
|
|
1510
|
+
marker_path=str(marker_path),
|
|
1511
|
+
)
|
|
1512
|
+
|
|
1513
|
+
# Return successful StepResult (BUG-003B fix)
|
|
1514
|
+
return StepResult(
|
|
1515
|
+
step_id=step.id,
|
|
1516
|
+
status="completed",
|
|
1517
|
+
success=True,
|
|
1518
|
+
duration=duration,
|
|
1519
|
+
started_at=step_started_at,
|
|
1520
|
+
completed_at=step_completed_at,
|
|
1521
|
+
artifacts=artifact_names,
|
|
1522
|
+
)
|
|
1523
|
+
else:
|
|
1524
|
+
# Fall back to SkillInvoker for steps without handlers
|
|
1525
|
+
safe_print(f"\n[EXEC] Executing {agent_name}/{action} via skill...", flush=True)
|
|
1526
|
+
await self.skill_invoker.invoke_skill(
|
|
1527
|
+
agent_name=agent_name,
|
|
1528
|
+
action=action,
|
|
1529
|
+
step=step,
|
|
1530
|
+
target_path=target_path,
|
|
1531
|
+
worktree_path=worktree_path,
|
|
1532
|
+
state=self.state,
|
|
1533
|
+
)
|
|
1534
|
+
# Skill invoker handles execution (direct execution or Cursor Skills)
|
|
1535
|
+
# Artifacts are extracted after completion
|
|
1536
|
+
|
|
1537
|
+
# Extract artifacts from worktree (skill_invoker path only)
|
|
1538
|
+
artifacts = await self.worktree_manager.extract_artifacts(
|
|
1539
|
+
worktree_path=worktree_path,
|
|
1540
|
+
step=step,
|
|
1541
|
+
)
|
|
1542
|
+
|
|
1543
|
+
# Extract artifact paths and names
|
|
1544
|
+
found_artifact_paths = []
|
|
1545
|
+
artifact_names = []
|
|
1546
|
+
for artifact in artifacts:
|
|
1547
|
+
found_artifact_paths.append(artifact.path)
|
|
1548
|
+
artifact_names.append(artifact.name)
|
|
1549
|
+
|
|
1550
|
+
# Write DONE marker for successful completion
|
|
1551
|
+
step_completed_at = datetime.now()
|
|
1552
|
+
duration = (step_completed_at - step_started_at).total_seconds()
|
|
1553
|
+
|
|
1554
|
+
marker_path = self.marker_writer.write_done_marker(
|
|
1555
|
+
workflow_id=self.state.workflow_id,
|
|
1556
|
+
step_id=step.id,
|
|
1557
|
+
agent=agent_name,
|
|
1558
|
+
action=action,
|
|
1559
|
+
worktree_name=worktree_name,
|
|
1560
|
+
worktree_path=str(worktree_path),
|
|
1561
|
+
expected_artifacts=step.creates or [],
|
|
1562
|
+
found_artifacts=found_artifact_paths,
|
|
1563
|
+
duration_seconds=duration,
|
|
1564
|
+
started_at=step_started_at,
|
|
1565
|
+
completed_at=step_completed_at,
|
|
1566
|
+
)
|
|
1567
|
+
|
|
1568
|
+
if self.logger:
|
|
1569
|
+
self.logger.debug(
|
|
1570
|
+
f"DONE marker written for step {step.id}",
|
|
1571
|
+
marker_path=str(marker_path),
|
|
1572
|
+
)
|
|
1573
|
+
|
|
1574
|
+
# Return successful StepResult (BUG-003B fix)
|
|
1575
|
+
# Worktree cleanup is handled by context manager
|
|
1576
|
+
return StepResult(
|
|
1577
|
+
step_id=step.id,
|
|
1578
|
+
status="completed",
|
|
1579
|
+
success=True,
|
|
1580
|
+
duration=duration,
|
|
1581
|
+
started_at=step_started_at,
|
|
1582
|
+
completed_at=step_completed_at,
|
|
1583
|
+
artifacts=artifact_names,
|
|
1584
|
+
)
|
|
1585
|
+
|
|
1586
|
+
except (TimeoutError, RuntimeError) as e:
|
|
1587
|
+
# Write FAILED marker for timeout or execution errors
|
|
1588
|
+
step_failed_at = datetime.now()
|
|
1589
|
+
duration = (step_failed_at - step_started_at).total_seconds()
|
|
1590
|
+
error_type = type(e).__name__
|
|
1591
|
+
error_msg = str(e)
|
|
1592
|
+
error_tb = traceback.format_exc()
|
|
1593
|
+
|
|
1594
|
+
# Try to get completion status if available (for missing artifacts)
|
|
1595
|
+
found_artifact_paths = []
|
|
1596
|
+
try:
|
|
1597
|
+
from .cursor_skill_helper import check_skill_completion
|
|
1598
|
+
completion_status = check_skill_completion(
|
|
1599
|
+
worktree_path=worktree_path,
|
|
1600
|
+
expected_artifacts=step.creates or [],
|
|
1601
|
+
)
|
|
1602
|
+
found_artifact_paths = completion_status.get("found_artifacts", [])
|
|
1603
|
+
except Exception:
|
|
1604
|
+
pass
|
|
1605
|
+
|
|
1606
|
+
marker_path = self.marker_writer.write_failed_marker(
|
|
1607
|
+
workflow_id=self.state.workflow_id,
|
|
1608
|
+
step_id=step.id,
|
|
1609
|
+
agent=agent_name,
|
|
1610
|
+
action=action,
|
|
1611
|
+
error=error_msg,
|
|
1612
|
+
worktree_name=worktree_name,
|
|
1613
|
+
worktree_path=str(worktree_path),
|
|
1614
|
+
expected_artifacts=step.creates or [],
|
|
1615
|
+
found_artifacts=found_artifact_paths,
|
|
1616
|
+
duration_seconds=duration,
|
|
1617
|
+
started_at=step_started_at,
|
|
1618
|
+
failed_at=step_failed_at,
|
|
1619
|
+
error_type=error_type,
|
|
1620
|
+
metadata={
|
|
1621
|
+
"marker_location": f".tapps-agents/workflows/markers/{self.state.workflow_id}/step-{step.id}/FAILED.json",
|
|
1622
|
+
},
|
|
1623
|
+
)
|
|
1624
|
+
|
|
1625
|
+
if self.logger:
|
|
1626
|
+
self.logger.warning(
|
|
1627
|
+
f"FAILED marker written for step {step.id}",
|
|
1628
|
+
marker_path=str(marker_path),
|
|
1629
|
+
error=error_msg,
|
|
1630
|
+
)
|
|
1631
|
+
|
|
1632
|
+
# Include marker location in error message for better troubleshooting
|
|
1633
|
+
from ..core.unicode_safe import safe_print
|
|
1634
|
+
safe_print(
|
|
1635
|
+
f"\n[INFO] Failure marker written to: {marker_path}",
|
|
1636
|
+
flush=True,
|
|
1637
|
+
)
|
|
1638
|
+
|
|
1639
|
+
# Return failed StepResult (BUG-003B fix - don't raise)
|
|
1640
|
+
return StepResult(
|
|
1641
|
+
step_id=step.id,
|
|
1642
|
+
status="failed",
|
|
1643
|
+
success=False,
|
|
1644
|
+
duration=duration,
|
|
1645
|
+
started_at=step_started_at,
|
|
1646
|
+
completed_at=step_failed_at,
|
|
1647
|
+
error=error_msg,
|
|
1648
|
+
error_traceback=error_tb,
|
|
1649
|
+
artifacts=[],
|
|
1650
|
+
)
|
|
1651
|
+
except Exception as e:
|
|
1652
|
+
# Write FAILED marker for unexpected errors
|
|
1653
|
+
step_failed_at = datetime.now()
|
|
1654
|
+
duration = (step_failed_at - step_started_at).total_seconds()
|
|
1655
|
+
error_type = type(e).__name__
|
|
1656
|
+
error_msg = str(e)
|
|
1657
|
+
error_tb = traceback.format_exc()
|
|
1658
|
+
|
|
1659
|
+
marker_path = self.marker_writer.write_failed_marker(
|
|
1660
|
+
workflow_id=self.state.workflow_id,
|
|
1661
|
+
step_id=step.id,
|
|
1662
|
+
agent=agent_name,
|
|
1663
|
+
action=action,
|
|
1664
|
+
error=error_msg,
|
|
1665
|
+
worktree_name=worktree_name,
|
|
1666
|
+
worktree_path=str(worktree_path) if 'worktree_path' in locals() else None,
|
|
1667
|
+
expected_artifacts=step.creates or [],
|
|
1668
|
+
found_artifacts=[],
|
|
1669
|
+
duration_seconds=duration,
|
|
1670
|
+
started_at=step_started_at,
|
|
1671
|
+
failed_at=step_failed_at,
|
|
1672
|
+
error_type=error_type,
|
|
1673
|
+
metadata={
|
|
1674
|
+
"marker_location": f".tapps-agents/workflows/markers/{self.state.workflow_id}/step-{step.id}/FAILED.json",
|
|
1675
|
+
},
|
|
1676
|
+
)
|
|
1677
|
+
|
|
1678
|
+
if self.logger:
|
|
1679
|
+
self.logger.error(
|
|
1680
|
+
f"FAILED marker written for step {step.id} (unexpected error)",
|
|
1681
|
+
marker_path=str(marker_path),
|
|
1682
|
+
error=error_msg,
|
|
1683
|
+
exc_info=True,
|
|
1684
|
+
)
|
|
1685
|
+
|
|
1686
|
+
# Return failed StepResult (BUG-003B fix - don't raise)
|
|
1687
|
+
return StepResult(
|
|
1688
|
+
step_id=step.id,
|
|
1689
|
+
status="failed",
|
|
1690
|
+
success=False,
|
|
1691
|
+
duration=duration,
|
|
1692
|
+
started_at=step_started_at,
|
|
1693
|
+
completed_at=step_failed_at,
|
|
1694
|
+
error=error_msg,
|
|
1695
|
+
error_traceback=error_tb,
|
|
1696
|
+
artifacts=[],
|
|
1697
|
+
)
|
|
1698
|
+
|
|
1699
|
+
    @asynccontextmanager
    async def _worktree_context(
        self, step: WorkflowStep
    ) -> AsyncIterator[Path]:
        """
        Context manager for worktree lifecycle management.

        Ensures worktree is properly cleaned up even on cancellation or exceptions.
        This is a 2025 best practice for resource management in async code.

        Args:
            step: Workflow step that needs a worktree

        Yields:
            Path to the worktree

        Example:
            async with self._worktree_context(step) as worktree_path:
                # Use worktree_path here
                # Worktree automatically cleaned up on exit
        """
        worktree_name = self._worktree_name_for_step(step.id)
        # Tracked outside the try so the finally block can tell whether
        # creation ever succeeded (no cleanup attempted when it did not).
        worktree_path: Path | None = None

        try:
            # Create worktree
            worktree_path = await self.worktree_manager.create_worktree(
                worktree_name=worktree_name
            )

            # Copy artifacts from previous steps to worktree
            # NOTE(review): assumes self.state is set when this is called -
            # callers appear to start the workflow first; confirm.
            artifacts_list = list(self.state.artifacts.values())
            await self.worktree_manager.copy_artifacts(
                worktree_path=worktree_path,
                artifacts=artifacts_list,
            )

            # Yield worktree path
            yield worktree_path

        finally:
            # Always cleanup, even on cancellation or exception
            if worktree_path:
                try:
                    # Determine if we should delete the branch based on configuration
                    from ..core.config import load_config
                    config = load_config()
                    should_delete = (
                        config.workflow.branch_cleanup.delete_branches_on_cleanup
                        if (
                            config.workflow.branch_cleanup
                            and config.workflow.branch_cleanup.enabled
                        )
                        else True  # Default to True for backward compatibility (same as parameter default)
                    )
                    await self.worktree_manager.remove_worktree(
                        worktree_name, delete_branch=should_delete
                    )
                except Exception as e:
                    # Log but don't raise - cleanup failures shouldn't break workflow
                    # (deliberate best-effort: a leaked worktree is preferable to
                    # masking the original error raised inside the `with` body).
                    if self.logger:
                        self.logger.warning(
                            f"Failed to cleanup worktree {worktree_name}: {e}",
                            step_id=step.id,
                        )
|
|
1764
|
+
|
|
1765
|
+
def _worktree_name_for_step(self, step_id: str) -> str:
|
|
1766
|
+
"""
|
|
1767
|
+
Deterministic, collision-resistant worktree name for a workflow step.
|
|
1768
|
+
|
|
1769
|
+
Keeps names short/safe for Windows while still traceable back to workflow+step.
|
|
1770
|
+
"""
|
|
1771
|
+
if not self.state:
|
|
1772
|
+
raise ValueError("Workflow not started")
|
|
1773
|
+
raw = f"workflow-{self.state.workflow_id}-step-{step_id}"
|
|
1774
|
+
digest = hashlib.sha256(raw.encode("utf-8")).hexdigest()[:8]
|
|
1775
|
+
base = f"{raw}-{digest}"
|
|
1776
|
+
return WorktreeManager._sanitize_component(base, max_len=80)
|
|
1777
|
+
|
|
1778
|
+
def get_current_step(self) -> WorkflowStep | None:
|
|
1779
|
+
"""Get the current workflow step."""
|
|
1780
|
+
if not self.workflow or not self.state:
|
|
1781
|
+
return None
|
|
1782
|
+
|
|
1783
|
+
for step in self.workflow.steps:
|
|
1784
|
+
if step.id == self.state.current_step:
|
|
1785
|
+
return step
|
|
1786
|
+
return None
|
|
1787
|
+
|
|
1788
|
+
def _default_target_file(self) -> Path | None:
|
|
1789
|
+
"""Get default target file path."""
|
|
1790
|
+
# Try common locations
|
|
1791
|
+
candidates = [
|
|
1792
|
+
self.project_root / "src" / "app.py",
|
|
1793
|
+
self.project_root / "app.py",
|
|
1794
|
+
self.project_root / "main.py",
|
|
1795
|
+
]
|
|
1796
|
+
for candidate in candidates:
|
|
1797
|
+
if candidate.exists():
|
|
1798
|
+
return candidate
|
|
1799
|
+
return None
|
|
1800
|
+
|
|
1801
|
+
    async def _execute_step(
        self, step: WorkflowStep, target_path: Path | None
    ) -> None:
        """
        Execute a single workflow step using Cursor Skills.

        Flow: record a StepExecution, create an isolated git worktree, copy
        prior artifacts in, invoke the skill, poll for completion artifacts,
        extract produced artifacts back into state, verify story acceptance
        criteria (when present), then remove the worktree and advance.

        Args:
            step: Workflow step to execute
            target_path: Optional target file path

        Raises:
            ValueError: If the workflow has not been started.
            TimeoutError: If the skill does not produce its expected
                artifacts within the polling window.
        """
        if not self.state or not self.workflow:
            raise ValueError("Workflow not started")

        action = self._normalize_action(step.action)
        agent_name = (step.agent or "").strip().lower()

        # Handle completion/finalization steps that don't require agent execution
        if agent_name == "orchestrator" and action in ["finalize", "complete"]:
            # Mark step as completed without executing an agent
            step_execution = StepExecution(
                step_id=step.id,
                agent=agent_name,
                action=action,
                started_at=datetime.now(),
                completed_at=datetime.now(),
                status="completed",
            )
            self.state.step_executions.append(step_execution)
            self._advance_step()
            self.save_state()
            return

        # Create step execution tracking
        step_execution = StepExecution(
            step_id=step.id,
            agent=agent_name,
            action=action,
            started_at=datetime.now(),
        )
        self.state.step_executions.append(step_execution)

        try:
            # Create worktree for this step
            worktree_name = self._worktree_name_for_step(step.id)
            worktree_path = await self.worktree_manager.create_worktree(
                worktree_name=worktree_name
            )

            # Copy artifacts from previous steps to worktree
            artifacts_list = list(self.state.artifacts.values())
            await self.worktree_manager.copy_artifacts(
                worktree_path=worktree_path,
                artifacts=artifacts_list,
            )

            # Invoke Skill via SkillInvoker (direct execution)
            result = await self.skill_invoker.invoke_skill(
                agent_name=agent_name,
                action=action,
                step=step,
                target_path=target_path,
                worktree_path=worktree_path,
                state=self.state,
            )

            # Wait for Skill to complete (direct execution)
            # Poll for artifacts or completion marker
            import asyncio

            from .cursor_skill_helper import check_skill_completion

            max_wait_time = 3600  # 1 hour max wait
            poll_interval = 2  # Check every 2 seconds
            elapsed = 0

            print(f"Waiting for {agent_name}/{action} to complete...")
            while elapsed < max_wait_time:
                completion_status = check_skill_completion(
                    worktree_path=worktree_path,
                    expected_artifacts=step.creates,
                )

                if completion_status["completed"]:
                    from ..core.unicode_safe import safe_print
                    safe_print(f"[OK] {agent_name}/{action} completed - found artifacts: {completion_status['found_artifacts']}")
                    break

                await asyncio.sleep(poll_interval)
                elapsed += poll_interval

                # Print progress every 10 seconds
                if elapsed % 10 == 0:
                    print(f"  Still waiting... ({elapsed}s elapsed)")
            else:
                # while/else: runs only on timeout (no break). Safe to read
                # completion_status here - max_wait_time > 0 guarantees at
                # least one loop iteration assigned it.
                raise TimeoutError(
                    f"Skill {agent_name}/{action} did not complete within {max_wait_time}s. "
                    f"Expected artifacts: {step.creates}, Missing: {completion_status.get('missing_artifacts', [])}"
                )

            # Extract artifacts from worktree
            artifacts = await self.worktree_manager.extract_artifacts(
                worktree_path=worktree_path,
                step=step,
            )

            # Update state with artifacts
            for artifact in artifacts:
                self.state.artifacts[artifact.name] = artifact

            # Story-level step handling (Phase 3: Story-Level Granularity)
            # Verify acceptance criteria BEFORE marking step as completed;
            # raises if criteria fail, which routes us to the except branch.
            if step.metadata and step.metadata.get("story_id"):
                self._handle_story_completion(step, artifacts, step_execution)

            # Update step execution (after story verification)
            step_execution.completed_at = datetime.now()
            step_execution.status = "completed"
            step_execution.result = result

            # Remove the worktree on success (keep on failure for debugging)
            try:
                # Determine if we should delete the branch based on configuration
                from ..core.config import load_config
                config = load_config()
                should_delete = (
                    config.workflow.branch_cleanup.delete_branches_on_cleanup
                    if (
                        config.workflow.branch_cleanup
                        and config.workflow.branch_cleanup.enabled
                    )
                    else True  # Default to True for backward compatibility
                )
                await self.worktree_manager.remove_worktree(
                    worktree_name, delete_branch=should_delete
                )
            except Exception:
                # Deliberate best-effort: a stale worktree must not fail the step.
                pass

            # Advance to next step
            self._advance_step()

        except Exception as e:
            # Record the failure on the execution entry, then re-raise so the
            # caller sees the original error.
            step_execution.completed_at = datetime.now()
            step_execution.status = "failed"
            step_execution.error = str(e)
            raise

        finally:
            # State is persisted on every exit path, success or failure.
            self.save_state()
|
|
1950
|
+
|
|
1951
|
+
def _can_execute_step(
|
|
1952
|
+
self,
|
|
1953
|
+
step: WorkflowStep,
|
|
1954
|
+
completed_steps: dict[str, StepResult]
|
|
1955
|
+
) -> tuple[bool, str]:
|
|
1956
|
+
"""
|
|
1957
|
+
Check if step can execute based on dependencies (BUG-003B fix).
|
|
1958
|
+
|
|
1959
|
+
Validates that all required dependencies have been executed and succeeded.
|
|
1960
|
+
If any dependency is missing or failed, the step cannot execute.
|
|
1961
|
+
|
|
1962
|
+
Args:
|
|
1963
|
+
step: Step to check
|
|
1964
|
+
completed_steps: Results of previously executed steps
|
|
1965
|
+
|
|
1966
|
+
Returns:
|
|
1967
|
+
(can_execute, skip_reason) tuple:
|
|
1968
|
+
- (True, "") if all dependencies met
|
|
1969
|
+
- (False, reason) if dependencies not met
|
|
1970
|
+
|
|
1971
|
+
Example:
|
|
1972
|
+
can_run, reason = self._can_execute_step(step, completed_steps)
|
|
1973
|
+
if not can_run:
|
|
1974
|
+
# Skip step with reason
|
|
1975
|
+
skip_result = StepResult(status="skipped", skip_reason=reason, ...)
|
|
1976
|
+
"""
|
|
1977
|
+
for dep in step.requires or []:
|
|
1978
|
+
if dep not in completed_steps:
|
|
1979
|
+
return False, f"Dependency '{dep}' not executed"
|
|
1980
|
+
|
|
1981
|
+
dep_result = completed_steps[dep]
|
|
1982
|
+
if not dep_result.success:
|
|
1983
|
+
return False, f"Dependency '{dep}' failed: {dep_result.error}"
|
|
1984
|
+
|
|
1985
|
+
return True, ""
|
|
1986
|
+
|
|
1987
|
+
def _normalize_action(self, action: str) -> str:
|
|
1988
|
+
"""
|
|
1989
|
+
Normalize action name to use underscores (Python convention).
|
|
1990
|
+
|
|
1991
|
+
Converts hyphens to underscores so workflow YAMLs can use either format,
|
|
1992
|
+
but handlers always receive underscore format (e.g., "write_code").
|
|
1993
|
+
"""
|
|
1994
|
+
return action.replace("-", "_").lower()
|
|
1995
|
+
|
|
1996
|
+
def _get_step_params(self, step: WorkflowStep, target_path: Path | None) -> dict[str, Any]:
|
|
1997
|
+
"""
|
|
1998
|
+
Extract parameters for step execution.
|
|
1999
|
+
|
|
2000
|
+
Args:
|
|
2001
|
+
step: Workflow step
|
|
2002
|
+
target_path: Optional target file path
|
|
2003
|
+
|
|
2004
|
+
Returns:
|
|
2005
|
+
Dictionary of parameters for command building
|
|
2006
|
+
"""
|
|
2007
|
+
params: dict[str, Any] = {}
|
|
2008
|
+
|
|
2009
|
+
# Add target file if provided
|
|
2010
|
+
if target_path:
|
|
2011
|
+
try:
|
|
2012
|
+
# Try relative path first (most common case)
|
|
2013
|
+
resolved_target = Path(target_path).resolve()
|
|
2014
|
+
resolved_root = self.project_root.resolve()
|
|
2015
|
+
|
|
2016
|
+
# Use is_relative_to if available (Python 3.9+)
|
|
2017
|
+
try:
|
|
2018
|
+
if resolved_target.is_relative_to(resolved_root):
|
|
2019
|
+
params["target_file"] = str(resolved_target.relative_to(resolved_root))
|
|
2020
|
+
else:
|
|
2021
|
+
# Path is outside project root - use path normalizer
|
|
2022
|
+
from ...core.path_normalizer import normalize_for_cli
|
|
2023
|
+
params["target_file"] = normalize_for_cli(target_path, self.project_root)
|
|
2024
|
+
except AttributeError:
|
|
2025
|
+
# Python < 3.9 - use try/except
|
|
2026
|
+
try:
|
|
2027
|
+
params["target_file"] = str(resolved_target.relative_to(resolved_root))
|
|
2028
|
+
except ValueError:
|
|
2029
|
+
# Path is outside project root - use path normalizer
|
|
2030
|
+
from ...core.path_normalizer import normalize_for_cli
|
|
2031
|
+
params["target_file"] = normalize_for_cli(target_path, self.project_root)
|
|
2032
|
+
except Exception as e:
|
|
2033
|
+
# Fallback: use path normalizer for any error
|
|
2034
|
+
from ...core.path_normalizer import normalize_for_cli
|
|
2035
|
+
if self.logger:
|
|
2036
|
+
self.logger.warning(f"Path conversion error: {e}. Using path normalizer.")
|
|
2037
|
+
params["target_file"] = normalize_for_cli(target_path, self.project_root)
|
|
2038
|
+
|
|
2039
|
+
# Add step metadata
|
|
2040
|
+
if step.metadata:
|
|
2041
|
+
params.update(step.metadata)
|
|
2042
|
+
|
|
2043
|
+
# Add workflow variables
|
|
2044
|
+
if self.state and self.state.variables:
|
|
2045
|
+
# Include relevant variables (avoid exposing everything)
|
|
2046
|
+
if "user_prompt" in self.state.variables:
|
|
2047
|
+
params["user_prompt"] = self.state.variables["user_prompt"]
|
|
2048
|
+
if "target_file" in self.state.variables:
|
|
2049
|
+
params["target_file"] = self.state.variables["target_file"]
|
|
2050
|
+
|
|
2051
|
+
return params
|
|
2052
|
+
|
|
2053
|
+
    def _handle_story_completion(
        self, step: WorkflowStep, artifacts: list[Artifact], step_execution: StepExecution
    ) -> None:
        """
        Handle story-level step completion (Phase 3: Story-Level Granularity).

        Verifies acceptance criteria, records the outcome in workflow state
        variables, logs to progress.txt, and raises when criteria fail so the
        workflow does not advance past a failing story.

        Args:
            step: Completed workflow step with story metadata
            artifacts: Artifacts created by the step
            step_execution: Step execution record to update if criteria fail

        Raises:
            ValueError: If the story's acceptance criteria did not all pass.
        """
        if not step.metadata:
            return

        story_id = step.metadata.get("story_id")
        story_title = step.metadata.get("story_title")
        acceptance_criteria = step.metadata.get("acceptance_criteria", [])

        if not story_id:
            return  # Not a story-level step

        # Verify acceptance criteria if provided; default to passing when a
        # story declares no criteria.
        passes = True
        verification_result = None

        if acceptance_criteria:
            from .acceptance_verifier import AcceptanceCriteriaVerifier

            # Convert artifacts list to dict
            artifacts_dict = {art.name: art for art in artifacts}

            # Get code files from artifacts (only existing files with a
            # recognized source-code extension are handed to the verifier)
            code_files = []
            for art in artifacts:
                if art.path:
                    art_path = Path(art.path)
                    if art_path.exists() and art_path.suffix in [".py", ".js", ".ts", ".tsx", ".jsx", ".java", ".go", ".rs"]:
                        code_files.append(art_path)

            # Verify criteria
            verifier = AcceptanceCriteriaVerifier()
            verification_result = verifier.verify(
                criteria=acceptance_criteria,
                artifacts=artifacts_dict,
                code_files=code_files if code_files else None,
            )
            passes = verification_result.get("all_passed", True)

            # Store verification result in state variables
            # NOTE(review): assumes self.state is set - this is only invoked
            # from _execute_step after its "workflow started" guard; confirm.
            if "story_verifications" not in self.state.variables:
                self.state.variables["story_verifications"] = {}
            self.state.variables["story_verifications"][story_id] = verification_result

        # Track story completion in state.variables
        if "story_completions" not in self.state.variables:
            self.state.variables["story_completions"] = {}
        self.state.variables["story_completions"][story_id] = passes

        # Log to progress.txt if progress logger is available
        try:
            from .progress_logger import ProgressLogger

            progress_file = self.project_root / ".tapps-agents" / "progress.txt"
            progress_logger = ProgressLogger(progress_file)

            # Extract files changed
            files_changed = [art.path for art in artifacts if art.path]

            # Extract learnings from verification result (only the criteria
            # that failed are summarized)
            learnings = []
            if verification_result and not passes:
                failed_criteria = [
                    r["criterion"]
                    for r in verification_result.get("results", [])
                    if not r.get("passed", False)
                ]
                if failed_criteria:
                    learnings.append(f"Acceptance criteria not met: {', '.join(failed_criteria)}")

            # Log story completion
            progress_logger.log_story_completion(
                story_id=story_id,
                story_title=story_title or step.id,
                passes=passes,
                files_changed=files_changed if files_changed else None,
                learnings=learnings if learnings else None,
            )
        except Exception:
            # Don't fail workflow if progress logging fails
            import logging
            logger = logging.getLogger(__name__)
            logger.warning("Failed to log story completion to progress.txt", exc_info=True)

        # If acceptance criteria not met, mark step as failed and raise exception
        if not passes:
            step_execution.status = "failed"
            step_execution.error = f"Acceptance criteria not met for story {story_id}"
            # Raise exception to prevent advancing to next step
            raise ValueError(f"Story {story_id} failed acceptance criteria verification")
|
|
2154
|
+
|
|
2155
|
+
    def _advance_step(self) -> None:
        """Advance to the next workflow step.

        Prefers the auto-progression engine's decision when it is enabled and
        a StepExecution exists for the current step; otherwise falls back to
        plain sequential order. Marks the workflow completed when no further
        step exists, or failed when the current step id cannot be found.
        """
        if not self.workflow or not self.state:
            return

        # Use auto-progression if enabled
        if self.auto_progression.should_auto_progress():
            current_step = self.get_current_step()
            if current_step:
                # Get progression decision - requires the execution record
                # for the current step; without one we fall through to the
                # sequential path below.
                step_execution = next(
                    (se for se in self.state.step_executions if se.step_id == current_step.id),
                    None
                )
                if step_execution:
                    review_result = None
                    if current_step.agent == "reviewer":
                        # Reviewer steps feed their result into the gate decision.
                        review_result = self.state.variables.get("reviewer_result")

                    decision = self.auto_progression.handle_step_completion(
                        step=current_step,
                        state=self.state,
                        step_execution=step_execution,
                        review_result=review_result,
                    )

                    next_step_id = self.auto_progression.get_next_step_id(
                        step=current_step,
                        decision=decision,
                        workflow_steps=self.workflow.steps,
                    )

                    if next_step_id:
                        self.state.current_step = next_step_id
                    else:
                        # Workflow complete
                        self.state.status = "completed"
                        self.state.completed_at = datetime.now()
                        self.state.current_step = None
                    return

        # Fallback to sequential progression
        current_index = None
        for i, step in enumerate(self.workflow.steps):
            if step.id == self.state.current_step:
                current_index = i
                break

        if current_index is None:
            # Current step id is not in the workflow definition - unrecoverable.
            self.state.status = "failed"
            self.state.error = f"Current step {self.state.current_step} not found"
            return

        # Move to next step
        if current_index + 1 < len(self.workflow.steps):
            self.state.current_step = self.workflow.steps[current_index + 1].id
        else:
            # All steps completed
            self.state.status = "completed"
            self.state.completed_at = datetime.now()
            self.state.current_step = None
|
|
2216
|
+
|
|
2217
|
+
def get_progression_status(self) -> dict[str, Any]:
|
|
2218
|
+
"""
|
|
2219
|
+
Get current progression status and visibility information.
|
|
2220
|
+
|
|
2221
|
+
Returns:
|
|
2222
|
+
Dictionary with progression status
|
|
2223
|
+
"""
|
|
2224
|
+
if not self.workflow or not self.state:
|
|
2225
|
+
return {"status": "not_started"}
|
|
2226
|
+
|
|
2227
|
+
return self.auto_progression.get_progression_status(
|
|
2228
|
+
state=self.state,
|
|
2229
|
+
workflow_steps=self.workflow.steps,
|
|
2230
|
+
)
|
|
2231
|
+
|
|
2232
|
+
def get_progression_history(self, step_id: str | None = None) -> list[dict[str, Any]]:
|
|
2233
|
+
"""
|
|
2234
|
+
Get progression history.
|
|
2235
|
+
|
|
2236
|
+
Args:
|
|
2237
|
+
step_id: Optional step ID to filter by
|
|
2238
|
+
|
|
2239
|
+
Returns:
|
|
2240
|
+
List of progression history entries
|
|
2241
|
+
"""
|
|
2242
|
+
history = self.auto_progression.get_progression_history(step_id=step_id)
|
|
2243
|
+
return [
|
|
2244
|
+
{
|
|
2245
|
+
"step_id": h.step_id,
|
|
2246
|
+
"timestamp": h.timestamp.isoformat(),
|
|
2247
|
+
"action": h.action.value,
|
|
2248
|
+
"reason": h.reason,
|
|
2249
|
+
"gate_result": h.gate_result,
|
|
2250
|
+
"metadata": h.metadata,
|
|
2251
|
+
}
|
|
2252
|
+
for h in history
|
|
2253
|
+
]
|
|
2254
|
+
|
|
2255
|
+
def pause_workflow(self) -> None:
|
|
2256
|
+
"""
|
|
2257
|
+
Pause workflow execution.
|
|
2258
|
+
|
|
2259
|
+
Epic 10: Progression Control
|
|
2260
|
+
"""
|
|
2261
|
+
if not self.state:
|
|
2262
|
+
raise ValueError("Workflow not started")
|
|
2263
|
+
|
|
2264
|
+
if self.state.status == "running":
|
|
2265
|
+
self.state.status = "paused"
|
|
2266
|
+
self.save_state()
|
|
2267
|
+
if self.logger:
|
|
2268
|
+
self.logger.info("Workflow paused by user")
|
|
2269
|
+
self.auto_progression.record_progression(
|
|
2270
|
+
step_id=self.state.current_step or "unknown",
|
|
2271
|
+
action=ProgressionAction.PAUSE,
|
|
2272
|
+
reason="Workflow paused by user",
|
|
2273
|
+
)
|
|
2274
|
+
|
|
2275
|
+
def resume_workflow(self) -> None:
|
|
2276
|
+
"""
|
|
2277
|
+
Resume paused workflow execution.
|
|
2278
|
+
|
|
2279
|
+
Epic 10: Progression Control
|
|
2280
|
+
"""
|
|
2281
|
+
if not self.state:
|
|
2282
|
+
raise ValueError("Workflow not started")
|
|
2283
|
+
|
|
2284
|
+
if self.state.status == "paused":
|
|
2285
|
+
self.state.status = "running"
|
|
2286
|
+
self.save_state()
|
|
2287
|
+
if self.logger:
|
|
2288
|
+
self.logger.info("Workflow resumed by user")
|
|
2289
|
+
self.auto_progression.record_progression(
|
|
2290
|
+
step_id=self.state.current_step or "unknown",
|
|
2291
|
+
action=ProgressionAction.CONTINUE,
|
|
2292
|
+
reason="Workflow resumed by user",
|
|
2293
|
+
)
|
|
2294
|
+
|
|
2295
|
+
def skip_step(self, step_id: str | None = None) -> None:
|
|
2296
|
+
"""
|
|
2297
|
+
Skip a workflow step.
|
|
2298
|
+
|
|
2299
|
+
Args:
|
|
2300
|
+
step_id: Step ID to skip (defaults to current step)
|
|
2301
|
+
|
|
2302
|
+
Epic 10: Progression Control
|
|
2303
|
+
"""
|
|
2304
|
+
if not self.state or not self.workflow:
|
|
2305
|
+
raise ValueError("Workflow not started")
|
|
2306
|
+
|
|
2307
|
+
step_id = step_id or self.state.current_step
|
|
2308
|
+
if not step_id:
|
|
2309
|
+
raise ValueError("No step to skip")
|
|
2310
|
+
|
|
2311
|
+
# Find the step
|
|
2312
|
+
step = next((s for s in self.workflow.steps if s.id == step_id), None)
|
|
2313
|
+
if not step:
|
|
2314
|
+
raise ValueError(f"Step {step_id} not found")
|
|
2315
|
+
|
|
2316
|
+
# Record skip in progression history
|
|
2317
|
+
self.auto_progression.record_progression(
|
|
2318
|
+
step_id=step_id,
|
|
2319
|
+
action=ProgressionAction.SKIP,
|
|
2320
|
+
reason="Step skipped by user",
|
|
2321
|
+
)
|
|
2322
|
+
|
|
2323
|
+
# Advance to next step
|
|
2324
|
+
if step.next:
|
|
2325
|
+
self.state.current_step = step.next
|
|
2326
|
+
self.save_state()
|
|
2327
|
+
if self.logger:
|
|
2328
|
+
self.logger.info(f"Step {step_id} skipped, advancing to {step.next}")
|
|
2329
|
+
else:
|
|
2330
|
+
# No next step - workflow complete
|
|
2331
|
+
self.state.status = "completed"
|
|
2332
|
+
self.state.completed_at = datetime.now()
|
|
2333
|
+
self.state.current_step = None
|
|
2334
|
+
self.save_state()
|
|
2335
|
+
if self.logger:
|
|
2336
|
+
self.logger.info(f"Step {step_id} skipped, workflow completed")
|
|
2337
|
+
|