@exaudeus/workrail 3.4.0 → 3.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/application/services/validation-engine.js +50 -0
- package/dist/config/feature-flags.js +8 -0
- package/dist/engine/engine-factory.js +4 -2
- package/dist/manifest.json +100 -52
- package/dist/mcp/handler-factory.js +21 -4
- package/dist/mcp/handlers/v2-execution/continue-rehydrate.d.ts +6 -1
- package/dist/mcp/handlers/v2-execution/continue-rehydrate.js +22 -4
- package/dist/mcp/handlers/v2-execution/index.d.ts +6 -1
- package/dist/mcp/handlers/v2-execution/index.js +13 -3
- package/dist/mcp/handlers/v2-execution/start.d.ts +9 -1
- package/dist/mcp/handlers/v2-execution/start.js +74 -36
- package/dist/mcp/handlers/v2-execution-helpers.d.ts +2 -0
- package/dist/mcp/handlers/v2-execution-helpers.js +2 -0
- package/dist/mcp/handlers/v2-reference-resolver.d.ts +14 -0
- package/dist/mcp/handlers/v2-reference-resolver.js +112 -0
- package/dist/mcp/handlers/v2-resolve-refs-envelope.d.ts +5 -0
- package/dist/mcp/handlers/v2-resolve-refs-envelope.js +17 -0
- package/dist/mcp/handlers/v2-workflow.js +2 -0
- package/dist/mcp/output-schemas.d.ts +38 -0
- package/dist/mcp/output-schemas.js +8 -0
- package/dist/mcp/render-envelope.d.ts +21 -0
- package/dist/mcp/render-envelope.js +59 -0
- package/dist/mcp/response-supplements.d.ts +17 -0
- package/dist/mcp/response-supplements.js +58 -0
- package/dist/mcp/step-content-envelope.d.ts +32 -0
- package/dist/mcp/step-content-envelope.js +13 -0
- package/dist/mcp/v2-response-formatter.d.ts +11 -1
- package/dist/mcp/v2-response-formatter.js +168 -1
- package/dist/mcp/workflow-protocol-contracts.js +9 -7
- package/dist/types/workflow-definition.d.ts +16 -0
- package/dist/types/workflow-definition.js +1 -0
- package/dist/utils/condition-evaluator.d.ts +1 -0
- package/dist/utils/condition-evaluator.js +7 -0
- package/dist/v2/durable-core/domain/context-template-resolver.d.ts +2 -0
- package/dist/v2/durable-core/domain/context-template-resolver.js +26 -0
- package/dist/v2/durable-core/domain/prompt-renderer.d.ts +2 -0
- package/dist/v2/durable-core/domain/prompt-renderer.js +93 -15
- package/dist/v2/durable-core/schemas/compiled-workflow/index.d.ts +256 -0
- package/dist/v2/durable-core/schemas/compiled-workflow/index.js +30 -0
- package/package.json +4 -1
- package/spec/authoring-spec.json +1373 -0
- package/spec/authoring-spec.provenance.json +77 -0
- package/spec/authoring-spec.schema.json +370 -0
- package/spec/workflow.schema.json +88 -2
- package/workflows/coding-task-workflow-agentic.lean.v2.json +132 -30
- package/workflows/cross-platform-code-conversion.v2.json +199 -0
- package/workflows/routines/parallel-work-partitioning.json +43 -0
- package/workflows/workflow-for-workflows.json +27 -1
- package/workflows/workflow-for-workflows.v2.json +186 -0
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
"id": "coding-task-workflow-agentic",
|
|
3
3
|
"name": "Agentic Task Dev Workflow (Lean • Notes-First • WorkRail Executor)",
|
|
4
4
|
"version": "1.0.0",
|
|
5
|
-
"description": "
|
|
5
|
+
"description": "The user guides the agent through understanding the task, selecting an approach, planning in slices, implementing incrementally, and verifying the result through explicit review and validation checkpoints.",
|
|
6
6
|
"recommendedPreferences": {
|
|
7
7
|
"recommendedAutonomy": "guided",
|
|
8
8
|
"recommendedRiskPolicy": "conservative"
|
|
@@ -16,22 +16,46 @@
|
|
|
16
16
|
"metaGuidance": [
|
|
17
17
|
"DEFAULT BEHAVIOR: self-execute with tools. Only ask the user for business decisions, missing external artifacts, or permissions you cannot resolve.",
|
|
18
18
|
"V2 DURABILITY: use output.notesMarkdown as the primary durable record. Do NOT mirror execution state into CONTEXT.md or any markdown checkpoint file.",
|
|
19
|
-
"ARTIFACT STRATEGY: `implementation_plan.md`
|
|
19
|
+
"ARTIFACT STRATEGY: `implementation_plan.md` drives execution. `spec.md`, when created, is canonical for observable behavior and serves as the verification anchor. Do not create extra artifacts unless they materially improve handoff.",
|
|
20
20
|
"OWNERSHIP & DELEGATION: the main agent owns strategy, decisions, synthesis, and implementation. Delegate only bounded cognitive routines via WorkRail Executor. Never hand off full task ownership or rely on named Builder/Researcher identities.",
|
|
21
21
|
"SUBAGENT SYNTHESIS: treat subagent output as evidence, not conclusions. State your hypothesis before delegating, then interrogate what came back: what was missed, wrong, or new? Say what changed your mind or what you still reject, and why.",
|
|
22
22
|
"PARALLELISM: when reads, audits, or delegations are independent, run them in parallel inside the phase. Parallelize cognition; serialize synthesis and canonical writes.",
|
|
23
23
|
"PHILOSOPHY LENS: apply the user's coding philosophy (from active session rules) as the evaluation lens. Flag violations by principle name, not as generic feedback. If principles conflict, surface the tension explicitly instead of silently choosing.",
|
|
24
|
-
"
|
|
25
|
-
"PHILOSOPHY CHECKS (cont): validate at boundaries, fail fast on invariant violations, prefer determinism and small pure functions, use data-driven control flow, DI at boundaries, YAGNI with discipline, and atomicity.",
|
|
26
|
-
"PHILOSOPHY CHECKS (cont): treat graceful degradation, observability, fakes over mocks, and focused interfaces as first-class review concerns.",
|
|
24
|
+
"VALIDATION: prefer static/compile-time safety over runtime checks. Use build, type-checking, and tests as the primary proof of correctness — in that order of reliability.",
|
|
27
25
|
"DRIFT HANDLING: when reality diverges from the plan, update the plan artifact and re-audit deliberately rather than accumulating undocumented drift.",
|
|
28
26
|
"NEVER COMMIT MARKDOWN FILES UNLESS USER EXPLICITLY ASKS."
|
|
29
27
|
],
|
|
28
|
+
"references": [
|
|
29
|
+
{
|
|
30
|
+
"id": "authoring-spec",
|
|
31
|
+
"title": "Authoring Specification",
|
|
32
|
+
"source": "./spec/authoring-spec.json",
|
|
33
|
+
"purpose": "Canonical rules and constraints for workflow authoring. Consult when making structural decisions about workflow design.",
|
|
34
|
+
"authoritative": true,
|
|
35
|
+
"resolveFrom": "package"
|
|
36
|
+
},
|
|
37
|
+
{
|
|
38
|
+
"id": "workflow-schema",
|
|
39
|
+
"title": "Workflow JSON Schema",
|
|
40
|
+
"source": "./spec/workflow.schema.json",
|
|
41
|
+
"purpose": "The JSON schema that all workflow definitions must conform to. Use as the structural contract reference.",
|
|
42
|
+
"authoritative": true,
|
|
43
|
+
"resolveFrom": "package"
|
|
44
|
+
},
|
|
45
|
+
{
|
|
46
|
+
"id": "authoring-provenance",
|
|
47
|
+
"title": "Workflow Authoring Provenance",
|
|
48
|
+
"source": "./spec/authoring-spec.provenance.json",
|
|
49
|
+
"purpose": "Source-of-truth map showing what is canonical, derived, and non-canonical in workflow authoring guidance.",
|
|
50
|
+
"authoritative": false,
|
|
51
|
+
"resolveFrom": "package"
|
|
52
|
+
}
|
|
53
|
+
],
|
|
30
54
|
"steps": [
|
|
31
55
|
{
|
|
32
56
|
"id": "phase-0-understand-and-classify",
|
|
33
57
|
"title": "Phase 0: Understand & Classify",
|
|
34
|
-
"prompt": "
|
|
58
|
+
"prompt": "Understand this before you touch anything.\n\nMake sure the expected behavior is clear enough to proceed. If it really isn't, ask me only what you can't answer yourself. Don't ask me things you can find with tools.\n\nThen dig through the code. Figure out:\n- where this starts and what the call chain looks like\n- which files, modules, and functions matter\n- what patterns this should follow\n- how this repo verifies similar work\n- what the real risks, invariants, and non-goals are\n\nFigure out what philosophy to use while doing the work. Prefer, in order: Memory MCP (`mcp_memory_conventions`, `mcp_memory_prefer`, `mcp_memory_recall`), active session/Firebender rules, repo patterns, then me only if those still conflict or aren't enough.\n\nRecord where that philosophy lives, not a summary. If the stated rules and repo patterns disagree, capture the conflict.\n\nOnce you actually understand the task, classify it:\n- `taskComplexity`: Small / Medium / Large\n- `riskLevel`: Low / Medium / High\n- `rigorMode`: QUICK / STANDARD / THOROUGH\n- `automationLevel`: High / Medium / Low\n- `prStrategy`: SinglePR / MultiPR\n\nUse this guidance:\n- QUICK: small, low-risk, clear path, little ambiguity\n- STANDARD: medium scope or moderate risk\n- THOROUGH: large scope, architectural uncertainty, or high-risk change\n\nThen force a context-clarity check. 
Score each from 0-2 and give one sentence of evidence for each score:\n- `entryPointClarity`: 0 = clear entry point and call chain, 1 = partial chain with gaps, 2 = still unclear where behavior starts or flows\n- `boundaryClarity`: 0 = clear boundary, 1 = likely boundary but some uncertainty, 2 = patch-vs-boundary decision still unclear\n- `invariantClarity`: 0 = important invariants are explicit, 1 = some are inferred or uncertain, 2 = important invariants are still unclear\n- `verificationClarity`: 0 = clear deterministic verification path, 1 = partial verification path, 2 = verification is still weak or unclear\n\nUse the rubric, not vibes:\n- QUICK: do not run the deeper context batch; if the rubric says you're missing too much context, your classification is probably wrong and you should reclassify upward before moving on\n- STANDARD: run the deeper context batch if the total score is 3 or more, or if `boundaryClarity`, `invariantClarity`, or `verificationClarity` is 2\n- THOROUGH: always run the deeper context batch\n\nThe deeper context batch is:\n- `routine-context-gathering` with `focus=COMPLETENESS`\n- `routine-context-gathering` with `focus=DEPTH`\n\nAfter the batch, synthesize what changed, what stayed the same, and what is still unknown. If the extra context changes the classification, update it before you leave this step.\n\nCapture:\n- `taskComplexity`\n- `riskLevel`\n- `rigorMode`\n- `automationLevel`\n- `prStrategy`\n- `contextSummary`\n- `candidateFiles`\n- `invariants`\n- `nonGoals`\n- `openQuestions` (only real human-decision questions)\n- `philosophySources`\n- `philosophyConflicts`",
|
|
35
59
|
"requireConfirmation": {
|
|
36
60
|
"or": [
|
|
37
61
|
{ "var": "taskComplexity", "equals": "Large" },
|
|
@@ -43,10 +67,12 @@
|
|
|
43
67
|
"id": "phase-1a-hypothesis",
|
|
44
68
|
"title": "Phase 1a: State Hypothesis",
|
|
45
69
|
"runCondition": {
|
|
46
|
-
"
|
|
47
|
-
|
|
70
|
+
"and": [
|
|
71
|
+
{ "var": "taskComplexity", "not_equals": "Small" },
|
|
72
|
+
{ "var": "rigorMode", "not_equals": "QUICK" }
|
|
73
|
+
]
|
|
48
74
|
},
|
|
49
|
-
"prompt": "Before
|
|
75
|
+
"prompt": "Before you do design work, tell me your current best guess.\n\nKeep it short:\n1. what you think the right approach is\n2. what worries you about it\n3. what would most likely make it wrong\n\nCapture:\n- `initialHypothesis`",
|
|
50
76
|
"requireConfirmation": false
|
|
51
77
|
},
|
|
52
78
|
{
|
|
@@ -84,7 +110,19 @@
|
|
|
84
110
|
"var": "taskComplexity",
|
|
85
111
|
"not_equals": "Small"
|
|
86
112
|
},
|
|
87
|
-
"prompt": "Read `design-candidates.md`, compare
|
|
113
|
+
"prompt": "Read `design-candidates.md`, compare it to your original guess, and make the call.\n\nBe explicit about three things:\n- what the design work confirmed\n- what changed your mind\n- what you missed the first time\n\nThen pressure-test the leading option:\n- what's the strongest case against it?\n- what assumption breaks it?\n\nAfter the challenge batch, say:\n- what changed your mind\n- what didn't\n- which findings you reject and why\n\nPick the approach yourself. Don't hide behind the artifact. If the simplest thing works, prefer it. If the front-runner stops looking right after challenge, switch.\n\nCapture:\n- `selectedApproach` — chosen design with rationale tied to tensions\n- `runnerUpApproach` — next-best option and why it lost\n- `architectureRationale` — tensions resolved vs accepted\n- `pivotTriggers` — conditions under which you'd switch to the runner-up\n- `keyRiskToMonitor` — failure mode of the selected approach\n- `acceptedTradeoffs`\n- `identifiedFailureModes`",
|
|
114
|
+
"promptFragments": [
|
|
115
|
+
{
|
|
116
|
+
"id": "phase-1c-challenge-standard",
|
|
117
|
+
"when": { "var": "rigorMode", "in": ["STANDARD", "THOROUGH"] },
|
|
118
|
+
"text": "Run `routine-hypothesis-challenge` on the leading option's failure modes before you decide."
|
|
119
|
+
},
|
|
120
|
+
{
|
|
121
|
+
"id": "phase-1c-challenge-thorough",
|
|
122
|
+
"when": { "var": "rigorMode", "equals": "THOROUGH" },
|
|
123
|
+
"text": "Also run `routine-execution-simulation` on the three most likely failure paths before you decide."
|
|
124
|
+
}
|
|
125
|
+
],
|
|
88
126
|
"requireConfirmation": {
|
|
89
127
|
"or": [
|
|
90
128
|
{ "var": "automationLevel", "equals": "Low" },
|
|
@@ -131,13 +169,20 @@
|
|
|
131
169
|
{
|
|
132
170
|
"id": "phase-2c-synthesize-design-review",
|
|
133
171
|
"title": "Synthesize Design Review Findings",
|
|
134
|
-
"prompt": "Read `design-review-findings.md` and
|
|
172
|
+
"prompt": "Read `design-review-findings.md` and turn the review into workflow-owned decisions.\n\nCompare it against `designReviewAssessment`:\n- what did the review confirm?\n- what did it surface that you missed?\n- what changed your mind and what held firm?\n\nIf the findings are real, fix the design before you continue (`selectedApproach`, `architectureRationale`, `pivotTriggers`, `acceptedTradeoffs`, `identifiedFailureModes`).\n\nAfter any extra challenge, synthesize explicitly:\n- which findings actually matter\n- what changed in the design\n- what you reject and why\n\nFor any finding that changes the decision, classify it as:\n- `Confirmed`: you checked it against primary evidence (code, artifacts, spec, tests/build, or direct workflow context)\n- `Plausible`: interesting, but not verified enough to drive the decision yet\n- `Rejected`: contradicted by fuller context or direct evidence\n\nSubagent agreement alone is not enough for `Confirmed`.\n\nCapture:\n- `designFindings`\n- `designRevised`",
|
|
173
|
+
"promptFragments": [
|
|
174
|
+
{
|
|
175
|
+
"id": "phase-2c-challenge-thorough",
|
|
176
|
+
"when": { "var": "rigorMode", "equals": "THOROUGH" },
|
|
177
|
+
"text": "If the review surfaced materially non-empty or surprising findings, run `routine-hypothesis-challenge` on the most serious finding and `routine-execution-simulation` on the most dangerous failure mode before you finalize the revised design."
|
|
178
|
+
}
|
|
179
|
+
],
|
|
135
180
|
"requireConfirmation": false
|
|
136
181
|
},
|
|
137
182
|
{
|
|
138
183
|
"id": "phase-2d-loop-decision",
|
|
139
184
|
"title": "Design Review Loop Decision",
|
|
140
|
-
"prompt": "
|
|
185
|
+
"prompt": "Decide whether the design needs another pass.\n\nIf `designFindings` is non-empty and the design was revised, keep going so the revision gets checked.\nIf `designFindings` is empty, stop.\nIf you've hit the limit, stop and record the remaining concerns.\n\nThen emit the required loop-control artifact in this shape (`decision` must be `continue` or `stop`):\n```json\n{\n \"artifacts\": [{\n \"kind\": \"wr.loop_control\",\n \"decision\": \"continue\"\n }]\n}\n```",
|
|
141
186
|
"requireConfirmation": false,
|
|
142
187
|
"outputContract": {
|
|
143
188
|
"contractRef": "wr.contracts.loop_control"
|
|
@@ -152,7 +197,24 @@
|
|
|
152
197
|
"var": "taskComplexity",
|
|
153
198
|
"not_equals": "Small"
|
|
154
199
|
},
|
|
155
|
-
"prompt": "
|
|
200
|
+
"prompt": "Turn the decision into a plan someone else could execute without guessing.\n\nUpdate `implementation_plan.md`.\n\nIt should cover:\n1. Problem statement\n2. Acceptance criteria (mirror `spec.md` if it exists; `spec.md` owns observable behavior)\n3. Non-goals\n4. Philosophy-driven constraints\n5. Invariants\n6. Selected approach + rationale + runner-up\n7. Vertical slices\n8. Work packages only if they actually help\n9. Test design\n10. Risk register\n11. PR packaging strategy\n12. Philosophy alignment per slice:\n - [principle] -> [satisfied / tension / violated + 1-line why]\n\nCapture:\n- `implementationPlan`\n- `slices`\n- `testDesign`\n- `estimatedPRCount`\n- `followUpTickets` (initialize if needed)\n- `unresolvedUnknownCount` — count of open issues that would materially affect implementation quality\n- `planConfidenceBand` — Low / Medium / High",
|
|
201
|
+
"requireConfirmation": false
|
|
202
|
+
},
|
|
203
|
+
{
|
|
204
|
+
"id": "phase-3b-spec",
|
|
205
|
+
"title": "Phase 3b: Spec (Observable Behavior)",
|
|
206
|
+
"runCondition": {
|
|
207
|
+
"and": [
|
|
208
|
+
{ "var": "taskComplexity", "not_equals": "Small" },
|
|
209
|
+
{
|
|
210
|
+
"or": [
|
|
211
|
+
{ "var": "taskComplexity", "equals": "Large" },
|
|
212
|
+
{ "var": "riskLevel", "equals": "High" }
|
|
213
|
+
]
|
|
214
|
+
}
|
|
215
|
+
]
|
|
216
|
+
},
|
|
217
|
+
"prompt": "Write `spec.md`.\n\nKeep it about what the feature does from the outside, not how you plan to build it.\n\nInclude:\n1. Feature summary\n2. Acceptance criteria\n3. Non-goals\n4. External API / interface contract if it matters\n5. Edge cases and failure modes\n6. How each acceptance criterion will be verified\n\nKeep it tight. If something can't be verified, it doesn't belong as an acceptance criterion.\n\n`spec.md` is canonical for observable behavior.",
|
|
156
218
|
"requireConfirmation": false
|
|
157
219
|
},
|
|
158
220
|
{
|
|
@@ -160,8 +222,10 @@
|
|
|
160
222
|
"type": "loop",
|
|
161
223
|
"title": "Phase 4: Plan Audit (Review, Fix, Decide)",
|
|
162
224
|
"runCondition": {
|
|
163
|
-
"
|
|
164
|
-
|
|
225
|
+
"and": [
|
|
226
|
+
{ "var": "taskComplexity", "not_equals": "Small" },
|
|
227
|
+
{ "var": "rigorMode", "not_equals": "QUICK" }
|
|
228
|
+
]
|
|
165
229
|
},
|
|
166
230
|
"loop": {
|
|
167
231
|
"type": "while",
|
|
@@ -176,13 +240,30 @@
|
|
|
176
240
|
{
|
|
177
241
|
"id": "phase-4a-audit-and-refocus",
|
|
178
242
|
"title": "Audit Plan and Apply Fixes",
|
|
179
|
-
"prompt": "Audit the plan and fix
|
|
243
|
+
"prompt": "Audit the plan and fix it in the same pass.\n\nLook for:\n- missing work\n- weak assumptions and risks\n- invariant gaps\n- bad slice boundaries\n- philosophy violations or tensions\n- regressions from things you already fixed\n- mismatches between `implementation_plan.md` and `spec.md` if there is a spec\n\nBefore you delegate, say what looks weakest right now and what you trust least.\n\nAfter the audit batch, synthesize explicitly:\n- what multiple auditors agreed on\n- what only one auditor raised\n- what you reject and why\n- what changed in the plan because of the audit\n\nFor any finding that changes the plan, classify it as:\n- `Confirmed`: you checked it against primary evidence (code, plan/spec artifacts, tests/build, or direct workflow context)\n- `Plausible`: interesting, but not verified enough to change the plan yet\n- `Rejected`: contradicted by fuller context or direct evidence\n\nSubagent agreement alone is not enough for `Confirmed`.\n\nThen fix the plan immediately:\n- update `implementation_plan.md`\n- update `spec.md` if acceptance criteria or other observable behavior changed\n- update `slices` if the shape changed\n- move out-of-scope work into `followUpTickets`\n- track resolved findings (cap at 10, drop oldest)\n\nCapture:\n- `planFindings`\n- `planConfidence`\n- `resolvedFindings`\n- `followUpTickets`\n\nIf the plan drifted, fix the plan. Don't just keep going.",
|
|
244
|
+
"promptFragments": [
|
|
245
|
+
{
|
|
246
|
+
"id": "phase-4a-delegation-quick",
|
|
247
|
+
"when": { "var": "rigorMode", "equals": "QUICK" },
|
|
248
|
+
"text": "Do this yourself."
|
|
249
|
+
},
|
|
250
|
+
{
|
|
251
|
+
"id": "phase-4a-delegation-standard",
|
|
252
|
+
"when": { "var": "rigorMode", "equals": "STANDARD" },
|
|
253
|
+
"text": "Run `routine-plan-analysis`, `routine-hypothesis-challenge`, and `routine-philosophy-alignment` in parallel before you decide whether the plan is good enough."
|
|
254
|
+
},
|
|
255
|
+
{
|
|
256
|
+
"id": "phase-4a-delegation-thorough",
|
|
257
|
+
"when": { "var": "rigorMode", "equals": "THOROUGH" },
|
|
258
|
+
"text": "Run `routine-plan-analysis`, `routine-hypothesis-challenge`, `routine-execution-simulation`, and `routine-philosophy-alignment` in parallel before you decide whether the plan is good enough."
|
|
259
|
+
}
|
|
260
|
+
],
|
|
180
261
|
"requireConfirmation": false
|
|
181
262
|
},
|
|
182
263
|
{
|
|
183
264
|
"id": "phase-4b-loop-decision",
|
|
184
265
|
"title": "Loop Exit Decision",
|
|
185
|
-
"prompt": "
|
|
266
|
+
"prompt": "Decide whether the plan needs another pass.\n\nIf `planFindings` is non-empty, keep going.\nIf it's empty, stop — but say what you checked so the clean pass means something.\nIf you've hit the limit, stop and record what still bothers you.\n\nThen emit the required loop-control artifact in this shape (`decision` must be `continue` or `stop`):\n```json\n{\n \"artifacts\": [{\n \"kind\": \"wr.loop_control\",\n \"decision\": \"continue\"\n }]\n}\n```",
|
|
186
267
|
"requireConfirmation": true,
|
|
187
268
|
"outputContract": {
|
|
188
269
|
"contractRef": "wr.contracts.loop_control"
|
|
@@ -219,11 +300,8 @@
|
|
|
219
300
|
{
|
|
220
301
|
"id": "phase-6a-implement-slice",
|
|
221
302
|
"title": "Implement Slice",
|
|
222
|
-
"prompt": "Implement slice `{{currentSlice.name}}`.\n\nBefore
|
|
223
|
-
"requireConfirmation":
|
|
224
|
-
"var": "prStrategy",
|
|
225
|
-
"equals": "MultiPR"
|
|
226
|
-
}
|
|
303
|
+
"prompt": "Implement only the current slice: `{{currentSlice.name}}`.\n\nBefore you code, check whether the plan is still valid:\n- if the pivot triggers fired or the assumptions went stale, stop and go back to planning\n- if the target files or symbols no longer match, stop and re-plan\n\nStay in this slice.\n- don't do the rest of the plan early\n- only pull forward later-slice work if you absolutely need it to make this slice compile or integrate, and count that as `unexpectedScopeChange = true`\n- keep the changes incremental\n- run tests and build to prove the slice works\n\nTrack whether this slice required:\n- a new special-case (`specialCaseIntroduced`)\n- an unplanned abstraction (`unplannedAbstractionIntroduced`)\n- unexpected file changes outside planned scope (`unexpectedScopeChange`)\n\nSet `verifyNeeded` to true if ANY of:\n- `sliceIndex` is odd (verify every 2 slices)\n- `prStrategy = MultiPR`\n- `specialCaseIntroduced = true`\n- `unplannedAbstractionIntroduced = true`\n- `unexpectedScopeChange = true`\n- tests or build failed\n\nCapture:\n- `specialCaseIntroduced`\n- `unplannedAbstractionIntroduced`\n- `unexpectedScopeChange`\n- `verifyNeeded`",
|
|
304
|
+
"requireConfirmation": false
|
|
227
305
|
},
|
|
228
306
|
{
|
|
229
307
|
"id": "phase-6b-verify-slice",
|
|
@@ -232,10 +310,34 @@
|
|
|
232
310
|
"var": "verifyNeeded",
|
|
233
311
|
"equals": true
|
|
234
312
|
},
|
|
235
|
-
"prompt": "
|
|
313
|
+
"prompt": "Take a fresh look at what you just changed.\n\nCheck whether:\n- it matches the plan's intent, not just the letter\n- it hides assumptions or skips edge cases\n- invariants still hold\n- it regressed against the user's philosophy\n- multiple unverified slices now need to be reviewed together\n- `unexpectedScopeChange` was just harmless integration work or real plan drift\n\nIf any of `specialCaseIntroduced`, `unplannedAbstractionIntroduced`, or `unexpectedScopeChange` is true, or if tests/build were shaky, run the verification batch before you decide this slice is done.\n\nAfter the verification batch, synthesize explicitly:\n- what multiple reviewers agreed on\n- what only one reviewer raised\n- what you reject and why\n- whether the drift was harmless integration work or real plan drift\n\nFor any finding that changes whether this slice is accepted, classify it as:\n- `Confirmed`: you checked it against primary evidence (code, plan/spec artifacts, tests/build, or direct workflow context)\n- `Plausible`: interesting, but not verified enough to accept or block the slice yet\n- `Rejected`: contradicted by fuller context or direct evidence\n\nSubagent agreement alone is not enough for `Confirmed`.\n\nSay where you're least confident.\n\nIf the slice drifted materially, update `implementation_plan.md` and `spec.md` if observable behavior changed. If the drift changed boundaries or makes the current plan unreliable, stop and go back to planning.\n\nIf the concerns are serious, stop and go back to planning or ask me. Don't wave this through just because the code exists.\n\nCapture:\n- `verificationFindings`\n- `verificationFailed`",
|
|
314
|
+
"promptFragments": [
|
|
315
|
+
{
|
|
316
|
+
"id": "phase-6b-delegation-quick",
|
|
317
|
+
"when": { "var": "rigorMode", "equals": "QUICK" },
|
|
318
|
+
"text": "Do the verification yourself."
|
|
319
|
+
},
|
|
320
|
+
{
|
|
321
|
+
"id": "phase-6b-delegation-standard",
|
|
322
|
+
"when": { "var": "rigorMode", "equals": "STANDARD" },
|
|
323
|
+
"text": "If any slice-risk trigger fired, run `routine-hypothesis-challenge` and `routine-philosophy-alignment` before you decide this slice is done."
|
|
324
|
+
},
|
|
325
|
+
{
|
|
326
|
+
"id": "phase-6b-delegation-thorough",
|
|
327
|
+
"when": { "var": "rigorMode", "equals": "THOROUGH" },
|
|
328
|
+
"text": "If any slice-risk trigger fired, also run `routine-execution-simulation` before you decide this slice is done."
|
|
329
|
+
},
|
|
330
|
+
{
|
|
331
|
+
"id": "phase-6b-multi-pr",
|
|
332
|
+
"when": { "var": "prStrategy", "equals": "MultiPR" },
|
|
333
|
+
"text": "If this slice is verified and ready, stop here and package it for review before you move to the next slice."
|
|
334
|
+
}
|
|
335
|
+
],
|
|
236
336
|
"requireConfirmation": {
|
|
237
|
-
"
|
|
238
|
-
|
|
337
|
+
"or": [
|
|
338
|
+
{ "var": "verificationFailed", "equals": true },
|
|
339
|
+
{ "var": "prStrategy", "equals": "MultiPR" }
|
|
340
|
+
]
|
|
239
341
|
}
|
|
240
342
|
}
|
|
241
343
|
]
|
|
@@ -243,7 +345,7 @@
|
|
|
243
345
|
{
|
|
244
346
|
"id": "phase-7-final-verification",
|
|
245
347
|
"type": "loop",
|
|
246
|
-
"title": "Phase 7: Final Verification
|
|
348
|
+
"title": "Phase 7: Final Verification Barrier (Verify, Fix, Re-Verify)",
|
|
247
349
|
"runCondition": {
|
|
248
350
|
"var": "taskComplexity",
|
|
249
351
|
"not_equals": "Small"
|
|
@@ -260,7 +362,7 @@
|
|
|
260
362
|
"body": [
|
|
261
363
|
{
|
|
262
364
|
"id": "phase-7a-final-verification-core",
|
|
263
|
-
"title": "Final Verification
|
|
365
|
+
"title": "Run Final Verification Batch",
|
|
264
366
|
"templateCall": {
|
|
265
367
|
"templateId": "wr.templates.routine.final-verification",
|
|
266
368
|
"args": {
|
|
@@ -271,14 +373,14 @@
|
|
|
271
373
|
},
|
|
272
374
|
{
|
|
273
375
|
"id": "phase-7b-fix-and-summarize",
|
|
274
|
-
"title": "Fix
|
|
275
|
-
"prompt": "Read `final-verification-findings.md` and
|
|
376
|
+
"title": "Synthesize Findings, Fix, and Re-Verify",
|
|
377
|
+
"prompt": "Read `final-verification-findings.md` and decide what actually needs fixing.\n\nDon't rubber-stamp it. The verifier is evidence, not the decision.\n\nIf `spec.md` exists, use it as the verification anchor and make sure every acceptance criterion is actually met.\n\nThis loop is verify, fix, then re-verify. If you fix anything here, the next pass exists to prove the fixes worked.\n\nSynthesize the verification output explicitly:\n- what the verifier found\n- what you agree with\n- what you reject and why\n- what changed because of the fixes\n\nFor any finding that changes final acceptance, classify it as:\n- `Confirmed`: you checked it against primary evidence (code, spec, tests/build, or direct workflow context)\n- `Plausible`: interesting, but not verified enough to accept or block final signoff yet\n- `Rejected`: contradicted by fuller context or direct evidence\n\nSubagent agreement alone is not enough for `Confirmed`.\n\nFix what has to be fixed now, rerun the affected verification, and update:\n- `implementation_plan.md` if the execution shape changed\n- `spec.md` if acceptance criteria, observable behavior, or external contracts changed\n\nCapture:\n- `integrationFindings`\n- `integrationPassed`\n- `regressionDetected`",
|
|
276
378
|
"requireConfirmation": false
|
|
277
379
|
},
|
|
278
380
|
{
|
|
279
381
|
"id": "phase-7c-loop-decision",
|
|
280
382
|
"title": "Final Verification Loop Decision",
|
|
281
|
-
"prompt": "
|
|
383
|
+
"prompt": "Decide whether final verification needs another pass or whether we're done.\n\nThis loop gets up to two verify/fix passes.\n- If verification found real issues and you fixed them, keep going so the fixes get re-verified.\n- If the issues are clean or resolved, stop.\n- If you've hit the limit, stop and record what remains.\n\nWhen you stop, include:\n- acceptance criteria status\n- invariant status\n- test/build summary\n- a concise PR/MR draft (why, test plan, rollout notes)\n- follow-up tickets\n- any philosophy tensions you accepted on purpose\n\nThen emit the required loop-control artifact in this shape (`decision` must be `continue` or `stop`):\n```json\n{\n \"artifacts\": [{\n \"kind\": \"wr.loop_control\",\n \"decision\": \"continue\"\n }]\n}\n```",
|
|
282
384
|
"requireConfirmation": true,
|
|
283
385
|
"outputContract": {
|
|
284
386
|
"contractRef": "wr.contracts.loop_control"
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
{
|
|
2
|
+
"id": "cross-platform-code-conversion",
|
|
3
|
+
"name": "Cross-Platform Code Conversion",
|
|
4
|
+
"version": "0.1.0",
|
|
5
|
+
"description": "Guides an agent through converting code from one platform to another (e.g., Android to iOS, iOS to Web). Triages files by difficulty, delegates easy literal translations to parallel subagents, then the main agent tackles platform-specific code requiring design decisions.",
|
|
6
|
+
"recommendedPreferences": {
|
|
7
|
+
"recommendedAutonomy": "guided",
|
|
8
|
+
"recommendedRiskPolicy": "conservative"
|
|
9
|
+
},
|
|
10
|
+
"features": [
|
|
11
|
+
"wr.features.subagent_guidance",
|
|
12
|
+
"wr.features.memory_context"
|
|
13
|
+
],
|
|
14
|
+
"preconditions": [
|
|
15
|
+
"User specifies source and target platforms.",
|
|
16
|
+
"Agent has read access to the source codebase.",
|
|
17
|
+
"Agent has write access to create target-platform files.",
|
|
18
|
+
"Agent can run build or typecheck tools for the target platform."
|
|
19
|
+
],
|
|
20
|
+
"metaGuidance": [
|
|
21
|
+
"IDIOMATIC CONVERSION: translate patterns and idioms, not syntax. A Kotlin sealed class becomes a Swift enum with associated values, not a class hierarchy workaround.",
|
|
22
|
+
"SCOPE DISCIPLINE: convert only what the user scoped. Do not expand to adjacent features or modules unless explicitly asked.",
|
|
23
|
+
"DEPENDENCY MAPPING: never assume a library exists on the target platform. Map each dependency to its target equivalent or flag it as needing a custom solution.",
|
|
24
|
+
"PLATFORM CONVENTIONS: follow the target platform's conventions for project structure, naming, error handling, concurrency, and testing.",
|
|
25
|
+
"BUILD PROOF: code that does not build is not done. Run build or typecheck after every conversion batch.",
|
|
26
|
+
"PRESERVE INTENT: the goal is functional equivalence, not line-by-line correspondence. Restructure when the target platform has a better way.",
|
|
27
|
+
"TRIAGE FIRST: not all code is equal. Separate trivial translations from code needing real design decisions. Delegate the easy stuff, focus on the hard stuff.",
|
|
28
|
+
"TARGET REPO DISCOVERY: find the target repo yourself before asking. Check workspace roots, sibling dirs, monorepo modules, and agent config files first.",
|
|
29
|
+
"PERSIST REPO MAPPINGS: once a target repo is confirmed, offer to save the source-to-target mapping in the source repo's agent config so future runs skip discovery.",
|
|
30
|
+
"DRIFT DETECTION: if a file turns out harder than its bucket classification during conversion, stop and reclassify it. Do not silently absorb complexity."
|
|
31
|
+
],
|
|
32
|
+
"steps": [
|
|
33
|
+
{
|
|
34
|
+
"id": "phase-0-scope",
|
|
35
|
+
"title": "Phase 0: Scope & Platform Analysis",
|
|
36
|
+
"prompt": "Understand what you're converting before you touch anything.\n\nFigure out:\n- What is being converted? (single file, module, feature, full component, entire app)\n- What is the source platform? (Android/Kotlin, iOS/Swift, Web/React, etc.)\n- What is the target platform?\n- How large is the conversion scope? (file count, rough LOC)\n- Where does the converted code go? Find the target repo yourself before asking the user.\n\nIf the user hasn't specified scope boundaries, ask. Don't guess at scope.\n\nThen classify the conversion:\n- `conversionComplexity`: Small (1-3 files, straightforward translation) / Medium (a module or feature, mixed difficulty) / Large (many modules, significant platform-specific code)\n\nUse this guidance:\n- Small: few files, mostly mechanical, low risk of idiom mismatch\n- Medium: a module or feature with some platform-specific code mixed in\n- Large: many files, deep platform coupling, multiple idiom mapping decisions\n\nCapture:\n- `sourcePlatform`\n- `targetPlatform`\n- `conversionScope`\n- `targetRepoPath`\n- `estimatedSize`\n- `conversionComplexity`",
|
|
37
|
+
"requireConfirmation": {
|
|
38
|
+
"var": "conversionComplexity",
|
|
39
|
+
"not_equals": "Small"
|
|
40
|
+
}
|
|
41
|
+
},
|
|
42
|
+
{
|
|
43
|
+
"id": "phase-1-understand-source",
|
|
44
|
+
"title": "Phase 1: Understand Source Code",
|
|
45
|
+
"prompt": "Read and analyze the source code through a conversion lens — what will be easy to convert, what will be hard, and why.\n\nMap out:\n- Architecture and module structure\n- Key patterns used (MVI, MVVM, dependency injection, etc.)\n- External dependencies and what they do\n- Entry points and public API surface\n- Platform coupling depth: is the code cleanly layered or is platform-specific code smeared throughout? This directly determines how much falls into easy vs. hard buckets.\n- Concurrency model: Coroutines, Combine, RxJS, async/await? This is often the single hardest mapping decision.\n- DI approach: Dagger/Hilt, Swinject, Koin? DI frameworks rarely map 1:1.\n- Test coverage shape: unit tests on business logic (convert easily), UI tests (likely rewrite), integration tests (depends on infra).\n- Shared code boundaries: is there already a shared/common module that might not need conversion at all?\n\nCapture:\n- `sourceArchitecture`\n- `dependencies`\n- `publicApiSurface`\n- `platformCouplingAssessment`\n- `concurrencyModel`\n- `testCoverageShape`",
|
|
46
|
+
"promptFragments": [
|
|
47
|
+
{
|
|
48
|
+
"id": "phase-1-small-light",
|
|
49
|
+
"when": { "var": "conversionComplexity", "equals": "Small" },
|
|
50
|
+
"text": "For Small conversions, keep this lightweight. A quick read of the files in scope is enough — don't map the entire architecture. Focus on identifying any platform-specific code that would prevent a straight translation."
|
|
51
|
+
}
|
|
52
|
+
],
|
|
53
|
+
"requireConfirmation": false
|
|
54
|
+
},
|
|
55
|
+
{
|
|
56
|
+
"id": "phase-small-fast-path",
|
|
57
|
+
"title": "Small Conversion Fast Path",
|
|
58
|
+
"runCondition": {
|
|
59
|
+
"var": "conversionComplexity",
|
|
60
|
+
"equals": "Small"
|
|
61
|
+
},
|
|
62
|
+
"prompt": "For Small conversions, skip triage and planning — just convert.\n\n- Translate the files to the target platform idiomatically\n- Follow target platform naming and structure conventions\n- Map any dependencies to target equivalents\n- Convert tests if they exist\n- Run build or typecheck to verify\n\nIf something turns out harder than expected (deep platform coupling, no clean dependency equivalent), update `conversionComplexity` to `Medium` and stop. The full triage and planning pipeline will activate for the remaining work.\n\nCapture:\n- `filesConverted`\n- `buildPassed`\n- `conversionComplexity`",
|
|
63
|
+
"requireConfirmation": false
|
|
64
|
+
},
|
|
65
|
+
{
|
|
66
|
+
"id": "phase-2-triage",
|
|
67
|
+
"title": "Phase 2: Triage & Sort",
|
|
68
|
+
"runCondition": {
|
|
69
|
+
"var": "conversionComplexity",
|
|
70
|
+
"not_equals": "Small"
|
|
71
|
+
},
|
|
72
|
+
"prompt": "Classify every file or module in scope into one of three buckets:\n\n**Bucket A — Literal translation**: Platform-agnostic business logic, data models, utilities, pure functions. These use no platform-specific APIs or libraries. Conversion is mechanical: translate the language syntax, follow target naming conventions, done. These will be delegated to subagents.\n\n**Bucket B — Library substitution**: Code that uses platform-specific libraries (networking, persistence, serialization, DI) but follows standard patterns. These need dependency mapping but the structure stays the same.\n\n**Bucket C — Platform-specific**: Code deeply tied to the platform (UI layer, lifecycle management, concurrency/threading, navigation, platform APIs). These need design decisions about target-platform idioms.\n\nFor each file or module, list:\n- File/module name\n- Bucket (A, B, or C)\n- One-line reason for classification\n- Dependencies it has on other files in scope (so we know conversion order)\n\nSort the work items within each bucket by dependency order (convert dependencies first).\n\nGroup Bucket A files into parallel batches of 3-5 files each. Each batch should contain files with no cross-dependencies so subagents can work independently.\n\nGroup Bucket B and C files into sequential batches by dependency order.\n\nEach batch should have: `name` (short label), `bucket` (A, B, or C), and `files` (list of file paths).\n\nCapture:\n- `bucketABatches` (parallel batches for subagent delegation)\n- `bucketBCBatches` (sequential batches for main agent)\n- `bucketACounts`\n- `bucketBCounts`\n- `bucketCCounts`",
|
|
73
|
+
"requireConfirmation": true
|
|
74
|
+
},
|
|
75
|
+
{
|
|
76
|
+
"id": "phase-3-plan-hard-items",
|
|
77
|
+
"title": "Phase 3: Plan Platform-Specific Conversions",
|
|
78
|
+
"runCondition": {
|
|
79
|
+
"var": "conversionComplexity",
|
|
80
|
+
"not_equals": "Small"
|
|
81
|
+
},
|
|
82
|
+
"prompt": "For Bucket B and Bucket C items, plan the conversion before writing code.\n\nFor Bucket B (library substitution):\n- Map each source dependency to its target-platform equivalent\n- If no equivalent exists, flag it and propose an alternative\n\nFor Bucket C (platform-specific):\n- Threading/concurrency model mapping\n- UI framework mapping\n- DI framework mapping\n- State management mapping\n- Error handling mapping\n- Navigation patterns\n- Lifecycle management approach\n- Testing framework mapping\n\nFor anything with no clean target equivalent, propose an idiomatic solution and explain the tradeoff.\n\nBucket A items don't need a plan. They're mechanical translation handled by subagents.\n\nCapture:\n- `idiomMapping`\n- `dependencyMapping`\n- `tradeoffs`",
|
|
83
|
+
"promptFragments": [
|
|
84
|
+
{
|
|
85
|
+
"id": "phase-3-medium-focused",
|
|
86
|
+
"when": { "var": "conversionComplexity", "equals": "Medium" },
|
|
87
|
+
"text": "For Medium conversions, focus the plan on the items that actually need design decisions. Don't exhaustively map every dimension — only the ones relevant to the files in scope."
|
|
88
|
+
}
|
|
89
|
+
],
|
|
90
|
+
"requireConfirmation": true
|
|
91
|
+
},
|
|
92
|
+
{
|
|
93
|
+
"id": "phase-4-delegate-bucket-a",
|
|
94
|
+
"title": "Phase 4: Delegate Bucket A (Parallel Subagents)",
|
|
95
|
+
"runCondition": {
|
|
96
|
+
"and": [
|
|
97
|
+
{ "var": "conversionComplexity", "not_equals": "Small" },
|
|
98
|
+
{ "var": "bucketACounts", "not_equals": 0 }
|
|
99
|
+
]
|
|
100
|
+
},
|
|
101
|
+
"prompt": "Delegate all Bucket A batches to subagents in parallel. If subagent delegation is not available in your environment, convert Bucket A files yourself sequentially — they're mechanical translations.\n\nFor each batch in `bucketABatches`, spawn a subagent with these instructions:\n- Source platform: `{{sourcePlatform}}`\n- Target platform: `{{targetPlatform}}`\n- Target repo: `{{targetRepoPath}}`\n- Files to convert: (list the specific files in this batch)\n- Task: translate these files from the source language to the target language. Follow target platform naming conventions. These are platform-agnostic files — no library substitution or idiom mapping needed. Preserve the public API. Convert tests if they exist.\n\nRun batches in parallel. Each subagent works independently on files with no cross-dependencies.\n\nWhen all subagents finish, review their output:\n- Spot-check a few converted files for quality\n- Flag any files a subagent misclassified as easy (actually needs library substitution or platform-specific handling)\n- Move misclassified files back to the appropriate bucket for main agent handling\n\nRun build or typecheck on the Bucket A output to catch issues early.\n\nCapture:\n- `bucketAComplete`\n- `bucketABuildPassed`\n- `reclassifiedFiles`",
|
|
102
|
+
"requireConfirmation": false
|
|
103
|
+
},
|
|
104
|
+
{
|
|
105
|
+
"id": "phase-5-convert-hard",
|
|
106
|
+
"type": "loop",
|
|
107
|
+
"title": "Phase 5: Convert Bucket B & C (Main Agent)",
|
|
108
|
+
"runCondition": {
|
|
109
|
+
"var": "conversionComplexity",
|
|
110
|
+
"not_equals": "Small"
|
|
111
|
+
},
|
|
112
|
+
"loop": {
|
|
113
|
+
"type": "forEach",
|
|
114
|
+
"items": "bucketBCBatches",
|
|
115
|
+
"itemVar": "currentBatch",
|
|
116
|
+
"indexVar": "batchIndex",
|
|
117
|
+
"maxIterations": 30
|
|
118
|
+
},
|
|
119
|
+
"body": [
|
|
120
|
+
{
|
|
121
|
+
"id": "phase-5a-convert-batch",
|
|
122
|
+
"title": "Convert Current Batch",
|
|
123
|
+
"prompt": "Convert the current batch: `{{currentBatch.name}}`\n\nThis is Bucket B or C code that needs your full context.\n\n- **Bucket B**: Follow the dependency mapping from Phase 3. Substitute libraries, keep structure.\n- **Bucket C**: Follow the idiom mapping from Phase 3. Restructure where the target platform has a better way.\n\nAlso handle any `reclassifiedFiles` that were moved back from Bucket A delegation.\n\nFor all files:\n- Follow target platform conventions\n- Preserve public API contracts where possible\n- Add TODO comments for anything uncertain\n- Convert tests alongside production code when source tests exist\n\nRun build or typecheck after this batch. If it fails, fix it before moving on.\n\nTrack whether this batch required:\n- `bucketDriftDetected`: a file turned out to be harder than its bucket classification (e.g., Bucket B file needed Bucket C-level design decisions)\n- `unexpectedDependency`: a dependency was discovered that wasn't in the Phase 3 mapping\n- `buildBroke`: build or typecheck failed after this batch\n\nCapture:\n- `batchFilesConverted`\n- `batchBuildPassed`\n- `batchIssues`\n- `bucketDriftDetected`\n- `unexpectedDependency`\n- `buildBroke`",
|
|
124
|
+
"requireConfirmation": false
|
|
125
|
+
},
|
|
126
|
+
{
|
|
127
|
+
"id": "phase-5b-verify-batch",
|
|
128
|
+
"title": "Verify Batch",
|
|
129
|
+
"runCondition": {
|
|
130
|
+
"or": [
|
|
131
|
+
{ "var": "bucketDriftDetected", "equals": true },
|
|
132
|
+
{ "var": "unexpectedDependency", "equals": true },
|
|
133
|
+
{ "var": "buildBroke", "equals": true }
|
|
134
|
+
]
|
|
135
|
+
},
|
|
136
|
+
"prompt": "Something unexpected happened in this batch. Before moving on, check what went wrong.\n\nIf `bucketDriftDetected`: the file was harder than classified. Update the idiom or dependency mapping from Phase 3 so downstream batches don't hit the same surprise. Record what changed.\n\nIf `unexpectedDependency`: a dependency wasn't in the Phase 3 plan. Map it now and check whether other batches depend on the same thing.\n\nIf `buildBroke`: diagnose whether the failure is local to this batch or a cross-batch integration issue. Fix it before continuing.\n\nIf the drift is severe enough that the Phase 3 plan is unreliable, say so. Don't silently absorb complexity.\n\nCapture:\n- `mappingUpdated`\n- `verificationPassed`",
|
|
137
|
+
"requireConfirmation": {
|
|
138
|
+
"var": "bucketDriftDetected",
|
|
139
|
+
"equals": true
|
|
140
|
+
}
|
|
141
|
+
}
|
|
142
|
+
]
|
|
143
|
+
},
|
|
144
|
+
{
|
|
145
|
+
"id": "phase-6-verify",
|
|
146
|
+
"type": "loop",
|
|
147
|
+
"title": "Phase 6: Final Verification",
|
|
148
|
+
"runCondition": {
|
|
149
|
+
"var": "conversionComplexity",
|
|
150
|
+
"not_equals": "Small"
|
|
151
|
+
},
|
|
152
|
+
"loop": {
|
|
153
|
+
"type": "while",
|
|
154
|
+
"conditionSource": {
|
|
155
|
+
"kind": "artifact_contract",
|
|
156
|
+
"contractRef": "wr.contracts.loop_control",
|
|
157
|
+
"loopId": "final_verification_loop"
|
|
158
|
+
},
|
|
159
|
+
"maxIterations": 3
|
|
160
|
+
},
|
|
161
|
+
"body": [
|
|
162
|
+
{
|
|
163
|
+
"id": "phase-6a-full-build",
|
|
164
|
+
"title": "Full Build and Integration Check",
|
|
165
|
+
"prompt": "Run a full build or typecheck on the entire converted codebase — both subagent-converted and main-agent-converted code together.\n\nCheck for:\n- Build/compile errors from cross-batch integration issues\n- Inconsistencies between subagent output and main agent output (naming, patterns)\n- Non-idiomatic patterns that slipped through\n- Missing error handling at module boundaries\n- Threading or concurrency issues across modules\n- Broken public API contracts\n\nFix each issue. If a fix is a band-aid over a deeper mapping problem, go back and fix the mapping.\n\nCapture:\n- `fullBuildPassed`\n- `integrationIssues`\n- `issuesFixed`",
|
|
166
|
+
"requireConfirmation": false
|
|
167
|
+
},
|
|
168
|
+
{
|
|
169
|
+
"id": "phase-6b-loop-decision",
|
|
170
|
+
"title": "Verification Loop Decision",
|
|
171
|
+
"prompt": "Decide whether verification needs another pass.\n\n- If the build fails or critical integration issues remain: continue.\n- If the build passes and remaining issues are minor: stop.\n- If you've hit the iteration limit: stop and record what remains.\n\nThen emit the required loop-control artifact in this shape (`decision` must be `continue` or `stop`):\n```json\n{\n \"artifacts\": [{\n \"kind\": \"wr.loop_control\",\n \"decision\": \"continue\"\n }]\n}\n```",
|
|
172
|
+
"requireConfirmation": false,
|
|
173
|
+
"outputContract": {
|
|
174
|
+
"contractRef": "wr.contracts.loop_control"
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
]
|
|
178
|
+
},
|
|
179
|
+
{
|
|
180
|
+
"id": "phase-7-handoff",
|
|
181
|
+
"title": "Phase 7: Handoff",
|
|
182
|
+
"prompt": "Summarize what was converted.\n\nInclude:\n- Source and target platforms\n- Total files converted\n- Build/typecheck status\n- Known gaps, TODOs, or limitations\n- What would need manual attention\n\nKeep it concise. The converted code is the deliverable.",
|
|
183
|
+
"promptFragments": [
|
|
184
|
+
{
|
|
185
|
+
"id": "phase-7-small-summary",
|
|
186
|
+
"when": { "var": "conversionComplexity", "equals": "Small" },
|
|
187
|
+
"text": "For Small conversions, keep the summary brief — just list what was converted, build status, and any issues."
|
|
188
|
+
},
|
|
189
|
+
{
|
|
190
|
+
"id": "phase-7-full-summary",
|
|
191
|
+
"when": { "var": "conversionComplexity", "not_equals": "Small" },
|
|
192
|
+
"text": "Also include: bucket breakdown (A/B/C counts), delegation results (how many files delegated, subagent quality, any reclassified), key idiom mapping decisions, and dependency substitutions."
|
|
193
|
+
}
|
|
194
|
+
],
|
|
195
|
+
"notesOptional": true,
|
|
196
|
+
"requireConfirmation": false
|
|
197
|
+
}
|
|
198
|
+
]
|
|
199
|
+
}
|