@exaudeus/workrail 3.15.0 → 3.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. package/dist/application/services/workflow-service.d.ts +2 -0
  2. package/dist/application/services/workflow-service.js +3 -0
  3. package/dist/application/use-cases/raw-workflow-file-scanner.js +10 -13
  4. package/dist/cli/commands/index.d.ts +1 -1
  5. package/dist/cli/commands/index.js +2 -1
  6. package/dist/cli/commands/init.d.ts +10 -0
  7. package/dist/cli/commands/init.js +72 -0
  8. package/dist/cli.js +13 -1
  9. package/dist/config/config-file.d.ts +8 -0
  10. package/dist/config/config-file.js +141 -0
  11. package/dist/config/feature-flags.js +8 -0
  12. package/dist/console/assets/index-BZNM03t1.css +1 -0
  13. package/dist/console/assets/index-BwJelCXK.js +28 -0
  14. package/dist/console/index.html +2 -2
  15. package/dist/di/container.d.ts +1 -0
  16. package/dist/di/container.js +24 -7
  17. package/dist/infrastructure/session/HttpServer.d.ts +3 -4
  18. package/dist/infrastructure/session/HttpServer.js +58 -106
  19. package/dist/infrastructure/storage/caching-workflow-storage.d.ts +2 -0
  20. package/dist/infrastructure/storage/caching-workflow-storage.js +15 -6
  21. package/dist/infrastructure/storage/file-workflow-storage.js +3 -4
  22. package/dist/infrastructure/storage/schema-validating-workflow-storage.js +9 -8
  23. package/dist/manifest.json +303 -247
  24. package/dist/mcp/assert-output.d.ts +37 -0
  25. package/dist/mcp/assert-output.js +53 -0
  26. package/dist/mcp/boundary-coercion.d.ts +1 -0
  27. package/dist/mcp/boundary-coercion.js +44 -0
  28. package/dist/mcp/dev-mode.d.ts +2 -0
  29. package/dist/mcp/dev-mode.js +16 -0
  30. package/dist/mcp/handler-factory.d.ts +1 -1
  31. package/dist/mcp/handler-factory.js +20 -16
  32. package/dist/mcp/handlers/session.js +8 -9
  33. package/dist/mcp/handlers/shared/request-workflow-reader.d.ts +1 -0
  34. package/dist/mcp/handlers/shared/request-workflow-reader.js +90 -20
  35. package/dist/mcp/handlers/v2-advance-core/event-builders.d.ts +2 -0
  36. package/dist/mcp/handlers/v2-advance-core/event-builders.js +6 -6
  37. package/dist/mcp/handlers/v2-advance-core/index.d.ts +2 -0
  38. package/dist/mcp/handlers/v2-advance-core/index.js +4 -3
  39. package/dist/mcp/handlers/v2-advance-core/input-validation.d.ts +2 -0
  40. package/dist/mcp/handlers/v2-advance-core/input-validation.js +32 -9
  41. package/dist/mcp/handlers/v2-advance-core/outcome-blocked.d.ts +2 -0
  42. package/dist/mcp/handlers/v2-advance-core/outcome-blocked.js +1 -1
  43. package/dist/mcp/handlers/v2-advance-core/outcome-success.d.ts +2 -0
  44. package/dist/mcp/handlers/v2-advance-core/outcome-success.js +1 -1
  45. package/dist/mcp/handlers/v2-checkpoint.d.ts +1 -1
  46. package/dist/mcp/handlers/v2-checkpoint.js +5 -6
  47. package/dist/mcp/handlers/v2-execution/advance.d.ts +4 -2
  48. package/dist/mcp/handlers/v2-execution/advance.js +5 -7
  49. package/dist/mcp/handlers/v2-execution/continue-advance.d.ts +1 -0
  50. package/dist/mcp/handlers/v2-execution/continue-advance.js +59 -27
  51. package/dist/mcp/handlers/v2-execution/continue-rehydrate.d.ts +2 -1
  52. package/dist/mcp/handlers/v2-execution/continue-rehydrate.js +11 -10
  53. package/dist/mcp/handlers/v2-execution/index.js +2 -0
  54. package/dist/mcp/handlers/v2-execution/replay.d.ts +8 -4
  55. package/dist/mcp/handlers/v2-execution/replay.js +50 -30
  56. package/dist/mcp/handlers/v2-execution/start.d.ts +2 -3
  57. package/dist/mcp/handlers/v2-execution/start.js +58 -30
  58. package/dist/mcp/handlers/v2-execution/workflow-object-cache.d.ts +5 -0
  59. package/dist/mcp/handlers/v2-execution/workflow-object-cache.js +19 -0
  60. package/dist/mcp/handlers/v2-execution-helpers.d.ts +1 -0
  61. package/dist/mcp/handlers/v2-execution-helpers.js +23 -7
  62. package/dist/mcp/handlers/v2-resume.d.ts +1 -1
  63. package/dist/mcp/handlers/v2-resume.js +3 -4
  64. package/dist/mcp/handlers/v2-state-conversion.js +5 -1
  65. package/dist/mcp/handlers/v2-workflow.d.ts +80 -0
  66. package/dist/mcp/handlers/v2-workflow.js +40 -23
  67. package/dist/mcp/handlers/workflow.d.ts +2 -5
  68. package/dist/mcp/handlers/workflow.js +15 -12
  69. package/dist/mcp/output-schemas.d.ts +25 -27
  70. package/dist/mcp/output-schemas.js +7 -7
  71. package/dist/mcp/server.js +23 -4
  72. package/dist/mcp/tool-call-timing.d.ts +24 -0
  73. package/dist/mcp/tool-call-timing.js +85 -0
  74. package/dist/mcp/transports/http-entry.js +3 -2
  75. package/dist/mcp/transports/http-listener.d.ts +1 -0
  76. package/dist/mcp/transports/http-listener.js +25 -0
  77. package/dist/mcp/transports/shutdown-hooks.d.ts +4 -1
  78. package/dist/mcp/transports/shutdown-hooks.js +3 -2
  79. package/dist/mcp/transports/stdio-entry.js +6 -28
  80. package/dist/mcp/v2-response-formatter.d.ts +1 -1
  81. package/dist/mcp/v2-response-formatter.js +2 -5
  82. package/dist/mcp/validation/schema-introspection.d.ts +1 -0
  83. package/dist/mcp/validation/schema-introspection.js +15 -5
  84. package/dist/mcp/validation/suggestion-generator.js +2 -2
  85. package/dist/runtime/adapters/node-process-signals.d.ts +1 -0
  86. package/dist/runtime/adapters/node-process-signals.js +5 -0
  87. package/dist/runtime/adapters/noop-process-signals.d.ts +1 -0
  88. package/dist/runtime/adapters/noop-process-signals.js +2 -0
  89. package/dist/runtime/ports/process-signals.d.ts +1 -0
  90. package/dist/types/workflow-definition.d.ts +5 -1
  91. package/dist/types/workflow-definition.js +2 -0
  92. package/dist/types/workflow.d.ts +3 -0
  93. package/dist/types/workflow.js +35 -26
  94. package/dist/v2/durable-core/domain/context-template-resolver.js +2 -2
  95. package/dist/v2/durable-core/domain/function-definition-expander.js +2 -17
  96. package/dist/v2/durable-core/domain/prompt-renderer.d.ts +2 -0
  97. package/dist/v2/durable-core/domain/prompt-renderer.js +22 -18
  98. package/dist/v2/durable-core/domain/recap-recovery.js +23 -16
  99. package/dist/v2/durable-core/domain/retrieval-contract.js +13 -7
  100. package/dist/v2/durable-core/schemas/compiled-workflow/index.js +4 -3
  101. package/dist/v2/durable-core/session-index.d.ts +22 -0
  102. package/dist/v2/durable-core/session-index.js +58 -0
  103. package/dist/v2/durable-core/sorted-event-log.d.ts +6 -0
  104. package/dist/v2/durable-core/sorted-event-log.js +15 -0
  105. package/dist/v2/infra/local/fs/index.js +8 -8
  106. package/dist/v2/infra/local/pinned-workflow-store/index.d.ts +2 -0
  107. package/dist/v2/infra/local/pinned-workflow-store/index.js +49 -0
  108. package/dist/v2/infra/local/remembered-roots-store/index.d.ts +3 -1
  109. package/dist/v2/infra/local/remembered-roots-store/index.js +6 -3
  110. package/dist/v2/infra/local/session-store/index.d.ts +1 -1
  111. package/dist/v2/infra/local/session-store/index.js +71 -61
  112. package/dist/v2/infra/local/session-summary-provider/index.js +9 -4
  113. package/dist/v2/infra/local/snapshot-store/index.js +2 -1
  114. package/dist/v2/infra/local/workspace-anchor/index.js +4 -2
  115. package/dist/v2/ports/pinned-workflow-store.port.d.ts +2 -0
  116. package/dist/v2/ports/session-event-log-store.port.d.ts +1 -1
  117. package/dist/v2/projections/assessment-consequences.d.ts +2 -1
  118. package/dist/v2/projections/assessment-consequences.js +0 -5
  119. package/dist/v2/projections/assessments.d.ts +2 -1
  120. package/dist/v2/projections/assessments.js +2 -4
  121. package/dist/v2/projections/gaps.d.ts +2 -1
  122. package/dist/v2/projections/gaps.js +0 -5
  123. package/dist/v2/projections/preferences.d.ts +2 -1
  124. package/dist/v2/projections/preferences.js +0 -5
  125. package/dist/v2/projections/run-context.d.ts +2 -2
  126. package/dist/v2/projections/run-context.js +0 -5
  127. package/dist/v2/projections/run-dag.js +7 -1
  128. package/dist/v2/projections/run-execution-trace.d.ts +8 -0
  129. package/dist/v2/projections/run-execution-trace.js +124 -0
  130. package/dist/v2/projections/run-status-signals.d.ts +2 -2
  131. package/dist/v2/usecases/console-routes.d.ts +3 -1
  132. package/dist/v2/usecases/console-routes.js +124 -25
  133. package/dist/v2/usecases/console-service.d.ts +1 -0
  134. package/dist/v2/usecases/console-service.js +83 -25
  135. package/dist/v2/usecases/console-types.d.ts +53 -0
  136. package/dist/v2/usecases/worktree-service.js +32 -1
  137. package/package.json +6 -5
  138. package/spec/workflow.schema.json +18 -0
  139. package/workflows/adaptive-ticket-creation.json +23 -16
  140. package/workflows/architecture-scalability-audit.json +29 -22
  141. package/workflows/bug-investigation.agentic.v2.json +7 -0
  142. package/workflows/coding-task-workflow-agentic.json +7 -0
  143. package/workflows/coding-task-workflow-agentic.lean.v2.json +16 -8
  144. package/workflows/coding-task-workflow-agentic.v2.json +7 -0
  145. package/workflows/cross-platform-code-conversion.v2.json +7 -0
  146. package/workflows/document-creation-workflow.json +15 -8
  147. package/workflows/documentation-update-workflow.json +15 -8
  148. package/workflows/intelligent-test-case-generation.json +7 -0
  149. package/workflows/learner-centered-course-workflow.json +9 -2
  150. package/workflows/mr-review-workflow.agentic.v2.json +7 -0
  151. package/workflows/personal-learning-materials-creation-branched.json +15 -8
  152. package/workflows/presentation-creation.json +12 -5
  153. package/workflows/production-readiness-audit.json +7 -0
  154. package/workflows/relocation-workflow-us.json +39 -32
  155. package/workflows/scoped-documentation-workflow.json +33 -26
  156. package/workflows/ui-ux-design-workflow.json +7 -0
  157. package/workflows/workflow-diagnose-environment.json +6 -0
  158. package/workflows/workflow-for-workflows.json +7 -0
  159. package/workflows/workflow-for-workflows.v2.json +23 -11
  160. package/workflows/wr.discovery.json +8 -1
  161. package/dist/console/assets/index-BZYIjrzJ.js +0 -28
  162. package/dist/console/assets/index-OLCKbDdm.css +0 -1
  163. package/dist/mcp/handlers/v2-resolve-refs-envelope.d.ts +0 -5
  164. package/dist/mcp/handlers/v2-resolve-refs-envelope.js +0 -17
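The hunks below can be regenerated locally with npm's built-in diff support (a minimal sketch, assuming npm 7 or later, which ships the `npm diff` command):

```sh
npm diff --diff=@exaudeus/workrail@3.15.0 --diff=@exaudeus/workrail@3.17.0
```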
@@ -3,14 +3,21 @@
  "name": "Adaptive Ticket Creation Workflow",
  "version": "1.0.0",
  "description": "Use this to create high-quality Jira tickets for features, tasks, or epics. Automatically selects the right complexity path (Simple, Standard, or Epic) and generates properly structured tickets with acceptance criteria and estimates.",
+ "about": "## Adaptive Ticket Creation Workflow\n\nUse this to create well-structured Jira tickets for features, tasks, or epics. The workflow automatically selects the right complexity path (Simple, Standard, or Epic) based on the request, so you don't have to decide upfront how much process you need.\n\n### What it produces\n\n- **Simple path**: one complete, developer-ready Jira ticket with a context-rich description, checkbox-style acceptance criteria, and an effort estimate.\n- **Standard path**: a high-level plan plus a batch of related tickets covering all deliverables.\n- **Epic path**: everything in Standard, plus full epic decomposition, per-story estimates with risk ratings, dependency mapping, and a reusable team rules file at `.workflow_rules/ticket_creation.md` that future runs load automatically.\n\n### When to use it\n\n- You need to create one or more Jira tickets and want them to be genuinely developer-ready.\n- You have a feature request, bug, task, or epic that needs to be broken down and estimated.\n- Your team has specific ticket conventions (naming, sizing, labels) -- the workflow learns and stores these on the Epic path.\n\n### How to get good results\n\n- Provide as much context as you have: PRD links, design files, existing related tickets, and any known constraints.\n- If your team has a `.workflow_rules/ticket_creation.md` file, the workflow loads it automatically and applies your conventions.\n- On the Epic path, the workflow asks you to approve the high-level plan and the decomposition before generating tickets. Use these checkpoints to catch scope issues early.\n- Acceptance criteria are written as checkbox-style observable conditions, not restatements of requirements. If your team has a specific AC format, describe it in the rules file.",
+ "examples": [
+ "Create a Jira ticket for adding biometric authentication to the mobile login screen",
+ "Break down the new real-time notifications feature into an epic with stories and estimates",
+ "Write tickets for all backend work needed to support the v2 search API",
+ "Create a single bug ticket for the checkout crash when applying a promo code on iOS 17"
+ ],
  "preconditions": [
  "User has provided a description of the feature, task, or work to be ticketed.",
  "Agent has file system access for loading team preferences and persisting rules."
  ],
  "metaGuidance": [
- "ROLE: expert Product Manager and Mobile Tech Lead. Triage autonomously, write developer-ready tickets with full context, and produce objectively testable acceptance criteria \u2014 not user-story paraphrases.",
+ "ROLE: expert Product Manager and Mobile Tech Lead. Triage autonomously, write developer-ready tickets with full context, and produce objectively testable acceptance criteria -- not user-story paraphrases.",
  "EXPLORE FIRST: use tools to gather context before asking the user anything. Ask only for information you genuinely cannot determine with tools or from the request itself.",
- "TEAM RULES: load and follow ./.workflow_rules/ticket_creation.md when it exists. Preferences there override your defaults. Rules are captured only on the Epic path \u2014 complex sessions are where durable conventions emerge and where the investment pays off.",
+ "TEAM RULES: load and follow ./.workflow_rules/ticket_creation.md when it exists. Preferences there override your defaults. Rules are captured only on the Epic path -- complex sessions are where durable conventions emerge and where the investment pays off.",
  "AUTONOMOUS TRIAGE: decide pathComplexity (Simple / Standard / Epic) yourself from the request. Surface your reasoning, then wait for confirmation.",
  "QUALITY FLOOR: every ticket must have a context-rich description, checkbox-style acceptance criteria that are objectively testable, and an effort estimate."
  ],
@@ -21,7 +28,7 @@
  "promptBlocks": {
  "goal": "Analyze the request, gather available context, and select the right complexity path before doing any ticket work.",
  "constraints": [
- "Decide the path yourself \u2014 do not ask the user to choose.",
+ "Decide the path yourself -- do not ask the user to choose.",
  "Load ./.workflow_rules/ticket_creation.md if it exists and let it influence your triage. If the file does not exist, note this explicitly in your output so the user knows team conventions were not applied.",
  "Set pathComplexity to exactly one of: Simple, Standard, or Epic."
  ],
@@ -29,7 +36,7 @@
  "Read any attached documents, linked PRDs, or referenced specs.",
  "Identify complexity signals: scope breadth, number of distinct deliverables, cross-team dependencies, technical unknowns, and estimated ticket count.",
  "Apply the triage rubric: Simple = single ticket, clear requirements, no blocking unknowns, minimal dependencies. Standard = multiple related tickets, moderate scope, some analysis needed. Epic = complex feature requiring decomposition, multiple teams or significant unknowns, likely 6+ tickets.",
- "Upgrade triggers \u2014 escalate to Standard if: request implies more than one clearly separate work item. Escalate to Epic if: multiple teams are involved, architecture decisions are unresolved, or you estimate more than five tickets.",
+ "Upgrade triggers -- escalate to Standard if: request implies more than one clearly separate work item. Escalate to Epic if: multiple teams are involved, architecture decisions are unresolved, or you estimate more than five tickets.",
  "State your selected path and the top three reasons. Capture pathComplexity in context."
  ],
  "outputRequired": {
@@ -53,7 +60,7 @@
  "promptBlocks": {
  "goal": "Generate one complete, developer-ready Jira ticket for this request.",
  "constraints": [
- "Acceptance criteria must be phrased as observable, testable conditions \u2014 not user-story restatements.",
+ "Acceptance criteria must be phrased as observable, testable conditions -- not user-story restatements.",
  "Follow any team conventions from ./.workflow_rules/ticket_creation.md.",
  "Include all fields a developer needs to start work without asking follow-up questions."
  ],
@@ -103,7 +110,7 @@
  "Load ./.workflow_rules/ticket_creation.md and note any relevant team conventions.",
  "Identify: key stakeholders, team dependencies, technical constraints, known risks, and any conflicting requirements.",
  "Classify each gap as: Critical (blocks planning), Important (affects scope), or Nice-to-have (can proceed without it).",
- "For Critical and Important gaps that tools cannot resolve, ask the user \u2014 in a single consolidated question block, not one at a time.",
+ "For Critical and Important gaps that tools cannot resolve, ask the user -- in a single consolidated question block, not one at a time.",
  "After receiving answers, check whether any response reveals scope that would change `pathComplexity` (e.g. the user confirms three teams are involved, or the feature is narrower than initially assessed). If so, state the new classification and reasoning, and ask the user to confirm before continuing to Phase 2."
  ],
  "outputRequired": {
@@ -135,16 +142,16 @@
  "promptBlocks": {
  "goal": "Produce a structured plan that will drive ticket generation. This plan is the source of truth for scope.",
  "constraints": [
- "Be explicit about scope boundaries \u2014 ambiguous scope will produce ambiguous tickets.",
+ "Be explicit about scope boundaries -- ambiguous scope will produce ambiguous tickets.",
  "Success criteria must be measurable, not just descriptive.",
  "For Standard path: this plan feeds directly into batch ticket generation."
  ],
  "procedure": [
  "Write: Project Summary (2-3 sentences, what is being built and why).",
  "Write: Key Deliverables (bulleted list of distinct components or features).",
- "Write: In-Scope (explicit list \u2014 prevents scope creep).",
- "Write: Out-of-Scope (explicit exclusions \u2014 prevents misunderstandings).",
- "Write: Success Criteria (measurable definition of done \u2014 each item verifiable).",
+ "Write: In-Scope (explicit list -- prevents scope creep).",
+ "Write: Out-of-Scope (explicit exclusions -- prevents misunderstandings).",
+ "Write: Success Criteria (measurable definition of done -- each item verifiable).",
  "Write: High-Level Timeline (phases or milestones with rough sizing).",
  "Review: does every deliverable map clearly to implementable work? Is anything in scope that should be out?"
  ],
@@ -170,7 +177,7 @@
  "goal": "Break the approved plan into a logical work hierarchy that development teams can execute.",
  "constraints": [
  "Every item in the plan's In-Scope list must map to at least one work item in the hierarchy.",
- "Dependencies must be explicit \u2014 not implied by ordering alone.",
+ "Dependencies must be explicit -- not implied by ordering alone.",
  "Oversized stories (more than one sprint of work) should be split."
  ],
  "procedure": [
@@ -202,7 +209,7 @@
  "promptBlocks": {
  "goal": "Add effort estimates, risk assessments, and team assignments to each story in the hierarchy.",
  "constraints": [
- "Conservative estimates are better than optimistic ones \u2014 note uncertainty explicitly.",
+ "Conservative estimates are better than optimistic ones -- note uncertainty explicitly.",
  "Justify each estimate with one sentence of reasoning.",
  "Flag stories on the critical path."
  ],
@@ -212,7 +219,7 @@
  "Assign priority: must-have for MVP, should-have, nice-to-have.",
  "Note suggested team or skill area for each story.",
  "Identify critical path: which stories block the most downstream work? Surface these explicitly.",
- "Flag any stories whose estimates feel uncertain \u2014 surface the unknowns rather than hiding them in a range."
+ "Flag any stories whose estimates feel uncertain -- surface the unknowns rather than hiding them in a range."
  ],
  "outputRequired": {
  "notesMarkdown": "Total story point estimate, critical path items, high-risk stories."
@@ -277,7 +284,7 @@
  "promptBlocks": {
  "goal": "Extract actionable team preferences from this session and persist them so future runs use them automatically.",
  "constraints": [
- "Only write rules that are genuinely reusable across future tickets \u2014 skip one-off project specifics.",
+ "Only write rules that are genuinely reusable across future tickets -- skip one-off project specifics.",
  "Keep rules concise and actionable, not narrative.",
  "Append to ./.workflow_rules/ticket_creation.md rather than replacing it."
  ],
@@ -285,7 +292,7 @@
  "Review what conventions, preferences, or requirements emerged during this session.",
  "Identify patterns worth preserving: naming conventions, field usage, AC format preferences, estimation approach, labeling rules.",
  "Draft new rules as short, imperative statements (e.g., 'Use T-shirt sizing not Fibonacci', 'Always include a Figma link in design tickets').",
- "Check against existing rules \u2014 avoid duplicates or contradictions.",
+ "Check against existing rules -- avoid duplicates or contradictions.",
  "Append new rules to ./.workflow_rules/ticket_creation.md, creating the file if it does not exist."
  ],
  "outputRequired": {
@@ -300,4 +307,4 @@
  "requireConfirmation": false
  }
  ]
- }
+ }
@@ -1,8 +1,15 @@
  {
  "id": "architecture-scalability-audit",
- "name": "Architecture Scalability Audit (v1 \u2022 Evidence-Driven \u2022 Dimension-Scoped \u2022 rigorMode-Adaptive)",
+ "name": "Architecture Scalability Audit (v1 Evidence-Driven Dimension-Scoped rigorMode-Adaptive)",
  "version": "0.1.0",
  "description": "Use this to audit a bounded codebase scope for architecture scalability. Declare which scalability dimensions matter (load, data volume, team size, feature extensibility, operational); the workflow investigates each and produces evidence-grounded findings.",
+ "about": "## Architecture Scalability Audit\n\nThis workflow audits a bounded codebase scope for scalability across the dimensions you care about. It does not produce generic \"won't scale\" warnings -- every finding must cite a specific file, class, method, or pattern, and every concern must name a concrete growth scenario (e.g. 10x traffic, 100x records, 3x team size).\n\n**What it does:**\nYou declare the scope boundary and the scalability dimensions that matter for your context. The workflow reads the codebase to understand the architecture, assigns one dedicated reviewer family per dimension, runs them in parallel from a shared fact packet, reconciles contradictions and blind spots through a synthesis loop, and delivers a per-dimension verdict (will_break / risk / fine) with an overall scalability readiness verdict.\n\n**The five scalability dimensions you can select:**\n- **load** -- handles more requests, users, or throughput\n- **data_volume** -- handles more records, storage, or query size\n- **team_org** -- more teams or developers working on this scope without friction\n- **feature_extensibility** -- more features added without rearchitecting\n- **operational** -- more deployments, environments, or operational complexity\n\n**When to use it:**\n- Before investing significantly in a component you expect to grow\n- When planning capacity for a new traffic tier or data volume increase\n- When evaluating a codebase acquired through a merger, partnership, or open-source adoption\n- When a team is growing and you want to know if the architecture will hold under parallel development\n\n**What it produces:**\nAn overall scalability verdict, per-dimension findings with specific code references and growth scenarios, cross-cutting concerns that span multiple dimensions, a prioritized concern list, and explicit callouts of what is already well-designed for scale.\n\n**How to get good results:**\nBe specific about the scope boundary -- name the service, module, or feature explicitly and say what is out of scope. Choose the dimensions relevant to your actual growth pressures; the workflow will not add dimensions you did not select. If you know a specific growth target (e.g. \"we expect 50x user growth in 18 months\"), mention it.",
+ "examples": [
+ "Audit the search service for load and data_volume scalability before the Black Friday traffic ramp",
+ "Check the analytics pipeline for data_volume and operational scalability -- we are moving from 1M to 100M events/day",
+ "Scalability audit of the user management module for team_org and feature_extensibility as we split into three squads",
+ "Audit the cart and checkout services for load scalability -- scope is /cart and /checkout only"
+ ],
  "recommendedPreferences": {
  "recommendedAutonomy": "guided",
  "recommendedRiskPolicy": "conservative"
@@ -20,7 +27,7 @@
  "DEFAULT BEHAVIOR: self-execute with tools. Ask only for true scope or dimension decisions you cannot resolve yourself.",
  "V2 DURABILITY: keep workflow truth in output.notesMarkdown and explicit context fields. Human-facing markdown artifacts are optional companions only.",
  "OWNERSHIP: the main agent owns the fact packet, synthesis, verdict calibration, and final handoff. Delegated dimension audits are evidence, not authority.",
- "DIMENSION DISCIPLINE: audit only the dimensions the user declared. Do not add dimensions the user did not select, even if they look relevant \u2014 surface them as advisory notes instead.",
+ "DIMENSION DISCIPLINE: audit only the dimensions the user declared. Do not add dimensions the user did not select, even if they look relevant -- surface them as advisory notes instead.",
  "EVIDENCE FIRST: every risk or will_break finding must cite a specific file, class, method, or pattern in the codebase. Technology name alone is not evidence.",
  "GROWTH SCENARIO: every concern must name a growth scenario (e.g. 10x traffic, 100x records, 3x team size). Generic 'won't scale' findings are not acceptable.",
  "VERDICT TIERS: use will_break / risk / fine. Do not force a cleaner answer than the evidence supports.",
@@ -44,10 +51,10 @@
  ],
  "procedure": [
  "Read the codebase to understand the architecture: key components, entry points, data flows, and main patterns within the declared scope.",
- "Present the five scalability dimensions and ask the user to select which apply: (1) load \u2014 handles more requests, users, or throughput; (2) data_volume \u2014 handles more records, storage, or query size; (3) team_org \u2014 more teams or developers working on this scope; (4) feature_extensibility \u2014 more features added without rearchitecting; (5) operational \u2014 more deployments, environments, or operational complexity.",
- "Ask the user to confirm the scope boundary \u2014 what is explicitly in and explicitly out.",
- "Classify audit complexity: Simple (1\u20132 dimensions, small scope), Medium (2\u20133 dimensions, moderate scope), Complex (4\u20135 dimensions or large scope).",
- "Run a context-clarity check: score boundary_clarity, dimension_clarity, and codebase_familiarity 1\u20133. If any score is 1, gather more context before advancing."
+ "Present the five scalability dimensions and ask the user to select which apply: (1) load -- handles more requests, users, or throughput; (2) data_volume -- handles more records, storage, or query size; (3) team_org -- more teams or developers working on this scope; (4) feature_extensibility -- more features added without rearchitecting; (5) operational -- more deployments, environments, or operational complexity.",
+ "Ask the user to confirm the scope boundary -- what is explicitly in and explicitly out.",
+ "Classify audit complexity: Simple (1–2 dimensions, small scope), Medium (2–3 dimensions, moderate scope), Complex (4–5 dimensions or large scope).",
+ "Run a context-clarity check: score boundary_clarity, dimension_clarity, and codebase_familiarity 1–3. If any score is 1, gather more context before advancing."
  ],
  "outputRequired": {
  "notesMarkdown": "Scope boundary (in and out), declared dimensions with rationale, audit complexity classification, and any open boundary questions.",
@@ -105,7 +112,7 @@
  "procedure": [
  "Create a neutral `scalabilityFactPacket` containing: scope boundary (in and out), declared dimensions, key architectural patterns found, main components and their roles, data flow and storage patterns, concurrency and state management approach, dependency boundaries and coupling, deployment and runtime assumptions, and explicit open unknowns.",
  "Include realism signals: code that looks scalable at a glance but may have hidden limits (e.g. in-memory state, synchronous choke points, missing pagination, tight coupling between components).",
- "For each declared dimension, assign a reviewer family mission: load = examine request handling, concurrency, session/state management, caching, connection pools, and horizontal scaling readiness \u2014 check whether session state is in-memory or distributed, whether connection pools are bounded, whether synchronous bottlenecks exist in hot paths; data_volume = examine query patterns, pagination, indexing, result set bounds, storage growth, and data access layer scalability \u2014 check for unbounded queries (missing LIMIT/pagination), missing indexes on filtered columns, N+1 patterns in repository/service layers, and data structures that grow unboundedly; team_org = examine module coupling, shared state, and parallel development friction \u2014 specifically check import graphs for cross-module dependencies that would cause merge conflicts, identify shared mutable singletons or global state, look for test setup that requires spinning up adjacent modules, and check whether public interfaces change frequently or are stable; feature_extensibility = examine how much code changes when a new variant of a core concept is added \u2014 specifically look for switch/when/if-else chains on type discriminators that would need a new branch per feature, hardcoded business-rule constants, direct concrete dependencies instead of interfaces or abstractions, and files that are edited for every new feature; operational = examine deployment complexity, environment-specific behavior, observability, configuration surface, and operational runbook needs \u2014 specifically check for environment-specific code paths (if/switch on env vars that create different behavior per environment), configuration that must be updated in multiple places per deployment, whether logs and metrics cover the main operational failure modes, and whether a new deployment of this scope would require manual steps beyond a standard deploy.",
+ "For each declared dimension, assign a reviewer family mission: load = examine request handling, concurrency, session/state management, caching, connection pools, and horizontal scaling readiness -- check whether session state is in-memory or distributed, whether connection pools are bounded, whether synchronous bottlenecks exist in hot paths; data_volume = examine query patterns, pagination, indexing, result set bounds, storage growth, and data access layer scalability -- check for unbounded queries (missing LIMIT/pagination), missing indexes on filtered columns, N+1 patterns in repository/service layers, and data structures that grow unboundedly; team_org = examine module coupling, shared state, and parallel development friction -- specifically check import graphs for cross-module dependencies that would cause merge conflicts, identify shared mutable singletons or global state, look for test setup that requires spinning up adjacent modules, and check whether public interfaces change frequently or are stable; feature_extensibility = examine how much code changes when a new variant of a core concept is added -- specifically look for switch/when/if-else chains on type discriminators that would need a new branch per feature, hardcoded business-rule constants, direct concrete dependencies instead of interfaces or abstractions, and files that are edited for every new feature; operational = examine deployment complexity, environment-specific behavior, observability, configuration surface, and operational runbook needs -- specifically check for environment-specific code paths (if/switch on env vars that create different behavior per environment), configuration that must be updated in multiple places per deployment, whether logs and metrics cover the main operational failure modes, and whether a new deployment of this scope would require manual steps beyond a standard deploy.",
  "Set selectedReviewerFamilies to the list of assigned families (one per declared dimension). Set contradictionCount and blindSpotCount to 0."
  ],
  "outputRequired": {
@@ -124,7 +131,7 @@
  "var": "auditComplexity",
  "equals": "Simple"
  },
- "text": "For a Simple audit, keep the fact packet compact \u2014 scope summary, key patterns, and declared dimensions only. Skip exhaustive realism signal enumeration."
+ "text": "For a Simple audit, keep the fact packet compact -- scope summary, key patterns, and declared dimensions only. Skip exhaustive realism signal enumeration."
  }
  ],
  "requireConfirmation": false
@@ -149,11 +156,11 @@
  ],
  "Each reviewer family uses scalabilityFactPacket as primary truth.",
  "Reviewer-family outputs are raw evidence. The main agent owns synthesis and verdict assignment.",
- "Each reviewer family audits only its declared dimension \u2014 no cross-dimension scope creep."
+ "Each reviewer family audits only its declared dimension -- no cross-dimension scope creep."
  ],
  "procedure": [
  "Before investigating, restate your scalabilityHypothesis and name which dimension is most likely to challenge it.",
- "Run one investigation per declared dimension. For each dimension, the investigation must return: top findings, evidence for each finding (specific file, class, method, or pattern references \u2014 not just technology names), verdict tier per finding (will_break / risk / fine), growth scenario for each concern (e.g. 10x traffic, 100x records, 3x team size), biggest uncertainty, and likely false-confidence vector for this dimension.",
+ "Run one investigation per declared dimension. For each dimension, the investigation must return: top findings, evidence for each finding (specific file, class, method, or pattern references -- not just technology names), verdict tier per finding (will_break / risk / fine), growth scenario for each concern (e.g. 10x traffic, 100x records, 3x team size), biggest uncertainty, and likely false-confidence vector for this dimension.",
  "After completing all dimension investigations, synthesize explicitly: what was confirmed, what was genuinely new, what looks weak or overstated, and what changed your current hypothesis.",
  "Build dimensionFindings keyed by dimension containing: findings list, verdict summary, evidence quality assessment, and open questions.",
  "Identify cross-cutting concerns: architectural patterns or components that appear in findings from multiple dimensions."
@@ -244,10 +251,10 @@
  "This is a structured four-item check, not a free-form review."
  ],
  "procedure": [
- "Check 1 \u2014 Technology-vs-usage: did any reviewer identify a scalable technology without checking actual usage patterns in the code? (e.g. Postgres was identified as the DB, but were N+1 queries, missing indexes, or unbounded result sets actually checked?) Fix any instances found.",
- "Check 2 \u2014 Scope drift: did any reviewer audit components outside the declared scope boundary? Remove out-of-scope findings.",
- "Check 3 \u2014 Undeclared relevant dimensions: does the codebase have patterns suggesting a declared-out dimension actually matters for this scope? If so, surface it as an advisory note without adding it to the audit verdict.",
- "Check 4 \u2014 Growth scenario vagueness: does every concern name a specific growth scenario? If not, assign one now based on the most realistic growth pattern for this scope.",
+ "Check 1 -- Technology-vs-usage: did any reviewer identify a scalable technology without checking actual usage patterns in the code? (e.g. Postgres was identified as the DB, but were N+1 queries, missing indexes, or unbounded result sets actually checked?) Fix any instances found.",
+ "Check 2 -- Scope drift: did any reviewer audit components outside the declared scope boundary? Remove out-of-scope findings.",
+ "Check 3 -- Undeclared relevant dimensions: does the codebase have patterns suggesting a declared-out dimension actually matters for this scope? If so, surface it as an advisory note without adding it to the audit verdict.",
+ "Check 4 -- Growth scenario vagueness: does every concern name a specific growth scenario? If not, assign one now based on the most realistic growth pattern for this scope.",
  "Set blindSpotCount to the number of blind spots found across all four checks."
  ],
  "outputRequired": {
@@ -299,11 +306,11 @@
  "Do not advance to handoff with known hard gate failures."
  ],
  "procedure": [
- "Verdict aggregation \u2014 derive scalabilityVerdict from dimensionFindings using these explicit rules: (1) at_risk if any declared dimension has a will_break finding; (2) conditional if no will_break findings exist but at least one dimension has a risk finding; (3) ready_to_scale if all declared dimensions have only fine findings; (4) inconclusive if any dimension still has evidenceWeak = true after the synthesis loop, making a reliable verdict impossible. Capture verdictRationale naming the specific dimension and finding that drove the verdict.",
- "Hard gate 1 \u2014 Evidence grounding: for every will_break and risk finding in dimensionFindings, confirm it cites a specific file, class, method, or code pattern. Technology name alone fails this gate. Fix by locating the code evidence or downgrading to risk with an evidence-needed note.",
- "Hard gate 2 \u2014 Dimension coverage: confirm every declared dimension has at least one substantive finding. A verdict of fine with supporting evidence counts. A dimension with no findings at all fails this gate.",
- "Hard gate 3 \u2014 Hypothesis revisited: confirm that scalabilityHypothesis from Phase 1 is either confirmed or explicitly revised in synthesis notes. If it was never addressed, address it now.",
- "Hard gate 4 \u2014 Growth scenario specificity: confirm every concern in dimensionFindings names a growth scenario. If any do not, assign one now.",
+ "Verdict aggregation -- derive scalabilityVerdict from dimensionFindings using these explicit rules: (1) at_risk if any declared dimension has a will_break finding; (2) conditional if no will_break findings exist but at least one dimension has a risk finding; (3) ready_to_scale if all declared dimensions have only fine findings; (4) inconclusive if any dimension still has evidenceWeak = true after the synthesis loop, making a reliable verdict impossible. Capture verdictRationale naming the specific dimension and finding that drove the verdict.",
+ "Hard gate 1 -- Evidence grounding: for every will_break and risk finding in dimensionFindings, confirm it cites a specific file, class, method, or code pattern. Technology name alone fails this gate. Fix by locating the code evidence or downgrading to risk with an evidence-needed note.",
+ "Hard gate 2 -- Dimension coverage: confirm every declared dimension has at least one substantive finding. A verdict of fine with supporting evidence counts. A dimension with no findings at all fails this gate.",
+ "Hard gate 3 -- Hypothesis revisited: confirm that scalabilityHypothesis from Phase 1 is either confirmed or explicitly revised in synthesis notes. If it was never addressed, address it now.",
+ "Hard gate 4 -- Growth scenario specificity: confirm every concern in dimensionFindings names a growth scenario. If any do not, assign one now.",
  "Set hardGatesPassed = true only when the verdict aggregation and all four gates pass. Set hardGateFailures to the list of any that needed fixing."
  ],
  "outputRequired": {
@@ -327,13 +334,13 @@
  "Do not drift into implementation planning or remediation design unless the user explicitly asks."
  ],
  "procedure": [
- "Open with the overall scalability readiness verdict (ready_to_scale / conditional / at_risk / inconclusive) and the verdictRationale \u2014 name the specific dimension and finding that drove it.",
+ "Open with the overall scalability readiness verdict (ready_to_scale / conditional / at_risk / inconclusive) and the verdictRationale -- name the specific dimension and finding that drove it.",
  "For each declared dimension, give: dimension name, verdict tier (will_break / risk / fine), top finding with specific code reference, growth scenario, and severity.",
  "List cross-cutting concerns: patterns that create scalability risk across multiple dimensions.",
  "Revisit scalabilityHypothesis from Phase 1: was it confirmed or revised? What evidence changed your view?",
  "Give a prioritized concern list ordered by: (1) will_break findings first, (2) risk findings by severity, (3) cross-cutting concerns, (4) fine findings worth noting as already solid.",
  "Surface any advisory notes for undeclared dimensions that may be worth considering.",
- "State what is already well-designed for scale \u2014 not everything should be a concern."
+ "State what is already well-designed for scale -- not everything should be a concern."
  ],
  "outputRequired": {
  "notesMarkdown": "Decision-ready scalability handoff: overall verdict, per-dimension summary with code references, prioritized concerns, cross-cutting concerns, hypothesis outcome, and what is already solid."
@@ -342,7 +349,7 @@
  "The handoff is verdict-first and evidence-grounded.",
  "Every concern is tied to a specific code reference and growth scenario.",
  "The hypothesis from Phase 1 is explicitly addressed.",
- "What is already well-designed is stated \u2014 not just the concerns."
+ "What is already well-designed is stated -- not just the concerns."
  ]
  },
  "requireConfirmation": false
@@ -3,6 +3,13 @@
  "name": "Bug Investigation (v2 \u2022 Notes-First \u2022 WorkRail Executor)",
  "version": "2.0.0",
  "description": "Use this to diagnose a bug or unexpected behavior in code. Builds a hypothesis, gathers evidence, and proves or disproves the root cause before concluding.",
+ "about": "## Bug Investigation Workflow\n\nThis workflow guides an AI agent through a rigorous, evidence-driven investigation of a bug or unexpected behavior. It is designed to prevent the most common failure mode in AI debugging: jumping to a plausible-sounding conclusion without sufficient proof.\n\n**What it does:**\nThe workflow moves through triage, context gathering, hypothesis generation, evidence planning, iterative evidence collection, diagnosis validation, and a final handoff. It explicitly distinguishes between theories (formed by reading code) and proof (confirmed by running tests or reproducing the failure). The final output is a diagnosis with a confidence rating, the strongest alternative explanations that were ruled out, and a high-level fix direction -- not a patch.\n\n**When to use it:**\n- You have a specific bug report, failing test, or production incident to investigate\n- The root cause is not immediately obvious and multiple explanations are plausible\n- You want a trustworthy diagnosis before spending time writing a fix\n- The bug carries enough risk that you need to be confident before changing code\n\n**What it produces:**\nA structured investigation handoff covering: root cause type (single cause, multi-factor, working as designed, etc.), proof summary, ruled-out alternatives, residual uncertainty, likely files involved, and verification steps for whoever implements the fix.\n\n**How to get good results:**\nProvide repro steps, observed symptoms, and expected behavior upfront. Include any relevant logs, failing test commands, or environment details you already have. The more concrete the repro, the faster the workflow can gather real evidence rather than theorizing. If the bug is intermittent, say so -- the workflow adapts its rigor based on reproducibility confidence.",
+ "examples": [
+ "Investigate why the payments API returns 500 after deploying the rate limiter",
+ "Debug why the mobile app crashes on logout when a background sync is in progress",
+ "Find out why search results are missing items added in the last 10 minutes",
+ "Diagnose why CI passes locally but the integration test fails on the build server"
+ ],
  "recommendedPreferences": {
  "recommendedAutonomy": "guided",
  "recommendedRiskPolicy": "conservative"
@@ -3,6 +3,13 @@
  "name": "Agentic Task Dev Workflow (Invariants \u2022 Architecture \u2022 Vertical Slices \u2022 PR Sizing \u2022 Audits \u2022 Resumable)",
  "version": "1.5.0",
  "description": "Use this to implement a software feature or task. Follows a plan-then-execute approach with architecture decisions, invariant tracking, and final verification.",
+ "about": "## Agentic Coding Task Workflow\n\nThis workflow structures the full lifecycle of a software implementation task: from understanding and classifying the work, through architecture decisions and incremental implementation, to final verification and handoff.\n\n### What it does\n\nThe workflow guides an AI agent through a disciplined plan-then-execute process. It begins by analyzing the task to determine complexity, risk, and the right level of rigor (QUICK, STANDARD, or THOROUGH). For non-trivial tasks, it then gathers codebase context, surfaces invariants and non-goals, generates competing design candidates, and selects an approach before writing a single line of code. Implementation proceeds slice by slice, with built-in verification gates after each slice. A final integration verification pass confirms acceptance criteria are met before handoff.\n\n### When to use it\n\nUse this workflow whenever you are implementing a feature, fixing a non-trivial bug, or making an architectural change in a real codebase. It is especially valuable when:\n- The task touches multiple files or systems\n- There is meaningful risk of regressions or invariant violations\n- You want the agent to surface trade-offs and commit to a reasoned design decision rather than guessing\n- You need a resumable, auditable record of what was decided and why\n\nFor quick one-liner fixes or very small changes, the workflow includes a fast path that skips heavyweight planning.\n\n### What it produces\n\n- An `implementation_plan.md` artifact covering the selected approach, vertical slices, test design, and philosophy alignment\n- A `spec.md` for large or high-risk tasks, capturing observable behavior and acceptance criteria\n- Step-level notes in WorkRail that serve as a durable execution log\n- A PR-ready handoff summary with acceptance criteria status, invariant proofs, and follow-up tickets\n\n### How to get good results\n\n- Provide a clear task description and at least partial acceptance criteria before starting\n- If you have coding philosophy or project conventions configured in session rules or Memory MCP, the workflow will apply them automatically as a design lens\n- Let the workflow classify complexity and rigor itself; override only if the classification is clearly wrong\n- For large or high-risk tasks, review the architecture decision step before implementation begins",
+ "examples": [
+ "Implement JWT refresh token rotation in the auth service",
+ "Fix the race condition in the cache invalidation path when concurrent writes occur",
+ "Refactor the payment flow to use a Result type instead of throwing exceptions",
+ "Add pagination support to the messaging inbox API endpoint"
+ ],
  "recommendedPreferences": {
  "recommendedAutonomy": "guided",
  "recommendedRiskPolicy": "conservative"
@@ -1,8 +1,15 @@
  {
  "id": "coding-task-workflow-agentic",
- "name": "Agentic Task Dev Workflow (Lean \u2022 Notes-First \u2022 WorkRail Executor)",
+ "name": "Agentic Task Dev Workflow (Lean Notes-First WorkRail Executor)",
  "version": "1.0.0",
  "description": "Use this to implement a software feature or task. Follows a plan-then-execute approach with architecture decisions, invariant tracking, and final verification.",
+ "about": "## Agentic Coding Task Workflow\n\nThis workflow structures the full lifecycle of a software implementation task: from understanding and classifying the work, through architecture decisions and incremental implementation, to final verification and handoff.\n\n### What it does\n\nThe workflow guides an AI agent through a disciplined plan-then-execute process. It begins by analyzing the task to determine complexity, risk, and the right level of rigor (QUICK, STANDARD, or THOROUGH). For non-trivial tasks, it then gathers codebase context, surfaces invariants and non-goals, generates competing design candidates, and selects an approach before writing a single line of code. Implementation proceeds slice by slice, with built-in verification gates after each slice. A final integration verification pass confirms acceptance criteria are met before handoff.\n\n### When to use it\n\nUse this workflow whenever you are implementing a feature, fixing a non-trivial bug, or making an architectural change in a real codebase. It is especially valuable when:\n- The task touches multiple files or systems\n- There is meaningful risk of regressions or invariant violations\n- You want the agent to surface trade-offs and commit to a reasoned design decision rather than guessing\n- You need a resumable, auditable record of what was decided and why\n\nFor quick one-liner fixes or very small changes, the workflow includes a fast path that skips heavyweight planning.\n\n### What it produces\n\n- An `implementation_plan.md` artifact covering the selected approach, vertical slices, test design, and philosophy alignment\n- A `spec.md` for large or high-risk tasks, capturing observable behavior and acceptance criteria\n- Step-level notes in WorkRail that serve as a durable execution log\n- A PR-ready handoff summary with acceptance criteria status, invariant proofs, and follow-up tickets\n\n### How to get good results\n\n- Provide a clear task description and at least partial acceptance criteria before starting\n- If you have coding philosophy or project conventions configured in session rules or Memory MCP, the workflow will apply them automatically as a design lens\n- Let the workflow classify complexity and rigor itself; override only if the classification is clearly wrong\n- For large or high-risk tasks, review the architecture decision step before implementation begins",
+ "examples": [
+ "Implement JWT refresh token rotation in the auth service",
+ "Fix the race condition in the cache invalidation path when concurrent writes occur",
+ "Refactor the payment flow to use a Result type instead of throwing exceptions",
+ "Add pagination support to the messaging inbox API endpoint"
+ ],
  "recommendedPreferences": {
  "recommendedAutonomy": "guided",
  "recommendedRiskPolicy": "conservative"
@@ -21,9 +28,10 @@
  "SUBAGENT SYNTHESIS: treat subagent output as evidence, not conclusions. State your hypothesis before delegating, then interrogate what came back: what was missed, wrong, or new? Say what changed your mind or what you still reject, and why.",
  "PARALLELISM: when reads, audits, or delegations are independent, run them in parallel inside the phase. Parallelize cognition; serialize synthesis and canonical writes.",
  "PHILOSOPHY LENS: apply the user's coding philosophy (from active session rules) as the evaluation lens. Flag violations by principle name, not as generic feedback. If principles conflict, surface the tension explicitly instead of silently choosing.",
- "VALIDATION: prefer static/compile-time safety over runtime checks. Use build, type-checking, and tests as the primary proof of correctness \u2014 in that order of reliability.",
+ "VALIDATION: prefer static/compile-time safety over runtime checks. Use build, type-checking, and tests as the primary proof of correctness -- in that order of reliability.",
  "DRIFT HANDLING: when reality diverges from the plan, update the plan artifact and re-audit deliberately rather than accumulating undocumented drift.",
- "NEVER COMMIT MARKDOWN FILES UNLESS USER EXPLICITLY ASKS."
+ "NEVER COMMIT MARKDOWN FILES UNLESS USER EXPLICITLY ASKS.",
+ "SLICE DISCIPLINE: Phase 6 is a loop -- implement ONE slice per iteration. Do not implement multiple slices at once. The verification loop exists to catch drift per slice, not retroactively."
  ],
  "references": [
  {
@@ -107,7 +115,7 @@
  },
  {
  "id": "phase-1b-design-deep",
- "title": "Phase 1b: Design Generation (Injected Routine \u2014 Tension-Driven Design)",
+ "title": "Phase 1b: Design Generation (Injected Routine -- Tension-Driven Design)",
  "runCondition": {
  "and": [
  {
@@ -134,7 +142,7 @@
134
142
  "var": "taskComplexity",
135
143
  "not_equals": "Small"
136
144
  },
137
- "prompt": "Read `design-candidates.md`, compare it to your original guess, and make the call.\n\nBe explicit about three things:\n- what the design work confirmed\n- what changed your mind\n- what you missed the first time\n\nThen pressure-test the leading option:\n- what's the strongest case against it?\n- what assumption breaks it?\n\nAfter the challenge batch, say:\n- what changed your mind\n- what didn't\n- which findings you reject and why\n\nPick the approach yourself. Don't hide behind the artifact. If the simplest thing works, prefer it. If the front-runner stops looking right after challenge, switch.\n\nCapture:\n- `selectedApproach` \u2014 chosen design with rationale tied to tensions\n- `runnerUpApproach` \u2014 next-best option and why it lost\n- `architectureRationale` \u2014 tensions resolved vs accepted\n- `pivotTriggers` \u2014 conditions under which you'd switch to the runner-up\n- `keyRiskToMonitor` \u2014 failure mode of the selected approach\n- `acceptedTradeoffs`\n- `identifiedFailureModes`",
+ "prompt": "Read `design-candidates.md`, compare it to your original guess, and make the call.\n\nBe explicit about three things:\n- what the design work confirmed\n- what changed your mind\n- what you missed the first time\n\nThen pressure-test the leading option:\n- what's the strongest case against it?\n- what assumption breaks it?\n\nAfter the challenge batch, say:\n- what changed your mind\n- what didn't\n- which findings you reject and why\n\nPick the approach yourself. Don't hide behind the artifact. If the simplest thing works, prefer it. If the front-runner stops looking right after challenge, switch.\n\nCapture:\n- `selectedApproach`: chosen design with rationale tied to tensions\n- `runnerUpApproach`: next-best option and why it lost\n- `architectureRationale`: tensions resolved vs accepted\n- `pivotTriggers`: conditions under which you'd switch to the runner-up\n- `keyRiskToMonitor`: failure mode of the selected approach\n- `acceptedTradeoffs`\n- `identifiedFailureModes`",
  "promptFragments": [
  {
  "id": "phase-1c-challenge-standard",
@@ -242,7 +250,7 @@
  "var": "taskComplexity",
  "not_equals": "Small"
  },
- "prompt": "Turn the decision into a plan someone else could execute without guessing.\n\nUpdate `implementation_plan.md`.\n\nIt should cover:\n1. Problem statement\n2. Acceptance criteria (mirror `spec.md` if it exists; `spec.md` owns observable behavior)\n3. Non-goals\n4. Philosophy-driven constraints\n5. Invariants\n6. Selected approach + rationale + runner-up\n7. Vertical slices\n8. Work packages only if they actually help\n9. Test design\n10. Risk register\n11. PR packaging strategy\n12. Philosophy alignment per slice:\n - [principle] -> [satisfied / tension / violated + 1-line why]\n\nCapture:\n- `implementationPlan`\n- `slices`\n- `testDesign`\n- `estimatedPRCount`\n- `followUpTickets` (initialize if needed)\n- `unresolvedUnknownCount` \u2014 count of open issues that would materially affect implementation quality\n- `planConfidenceBand` \u2014 Low / Medium / High",
+ "prompt": "Turn the decision into a plan someone else could execute without guessing.\n\nUpdate `implementation_plan.md`.\n\nIt should cover:\n1. Problem statement\n2. Acceptance criteria (mirror `spec.md` if it exists; `spec.md` owns observable behavior)\n3. Non-goals\n4. Philosophy-driven constraints\n5. Invariants\n6. Selected approach + rationale + runner-up\n7. Vertical slices\n8. Work packages only if they actually help\n9. Test design\n10. Risk register\n11. PR packaging strategy\n12. Philosophy alignment per slice:\n - [principle] -> [satisfied / tension / violated + 1-line why]\n\nCapture:\n- `implementationPlan`\n- `slices`\n- `testDesign`\n- `estimatedPRCount`\n- `followUpTickets` (initialize if needed)\n- `unresolvedUnknownCount`: count of open issues that would materially affect implementation quality\n- `planConfidenceBand`: Low / Medium / High\n\nThe plan is the deliverable for this step. Do not implement anything -- not a \"quick win\", not a file read that bleeds into edits, nothing. Execution begins in Phase 6, one slice at a time. If you find yourself writing code or editing source files right now, stop immediately.",
  "requireConfirmation": false
  },
  {
@@ -332,7 +340,7 @@
  {
  "id": "phase-4b-loop-decision",
  "title": "Loop Exit Decision",
- "prompt": "Decide whether the plan needs another pass.\n\nIf `planFindings` is non-empty, keep going.\nIf it's empty, stop \u2014 but say what you checked so the clean pass means something.\nIf you've hit the limit, stop and record what still bothers you.\n\nThen emit the required loop-control artifact in this shape (`decision` must be `continue` or `stop`):\n```json\n{\n \"artifacts\": [{\n \"kind\": \"wr.loop_control\",\n \"decision\": \"continue\"\n }]\n}\n```",
+ "prompt": "Decide whether the plan needs another pass.\n\nIf `planFindings` is non-empty, keep going.\nIf it's empty, stop, but say what you checked so the clean pass means something.\nIf you've hit the limit, stop and record what still bothers you.\n\nThen emit the required loop-control artifact in this shape (`decision` must be `continue` or `stop`):\n```json\n{\n \"artifacts\": [{\n \"kind\": \"wr.loop_control\",\n \"decision\": \"continue\"\n }]\n}\n```",
  "requireConfirmation": true,
  "outputContract": {
  "contractRef": "wr.contracts.loop_control"
@@ -369,7 +377,7 @@
  {
  "id": "phase-6a-implement-slice",
  "title": "Implement Slice",
- "prompt": "Implement only the current slice: `{{currentSlice.name}}`.\n\nBefore you code, check whether the plan is still valid:\n- if the pivot triggers fired or the assumptions went stale, stop and go back to planning\n- if the target files or symbols no longer match, stop and re-plan\n\nStay in this slice.\n- don't do the rest of the plan early\n- only pull forward later-slice work if you absolutely need it to make this slice compile or integrate, and count that as `unexpectedScopeChange = true`\n- keep the changes incremental\n- run tests and build to prove the slice works\n\nTrack whether this slice required:\n- a new special-case (`specialCaseIntroduced`)\n- an unplanned abstraction (`unplannedAbstractionIntroduced`)\n- unexpected file changes outside planned scope (`unexpectedScopeChange`)\n\nSet `verifyNeeded` to true if ANY of:\n- `sliceIndex` is odd (verify every 2 slices)\n- `prStrategy = MultiPR`\n- `specialCaseIntroduced = true`\n- `unplannedAbstractionIntroduced = true`\n- `unexpectedScopeChange = true`\n- tests or build failed\n\nCapture:\n- `specialCaseIntroduced`\n- `unplannedAbstractionIntroduced`\n- `unexpectedScopeChange`\n- `verifyNeeded`",
+ "prompt": "Implement the current slice: `{{currentSlice.name}}`.\n\nBefore writing a single line of code, declare your scope:\n- List the exact files and symbols this slice touches\n- Confirm none of them belong to a later slice\n- If you have already edited files from this or any other slice in a previous step, stop and report it\n\nHard scope rule: you may only modify what is described in `{{currentSlice.name}}`. Anything outside that boundary is out of scope for this iteration -- not \"do it early\", not \"while I'm here\". If you discover you need to touch something outside this slice to make it compile or integrate, set `unexpectedScopeChange = true` and do the minimum necessary to stay green, then stop.\n\nImplement incrementally. Run tests and build to prove the slice works before advancing.\n\nTrack:\n- `specialCaseIntroduced` -- did this slice require a new special-case?\n- `unplannedAbstractionIntroduced` -- did this slice introduce an abstraction not in the plan?\n- `unexpectedScopeChange` -- did this slice touch files outside its planned scope?\n\nSet `verifyNeeded` to true if ANY of:\n- `sliceIndex` is odd (verify every 2 slices)\n- `prStrategy = MultiPR`\n- `specialCaseIntroduced = true`\n- `unplannedAbstractionIntroduced = true`\n- `unexpectedScopeChange = true`\n- tests or build failed\n\nCapture: `specialCaseIntroduced`, `unplannedAbstractionIntroduced`, `unexpectedScopeChange`, `verifyNeeded`",
  "requireConfirmation": false
  },
  {
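The `verifyNeeded` rule in the slice prompt is a pure OR over the tracked flags, so it reduces to a one-liner. The field names below come straight from the prompt; the `SliceReport` type and `verifyNeeded` function are illustrative only, not code shipped in the package.

```typescript
// Inputs tracked by the "Implement Slice" step; names mirror the prompt.
interface SliceReport {
  sliceIndex: number;
  prStrategy: "SinglePR" | "MultiPR";
  specialCaseIntroduced: boolean;
  unplannedAbstractionIntroduced: boolean;
  unexpectedScopeChange: boolean;
  testsOrBuildFailed: boolean;
}

// verifyNeeded is true if ANY trigger fires: odd slice index (verify every
// 2 slices), a multi-PR strategy, any drift flag, or a failed test/build.
function verifyNeeded(r: SliceReport): boolean {
  return (
    r.sliceIndex % 2 === 1 ||
    r.prStrategy === "MultiPR" ||
    r.specialCaseIntroduced ||
    r.unplannedAbstractionIntroduced ||
    r.unexpectedScopeChange ||
    r.testsOrBuildFailed
  );
}
```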
@@ -3,6 +3,13 @@
  "name": "Agentic Task Dev Workflow (v2 \u2022 Notes-First \u2022 WorkRail Executor)",
  "version": "2.0.0",
  "description": "Use this to implement a software feature or task. Follows a plan-then-execute approach with architecture decisions, invariant tracking, and final verification.",
+ "about": "## Agentic Coding Task Workflow\n\nThis workflow structures the full lifecycle of a software implementation task: from understanding and classifying the work, through architecture decisions and incremental implementation, to final verification and handoff.\n\n### What it does\n\nThe workflow guides an AI agent through a disciplined plan-then-execute process. It begins by analyzing the task to determine complexity, risk, and the right level of rigor (QUICK, STANDARD, or THOROUGH). For non-trivial tasks, it then gathers codebase context, surfaces invariants and non-goals, generates competing design candidates, and selects an approach before writing a single line of code. Implementation proceeds slice by slice, with built-in verification gates after each slice. A final integration verification pass confirms acceptance criteria are met before handoff.\n\n### When to use it\n\nUse this workflow whenever you are implementing a feature, fixing a non-trivial bug, or making an architectural change in a real codebase. It is especially valuable when:\n- The task touches multiple files or systems\n- There is meaningful risk of regressions or invariant violations\n- You want the agent to surface trade-offs and commit to a reasoned design decision rather than guessing\n- You need a resumable, auditable record of what was decided and why\n\nFor quick one-liner fixes or very small changes, the workflow includes a fast path that skips heavyweight planning.\n\n### What it produces\n\n- An `implementation_plan.md` artifact covering the selected approach, vertical slices, test design, and philosophy alignment\n- A `spec.md` for large or high-risk tasks, capturing observable behavior and acceptance criteria\n- Step-level notes in WorkRail that serve as a durable execution log\n- A PR-ready handoff summary with acceptance criteria status, invariant proofs, and follow-up tickets\n\n### How to get good results\n\n- Provide a clear task description and at least partial acceptance criteria before starting\n- If you have coding philosophy or project conventions configured in session rules or Memory MCP, the workflow will apply them automatically as a design lens\n- Let the workflow classify complexity and rigor itself; override only if the classification is clearly wrong\n- For large or high-risk tasks, review the architecture decision step before implementation begins",
+ "examples": [
+ "Implement JWT refresh token rotation in the auth service",
+ "Fix the race condition in the cache invalidation path when concurrent writes occur",
+ "Refactor the payment flow to use a Result type instead of throwing exceptions",
+ "Add pagination support to the messaging inbox API endpoint"
+ ],
  "recommendedPreferences": {
  "recommendedAutonomy": "guided",
  "recommendedRiskPolicy": "conservative"
@@ -3,6 +3,13 @@
  "name": "Cross-Platform Code Conversion",
  "version": "0.1.0",
  "description": "Use this to convert code from one platform to another (e.g. Android to iOS, iOS to Web). Triages files by difficulty, parallelizes easy translations, and handles platform-specific design decisions.",
+ "about": "## Cross-Platform Code Conversion Workflow\n\nThis workflow guides an AI agent through converting code from one platform to another - for example, Android (Kotlin) to iOS (Swift), iOS to Web (TypeScript/React), or any similar migration. It handles everything from scoping and analysis through idiomatic conversion, build verification, and final handoff.\n\n### What it does\n\nThe workflow starts by scoping the migration and classifying its complexity (Small, Medium, or Large) and adaptation depth (low, moderate, or high). It then analyzes the source architecture to understand patterns, dependencies, concurrency models, and semantic contracts. Files are triaged into three buckets: mechanical translations delegated to subagents in parallel (Bucket A), library substitutions (Bucket B), and platform-specific code needing design decisions (Bucket C). For high-adaptation migrations, the workflow runs a full design generation phase to choose an idiomatic target-platform architecture before any code is written. Implementation proceeds batch by batch, with drift detection after each batch to catch files that turn out harder than classified. A final build-and-integration loop verifies the full converted codebase before handoff.\n\n### When to use it\n\nUse this workflow when migrating a module, feature, or full component from one platform to another. It is especially valuable when:\n- The source and target platforms have meaningfully different idioms (e.g., Kotlin coroutines vs Swift async/await, Hilt vs Swinject)\n- You want parallel delegation of mechanical work while keeping design-sensitive boundaries with the main agent\n- Semantic contracts (lifecycle, threading, cancellation, error handling) must be preserved across the migration\n- The target repo has existing architectural patterns the migrated code must fit into\n\nFor very small, straightforward file-by-file translations, the workflow includes a fast path that skips planning and triage.\n\n### What it produces\n\n- A triage matrix classifying every file into a conversion bucket\n- A semantic contract inventory for non-trivial migration boundaries\n- A target integration analysis mapping boundaries to their destination repo seams\n- Converted source files in the target platform's idioms\n- A passing build or typecheck on the full converted output\n- A handoff summary covering adaptation decisions, known gaps, and items needing manual review\n\n### How to get good results\n\n- Specify the exact scope of the migration - which files, modules, or features to convert\n- If the target repo is not in the same workspace, point the agent to it explicitly or configure the source-to-target path mapping\n- Review the triage and semantic contract inventory steps before conversion begins, especially for high-adaptation migrations\n- Flag any invariants that must survive the migration (API contracts, behavioral guarantees, threading assumptions)",
+ "examples": [
+ "Convert the Android messaging inbox feature from Kotlin/Coroutines to iOS Swift/Combine",
+ "Migrate the Android authentication module (Hilt + ViewModel) to a SwiftUI equivalent",
+ "Port the shared data models and repository layer from Android Kotlin to TypeScript for the web client",
+ "Convert the Android search feature UI layer from Jetpack Compose to SwiftUI"
+ ],
  "recommendedPreferences": {
  "recommendedAutonomy": "guided",
  "recommendedRiskPolicy": "conservative"
@@ -2,7 +2,14 @@
  "id": "document-creation-workflow",
  "name": "Document Creation Workflow",
  "version": "1.0.0",
- "description": "Use this to create broad or comprehensive documentation spanning multiple components or systems \u2014 project READMEs, complete API docs, user guides, or technical specifications.",
+ "description": "Use this to create broad or comprehensive documentation spanning multiple components or systems: project READMEs, complete API docs, user guides, or technical specifications.",
+ "about": "## Document Creation Workflow\n\nThis workflow guides you through creating new documentation from scratch -- ranging from a simple project README to a full technical specification spanning multiple systems. It automatically calibrates depth to match the complexity of your request: simple tasks go straight to writing, while complex documentation gets a full analysis-and-planning phase first.\n\n### What it produces\n\nA complete, saved documentation file ready for use. Depending on complexity, it may also include a quality review pass covering accuracy, completeness, audience fit, usability, and style consistency.\n\n### When to use it\n\n- You need to create a **new** document (not update an existing one -- see the Documentation Update workflow for that).\n- The document spans one or more systems, components, or audiences.\n- Examples: project READMEs, API reference docs, user guides, onboarding docs, technical specifications, architecture overviews.\n\n### When NOT to use it\n\n- You want to update or refresh an existing doc -- use the Documentation Update workflow instead.\n- You need tight scope discipline for a single class or mechanism -- the Scoped Documentation workflow is better suited.\n\n### How to get good results\n\n- Be specific about the document type and intended audience upfront. The workflow probes for these, but the clearer your initial goal, the less back-and-forth.\n- If your project has existing documentation or style conventions, mention them -- the workflow will follow them.\n- For complex documentation, the workflow asks a small number of targeted questions it cannot answer from the codebase. Answer these concisely to keep momentum.",
+ "examples": [
+ "Create a README for the payments-service repo with setup, config, and deployment instructions",
+ "Write a full API reference for the new notifications SDK, including all endpoints and error codes",
+ "Create a user guide for the self-serve onboarding flow targeting non-technical customers",
+ "Write a technical specification for the proposed event-sourcing migration"
+ ],
  "preconditions": [
  "User has a clear idea of the document type and purpose.",
  "Relevant project files or information are available for reference.",
@@ -12,9 +19,9 @@
  "metaGuidance": [
  "NOTES-FIRST DURABILITY: use output.notesMarkdown as the primary durable record. Do NOT create CONTEXT.md, doc_spec.md, or content_plan.md as workflow memory.",
  "DISCOVER BEFORE ASKING: use tools to explore the project before asking clarification questions. Only ask what tools cannot answer.",
- "COMPLEXITY DRIVES BRANCHING: docComplexity=Simple uses the fast path; Standard/Complex uses the full path. If complexity changes during work, note it in notesMarkdown and adapt \u2014 no retriage ceremony needed.",
+ "COMPLEXITY DRIVES BRANCHING: docComplexity=Simple uses the fast path; Standard/Complex uses the full path. If complexity changes during work, note it in notesMarkdown and adapt; no retriage ceremony needed.",
  "CONTENT-FIRST: the deliverable is the document, not planning artifacts. Keep planning proportional to scope.",
- "EVIDENCE-BASED QUALITY: each quality dimension in the review step requires a one-sentence evidence statement and a pass or needs-work verdict \u2014 not a numeric score."
+ "EVIDENCE-BASED QUALITY: each quality dimension in the review step requires a one-sentence evidence statement and a pass or needs-work verdict, not a numeric score."
  ],
  "steps": [
  {
@@ -41,7 +48,7 @@
  }
  ]
  },
- "prompt": "Analyze the project to inform documentation strategy. Limit this analysis to 1500 words; prioritize documentation-relevant insights.\n\nCover:\n1. **Existing documentation landscape** \u2014 current docs, style patterns, gaps\n2. **Project architecture** \u2014 key components relevant to this document\n3. **User or developer workflows** \u2014 how documentation fits into user journeys\n4. **Technical constraints** \u2014 APIs, systems, integrations to document\n5. **Style conventions** \u2014 terminology, formatting, naming patterns to follow\n6. **Audience** \u2014 who will use this documentation and what they need to accomplish\n\nNote any complexity indicators that might warrant reclassifying `docComplexity` upward.",
+ "prompt": "Analyze the project to inform documentation strategy. Limit this analysis to 1500 words; prioritize documentation-relevant insights.\n\nCover:\n1. **Existing documentation landscape**: current docs, style patterns, gaps\n2. **Project architecture**: key components relevant to this document\n3. **User or developer workflows**: how documentation fits into user journeys\n4. **Technical constraints**: APIs, systems, integrations to document\n5. **Style conventions**: terminology, formatting, naming patterns to follow\n6. **Audience**: who will use this documentation and what they need to accomplish\n\nNote any complexity indicators that might warrant reclassifying `docComplexity` upward.",
  "requireConfirmation": false
  },
  {
@@ -77,7 +84,7 @@
  }
  ]
  },
- "prompt": "Create a content plan for this documentation in your notes.\n\nThe plan should cover:\n1. Document purpose and success criteria\n2. Target audience and their primary goals\n3. Section outline with one-line descriptions\n4. Writing strategy \u2014 tone, technical depth, key terminology\n5. Visual elements or code examples needed\n\nKeep the plan proportional to scope. The goal is a clear outline to execute against, not a heavyweight specification.",
+ "prompt": "Create a content plan for this documentation in your notes.\n\nThe plan should cover:\n1. Document purpose and success criteria\n2. Target audience and their primary goals\n3. Section outline with one-line descriptions\n4. Writing strategy: tone, technical depth, key terminology\n5. Visual elements or code examples needed\n\nKeep the plan proportional to scope. The goal is a clear outline to execute against, not a heavyweight specification.",
  "promptFragments": [
  {
  "id": "phase-3-plan-complex",
@@ -111,7 +118,7 @@
  }
  ]
  },
- "prompt": "Review the documentation you just wrote using this rubric. For each dimension, provide a one-sentence evidence statement and a verdict of `pass` or `needs-work`.\n\n1. **Accuracy** \u2014 Does the content correctly describe the actual project or system? *(Evidence: cite one verified fact.)*\n2. **Completeness** \u2014 Does it cover all planned sections? *(Evidence: list planned vs completed sections.)*\n3. **Audience fit** \u2014 Is the technical depth right for the target reader? *(Evidence: identify one audience-appropriate choice made.)*\n4. **Usability** \u2014 Could a reader actually accomplish their goal using this doc? *(Evidence: trace one user journey through the doc.)*\n5. **Consistency** \u2014 Does it match project conventions for style, terminology, and format? *(Evidence: cite one convention followed.)*\n\nIf any dimension is `needs-work`, fix the issue immediately and re-assert the dimension as `pass` in your notes before continuing.",
+ "prompt": "Review the documentation you just wrote using this rubric. For each dimension, provide a one-sentence evidence statement and a verdict of `pass` or `needs-work`.\n\n1. **Accuracy**: Does the content correctly describe the actual project or system? *(Evidence: cite one verified fact.)*\n2. **Completeness**: Does it cover all planned sections? *(Evidence: list planned vs completed sections.)*\n3. **Audience fit**: Is the technical depth right for the target reader? *(Evidence: identify one audience-appropriate choice made.)*\n4. **Usability**: Could a reader actually accomplish their goal using this doc? *(Evidence: trace one user journey through the doc.)*\n5. **Consistency**: Does it match project conventions for style, terminology, and format? *(Evidence: cite one convention followed.)*\n\nIf any dimension is `needs-work`, fix the issue immediately and re-assert the dimension as `pass` in your notes before continuing.",
  "promptFragments": [
  {
  "id": "phase-5-quality-review-complex",
@@ -119,7 +126,7 @@
  "var": "docComplexity",
  "equals": "Complex"
  },
- "text": "Also review a sixth dimension:\n6. **Integration coherence** \u2014 Does the doc integrate correctly with the existing documentation ecosystem? *(Evidence: describe how it cross-links or relates to existing docs.)*"
+ "text": "Also review a sixth dimension:\n6. **Integration coherence**: Does the doc integrate correctly with the existing documentation ecosystem? *(Evidence: describe how it cross-links or relates to existing docs.)*"
  }
  ],
  "requireConfirmation": false
@@ -141,4 +148,4 @@
  "requireConfirmation": false
  }
  ]
- }
+ }