@exaudeus/workrail 3.14.0 → 3.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156) hide show
  1. package/dist/application/services/validation-engine.js +4 -9
  2. package/dist/application/services/workflow-compiler.js +4 -6
  3. package/dist/application/services/workflow-service.d.ts +2 -0
  4. package/dist/application/services/workflow-service.js +3 -0
  5. package/dist/console/assets/index-BE5PAgPO.js +28 -0
  6. package/dist/console/assets/index-BZNM03t1.css +1 -0
  7. package/dist/console/index.html +2 -2
  8. package/dist/engine/engine-factory.js +2 -2
  9. package/dist/engine/types.d.ts +1 -1
  10. package/dist/env-flags.d.ts +1 -0
  11. package/dist/env-flags.js +4 -0
  12. package/dist/infrastructure/session/HttpServer.d.ts +3 -3
  13. package/dist/infrastructure/session/HttpServer.js +68 -74
  14. package/dist/infrastructure/storage/caching-workflow-storage.d.ts +2 -0
  15. package/dist/infrastructure/storage/caching-workflow-storage.js +15 -6
  16. package/dist/infrastructure/storage/file-workflow-storage.js +3 -4
  17. package/dist/infrastructure/storage/schema-validating-workflow-storage.js +9 -8
  18. package/dist/manifest.json +283 -219
  19. package/dist/mcp/assert-output.d.ts +37 -0
  20. package/dist/mcp/assert-output.js +52 -0
  21. package/dist/mcp/boundary-coercion.d.ts +1 -0
  22. package/dist/mcp/boundary-coercion.js +44 -0
  23. package/dist/mcp/dev-mode.d.ts +1 -0
  24. package/dist/mcp/dev-mode.js +4 -0
  25. package/dist/mcp/handler-factory.js +12 -9
  26. package/dist/mcp/handlers/session.js +8 -9
  27. package/dist/mcp/handlers/shared/request-workflow-reader.d.ts +5 -0
  28. package/dist/mcp/handlers/shared/request-workflow-reader.js +47 -2
  29. package/dist/mcp/handlers/v2-advance-core/assessment-consequences.d.ts +1 -1
  30. package/dist/mcp/handlers/v2-advance-core/assessment-consequences.js +4 -5
  31. package/dist/mcp/handlers/v2-advance-core/event-builders.d.ts +2 -0
  32. package/dist/mcp/handlers/v2-advance-core/event-builders.js +6 -6
  33. package/dist/mcp/handlers/v2-advance-core/index.d.ts +2 -0
  34. package/dist/mcp/handlers/v2-advance-core/index.js +5 -4
  35. package/dist/mcp/handlers/v2-advance-core/input-validation.d.ts +2 -0
  36. package/dist/mcp/handlers/v2-advance-core/input-validation.js +32 -9
  37. package/dist/mcp/handlers/v2-advance-core/outcome-blocked.d.ts +2 -0
  38. package/dist/mcp/handlers/v2-advance-core/outcome-blocked.js +2 -2
  39. package/dist/mcp/handlers/v2-advance-core/outcome-success.d.ts +2 -0
  40. package/dist/mcp/handlers/v2-advance-core/outcome-success.js +1 -1
  41. package/dist/mcp/handlers/v2-checkpoint.d.ts +1 -1
  42. package/dist/mcp/handlers/v2-checkpoint.js +5 -6
  43. package/dist/mcp/handlers/v2-execution/advance.d.ts +4 -2
  44. package/dist/mcp/handlers/v2-execution/advance.js +5 -7
  45. package/dist/mcp/handlers/v2-execution/continue-advance.js +56 -26
  46. package/dist/mcp/handlers/v2-execution/continue-rehydrate.d.ts +1 -1
  47. package/dist/mcp/handlers/v2-execution/continue-rehydrate.js +9 -9
  48. package/dist/mcp/handlers/v2-execution/replay.d.ts +6 -4
  49. package/dist/mcp/handlers/v2-execution/replay.js +47 -30
  50. package/dist/mcp/handlers/v2-execution/start.d.ts +3 -3
  51. package/dist/mcp/handlers/v2-execution/start.js +31 -12
  52. package/dist/mcp/handlers/v2-execution/workflow-object-cache.d.ts +5 -0
  53. package/dist/mcp/handlers/v2-execution/workflow-object-cache.js +19 -0
  54. package/dist/mcp/handlers/v2-execution-helpers.d.ts +1 -0
  55. package/dist/mcp/handlers/v2-execution-helpers.js +23 -7
  56. package/dist/mcp/handlers/v2-resume.d.ts +1 -1
  57. package/dist/mcp/handlers/v2-resume.js +3 -4
  58. package/dist/mcp/handlers/v2-state-conversion.js +5 -1
  59. package/dist/mcp/handlers/v2-workflow.d.ts +100 -0
  60. package/dist/mcp/handlers/v2-workflow.js +155 -31
  61. package/dist/mcp/handlers/workflow.d.ts +2 -5
  62. package/dist/mcp/handlers/workflow.js +15 -12
  63. package/dist/mcp/output-schemas.d.ts +123 -29
  64. package/dist/mcp/output-schemas.js +36 -18
  65. package/dist/mcp/server.js +70 -5
  66. package/dist/mcp/tool-call-timing.d.ts +24 -0
  67. package/dist/mcp/tool-call-timing.js +85 -0
  68. package/dist/mcp/tool-descriptions.js +17 -9
  69. package/dist/mcp/transports/http-entry.js +3 -2
  70. package/dist/mcp/transports/http-listener.d.ts +1 -0
  71. package/dist/mcp/transports/http-listener.js +25 -0
  72. package/dist/mcp/transports/shutdown-hooks.d.ts +4 -1
  73. package/dist/mcp/transports/shutdown-hooks.js +3 -2
  74. package/dist/mcp/transports/stdio-entry.js +6 -28
  75. package/dist/mcp/v2/tools.d.ts +6 -0
  76. package/dist/mcp/v2/tools.js +2 -0
  77. package/dist/mcp/v2-response-formatter.js +2 -4
  78. package/dist/mcp/validation/schema-introspection.d.ts +1 -0
  79. package/dist/mcp/validation/schema-introspection.js +15 -5
  80. package/dist/mcp/validation/suggestion-generator.js +2 -2
  81. package/dist/mcp/workflow-protocol-contracts.js +5 -1
  82. package/dist/runtime/adapters/node-process-signals.d.ts +1 -0
  83. package/dist/runtime/adapters/node-process-signals.js +5 -0
  84. package/dist/runtime/adapters/noop-process-signals.d.ts +1 -0
  85. package/dist/runtime/adapters/noop-process-signals.js +2 -0
  86. package/dist/runtime/ports/process-signals.d.ts +1 -0
  87. package/dist/types/workflow-definition.d.ts +3 -2
  88. package/dist/types/workflow.d.ts +3 -0
  89. package/dist/types/workflow.js +35 -26
  90. package/dist/v2/durable-core/domain/context-template-resolver.js +2 -2
  91. package/dist/v2/durable-core/domain/function-definition-expander.js +2 -17
  92. package/dist/v2/durable-core/domain/prompt-renderer.d.ts +1 -0
  93. package/dist/v2/durable-core/domain/prompt-renderer.js +23 -18
  94. package/dist/v2/durable-core/domain/recap-recovery.js +23 -16
  95. package/dist/v2/durable-core/domain/retrieval-contract.js +13 -7
  96. package/dist/v2/durable-core/session-index.d.ts +22 -0
  97. package/dist/v2/durable-core/session-index.js +58 -0
  98. package/dist/v2/durable-core/sorted-event-log.d.ts +6 -0
  99. package/dist/v2/durable-core/sorted-event-log.js +15 -0
  100. package/dist/v2/infra/local/fs/index.js +8 -8
  101. package/dist/v2/infra/local/session-store/index.d.ts +1 -1
  102. package/dist/v2/infra/local/session-store/index.js +71 -61
  103. package/dist/v2/infra/local/session-summary-provider/index.js +9 -4
  104. package/dist/v2/infra/local/snapshot-store/index.js +2 -1
  105. package/dist/v2/infra/local/workspace-anchor/index.js +4 -1
  106. package/dist/v2/ports/session-event-log-store.port.d.ts +1 -1
  107. package/dist/v2/projections/assessment-consequences.d.ts +2 -1
  108. package/dist/v2/projections/assessment-consequences.js +0 -5
  109. package/dist/v2/projections/assessments.d.ts +2 -1
  110. package/dist/v2/projections/assessments.js +2 -4
  111. package/dist/v2/projections/gaps.d.ts +2 -1
  112. package/dist/v2/projections/gaps.js +0 -5
  113. package/dist/v2/projections/preferences.d.ts +2 -1
  114. package/dist/v2/projections/preferences.js +0 -5
  115. package/dist/v2/projections/run-context.d.ts +2 -2
  116. package/dist/v2/projections/run-context.js +0 -5
  117. package/dist/v2/projections/run-dag.js +7 -1
  118. package/dist/v2/projections/run-execution-trace.d.ts +8 -0
  119. package/dist/v2/projections/run-execution-trace.js +124 -0
  120. package/dist/v2/projections/run-status-signals.d.ts +2 -2
  121. package/dist/v2/usecases/console-routes.d.ts +3 -1
  122. package/dist/v2/usecases/console-routes.js +149 -3
  123. package/dist/v2/usecases/console-service.d.ts +2 -0
  124. package/dist/v2/usecases/console-service.js +87 -26
  125. package/dist/v2/usecases/console-types.d.ts +65 -0
  126. package/dist/v2/usecases/worktree-service.js +87 -8
  127. package/package.json +7 -6
  128. package/spec/authoring-spec.json +82 -1
  129. package/spec/workflow-tags.json +132 -0
  130. package/spec/workflow.schema.json +21 -11
  131. package/workflows/adaptive-ticket-creation.json +33 -8
  132. package/workflows/architecture-scalability-audit.json +50 -9
  133. package/workflows/bug-investigation.agentic.v2.json +43 -14
  134. package/workflows/coding-task-workflow-agentic.json +57 -38
  135. package/workflows/coding-task-workflow-agentic.lean.v2.json +129 -34
  136. package/workflows/coding-task-workflow-agentic.v2.json +97 -30
  137. package/workflows/cross-platform-code-conversion.v2.json +175 -48
  138. package/workflows/document-creation-workflow.json +49 -12
  139. package/workflows/documentation-update-workflow.json +9 -2
  140. package/workflows/intelligent-test-case-generation.json +9 -2
  141. package/workflows/learner-centered-course-workflow.json +273 -266
  142. package/workflows/mr-review-workflow.agentic.v2.json +88 -14
  143. package/workflows/personal-learning-materials-creation-branched.json +181 -174
  144. package/workflows/presentation-creation.json +167 -160
  145. package/workflows/production-readiness-audit.json +61 -15
  146. package/workflows/relocation-workflow-us.json +21 -5
  147. package/workflows/routines/tension-driven-design.json +1 -1
  148. package/workflows/scoped-documentation-workflow.json +9 -2
  149. package/workflows/test-artifact-loop-control.json +1 -2
  150. package/workflows/ui-ux-design-workflow.json +334 -0
  151. package/workflows/workflow-diagnose-environment.json +7 -1
  152. package/workflows/workflow-for-workflows.json +514 -484
  153. package/workflows/workflow-for-workflows.v2.json +55 -11
  154. package/workflows/wr.discovery.json +118 -29
  155. package/dist/console/assets/index-DW78t31j.css +0 -1
  156. package/dist/console/assets/index-EsSXrC_a.js +0 -28
@@ -2,7 +2,14 @@
2
2
  "id": "cross-platform-code-conversion",
3
3
  "name": "Cross-Platform Code Conversion",
4
4
  "version": "0.1.0",
5
- "description": "Guides an agent through converting code from one platform to another (e.g., Android to iOS, iOS to Web). Triages files by difficulty, delegates easy literal translations to parallel subagents, then the main agent tackles platform-specific code requiring design decisions.",
5
+ "description": "Use this to convert code from one platform to another (e.g. Android to iOS, iOS to Web). Triages files by difficulty, parallelizes easy translations, and handles platform-specific design decisions.",
6
+ "about": "## Cross-Platform Code Conversion Workflow\n\nThis workflow guides an AI agent through converting code from one platform to another - for example, Android (Kotlin) to iOS (Swift), iOS to Web (TypeScript/React), or any similar migration. It handles everything from scoping and analysis through idiomatic conversion, build verification, and final handoff.\n\n### What it does\n\nThe workflow starts by scoping the migration and classifying its complexity (Small, Medium, or Large) and adaptation depth (low, moderate, or high). It then analyzes the source architecture to understand patterns, dependencies, concurrency models, and semantic contracts. Files are triaged into three buckets: mechanical translations delegated to subagents in parallel (Bucket A), library substitutions (Bucket B), and platform-specific code needing design decisions (Bucket C). For high-adaptation migrations, the workflow runs a full design generation phase to choose an idiomatic target-platform architecture before any code is written. Implementation proceeds batch by batch, with drift detection after each batch to catch files that turn out harder than classified. A final build-and-integration loop verifies the full converted codebase before handoff.\n\n### When to use it\n\nUse this workflow when migrating a module, feature, or full component from one platform to another. It is especially valuable when:\n- The source and target platforms have meaningfully different idioms (e.g., Kotlin coroutines vs Swift async/await, Hilt vs Swinject)\n- You want parallel delegation of mechanical work while keeping design-sensitive boundaries with the main agent\n- Semantic contracts (lifecycle, threading, cancellation, error handling) must be preserved across the migration\n- The target repo has existing architectural patterns the migrated code must fit into\n\nFor very small, straightforward file-by-file translations, the workflow includes a fast path that skips planning and triage.\n\n### What it produces\n\n- A triage matrix classifying every file into a conversion bucket\n- A semantic contract inventory for non-trivial migration boundaries\n- A target integration analysis mapping boundaries to their destination repo seams\n- Converted source files in the target platform's idioms\n- A passing build or typecheck on the full converted output\n- A handoff summary covering adaptation decisions, known gaps, and items needing manual review\n\n### How to get good results\n\n- Specify the exact scope of the migration - which files, modules, or features to convert\n- If the target repo is not in the same workspace, point the agent to it explicitly or configure the source-to-target path mapping\n- Review the triage and semantic contract inventory steps before conversion begins, especially for high-adaptation migrations\n- Flag any invariants that must survive the migration (API contracts, behavioral guarantees, threading assumptions)",
7
+ "examples": [
8
+ "Convert the Android messaging inbox feature from Kotlin/Coroutines to iOS Swift/Combine",
9
+ "Migrate the Android authentication module (Hilt + ViewModel) to a SwiftUI equivalent",
10
+ "Port the shared data models and repository layer from Android Kotlin to TypeScript for the web client",
11
+ "Convert the Android search feature UI layer from Jetpack Compose to SwiftUI"
12
+ ],
6
13
  "recommendedPreferences": {
7
14
  "recommendedAutonomy": "guided",
8
15
  "recommendedRiskPolicy": "conservative"
@@ -71,12 +78,15 @@
71
78
  {
72
79
  "id": "phase-1-understand-source",
73
80
  "title": "Phase 1: Understand Source Code",
74
- "prompt": "Read and analyze the source code through a conversion lens — what will be easy to convert, what will be hard, and why.\n\nMap out:\n- Architecture and module structure\n- Key patterns used (MVI, MVVM, dependency injection, etc.)\n- External dependencies and what they do\n- Entry points and public API surface\n- Platform coupling depth: is the code cleanly layered or is platform-specific code smeared throughout? This directly determines how much falls into easy vs. hard buckets.\n- Concurrency model: Coroutines, Combine, RxJS, async/await? This is often the single hardest mapping decision.\n- DI approach: Dagger/Hilt, Swinject, Koin? DI frameworks rarely map 1:1.\n- Test coverage shape: unit tests on business logic (convert easily), UI tests (likely rewrite), integration tests (depends on infra).\n- Shared code boundaries: is there already a shared/common module that might not need conversion at all?\n- Non-trivial migration boundaries: public APIs, externally consumed module boundaries, and lifecycle/state/concurrency/resource boundaries that callers depend on.\n- Caller-visible guarantees for those boundaries. Examples include lifecycle/ownership, laziness vs eagerness, shared vs per-consumer behavior, cancellation/disposal, ordering/replay/buffering, failure behavior, threading/scheduling, or consistency/transaction guarantees.\n- Adaptation depth: classify whether the migration is `low`, `moderate`, or `high` adaptation based on architectural mismatch, missing target-side equivalents, lifecycle/state/concurrency mismatch, and the amount of adapter or redesign work needed.\n\nIdentify which files define or materially affect those boundaries and which of them will require target-repo integration analysis.\n\nCapture:\n- `sourceArchitecture`\n- `dependencies`\n- `publicApiSurface`\n- `platformCouplingAssessment`\n- `concurrencyModel`\n- `testCoverageShape`\n- `semanticBoundaryCandidates`\n- `boundaryCriticalFiles`\n- `adaptationProfile`",
81
+ "prompt": "Read and analyze the source code through a conversion lens \u2014 what will be easy to convert, what will be hard, and why.\n\nMap out:\n- Architecture and module structure\n- Key patterns used (MVI, MVVM, dependency injection, etc.)\n- External dependencies and what they do\n- Entry points and public API surface\n- Platform coupling depth: is the code cleanly layered or is platform-specific code smeared throughout? This directly determines how much falls into easy vs. hard buckets.\n- Concurrency model: Coroutines, Combine, RxJS, async/await? This is often the single hardest mapping decision.\n- DI approach: Dagger/Hilt, Swinject, Koin? DI frameworks rarely map 1:1.\n- Test coverage shape: unit tests on business logic (convert easily), UI tests (likely rewrite), integration tests (depends on infra).\n- Shared code boundaries: is there already a shared/common module that might not need conversion at all?\n- Non-trivial migration boundaries: public APIs, externally consumed module boundaries, and lifecycle/state/concurrency/resource boundaries that callers depend on.\n- Caller-visible guarantees for those boundaries. Examples include lifecycle/ownership, laziness vs eagerness, shared vs per-consumer behavior, cancellation/disposal, ordering/replay/buffering, failure behavior, threading/scheduling, or consistency/transaction guarantees.\n- Adaptation depth: classify whether the migration is `low`, `moderate`, or `high` adaptation based on architectural mismatch, missing target-side equivalents, lifecycle/state/concurrency mismatch, and the amount of adapter or redesign work needed.\n\nIdentify which files define or materially affect those boundaries and which of them will require target-repo integration analysis.\n\nCapture:\n- `sourceArchitecture`\n- `dependencies`\n- `publicApiSurface`\n- `platformCouplingAssessment`\n- `concurrencyModel`\n- `testCoverageShape`\n- `semanticBoundaryCandidates`\n- `boundaryCriticalFiles`\n- `adaptationProfile`",
75
82
  "promptFragments": [
76
83
  {
77
84
  "id": "phase-1-small-light",
78
- "when": { "var": "conversionComplexity", "equals": "Small" },
79
- "text": "For Small conversions, keep this lightweight. A quick read of the files in scope is enough — don't map the entire architecture. Focus on identifying any platform-specific code that would prevent a straight translation."
85
+ "when": {
86
+ "var": "conversionComplexity",
87
+ "equals": "Small"
88
+ },
89
+ "text": "For Small conversions, keep this lightweight. A quick read of the files in scope is enough \u2014 don't map the entire architecture. Focus on identifying any platform-specific code that would prevent a straight translation."
80
90
  }
81
91
  ],
82
92
  "requireConfirmation": {
@@ -89,11 +99,17 @@
89
99
  "title": "Small Conversion Fast Path",
90
100
  "runCondition": {
91
101
  "and": [
92
- { "var": "conversionComplexity", "equals": "Small" },
93
- { "var": "adaptationProfile", "equals": "low" }
102
+ {
103
+ "var": "conversionComplexity",
104
+ "equals": "Small"
105
+ },
106
+ {
107
+ "var": "adaptationProfile",
108
+ "equals": "low"
109
+ }
94
110
  ]
95
111
  },
96
- "prompt": "For Small conversions, skip triage and planning — just convert.\n\n- Translate the files to the target platform idiomatically\n- Follow target platform naming and structure conventions\n- Map any dependencies to target equivalents\n- Convert tests if they exist\n- Run build or typecheck to verify\n\nIf something turns out harder than expected (deep platform coupling, no clean dependency equivalent, or meaningful architectural mismatch), update `conversionComplexity` to `Medium`, update `adaptationProfile` to `moderate` or `high` based on the newly discovered mismatch, and stop. The full triage and planning pipeline will activate for the remaining work.\n\nCapture:\n- `filesConverted`\n- `buildPassed`\n- `conversionComplexity`\n- `adaptationProfile`",
112
+ "prompt": "For Small conversions, skip triage and planning \u2014 just convert.\n\n- Translate the files to the target platform idiomatically\n- Follow target platform naming and structure conventions\n- Map any dependencies to target equivalents\n- Convert tests if they exist\n- Run build or typecheck to verify\n\nIf something turns out harder than expected (deep platform coupling, no clean dependency equivalent, or meaningful architectural mismatch), update `conversionComplexity` to `Medium`, update `adaptationProfile` to `moderate` or `high` based on the newly discovered mismatch, and stop. The full triage and planning pipeline will activate for the remaining work.\n\nCapture:\n- `filesConverted`\n- `buildPassed`\n- `conversionComplexity`\n- `adaptationProfile`",
97
113
  "requireConfirmation": false
98
114
  },
99
115
  {
@@ -101,11 +117,17 @@
101
117
  "title": "Phase 2: Triage & Sort",
102
118
  "runCondition": {
103
119
  "or": [
104
- { "var": "conversionComplexity", "not_equals": "Small" },
105
- { "var": "adaptationProfile", "not_equals": "low" }
120
+ {
121
+ "var": "conversionComplexity",
122
+ "not_equals": "Small"
123
+ },
124
+ {
125
+ "var": "adaptationProfile",
126
+ "not_equals": "low"
127
+ }
106
128
  ]
107
129
  },
108
- "prompt": "Classify every file or module in scope into one of three buckets:\n\n**Bucket A — Literal translation**: Platform-agnostic business logic, data models, utilities, pure functions. These use no platform-specific APIs or libraries. Conversion is mechanical: translate the language syntax, follow target naming conventions, done. These will be delegated to subagents.\n\n**Bucket B — Library substitution**: Code that uses platform-specific libraries (networking, persistence, serialization, DI) but follows standard patterns. These need dependency mapping but the structure stays the same.\n\n**Bucket C — Platform-specific**: Code deeply tied to the platform (UI layer, lifecycle management, concurrency/threading, navigation, platform APIs). These need design decisions about target-platform idioms.\n\nFor each file or module, list:\n- File/module name\n- Bucket (A, B, or C)\n- One-line reason for classification\n- Dependencies it has on other files in scope (so we know conversion order)\n- Whether it is `boundaryCritical` for a non-trivial migration boundary\n- Which semantic boundaries it affects from `semanticBoundaryCandidates`\n- Whether it will require target-repo integration analysis\n\nBoundary-critical files must not be treated as blind mechanical translation just because the syntax looks simple. If a file materially affects a semantic boundary or destination-repo seam, keep it with main-agent review.\n\nSort the work items within each bucket by dependency order (convert dependencies first).\n\nGroup Bucket A files into parallel batches of 3-5 files each. Each batch should contain files with no cross-dependencies so subagents can work independently.\n\nGroup Bucket B and C files into sequential batches by dependency order.\n\nEach batch should have: `name` (short label), `bucket` (A, B, or C), and `files` (list of file paths).\n\nCapture:\n- `bucketABatches` (parallel batches for subagent delegation)\n- `bucketBCBatches` (sequential batches for main agent)\n- `bucketACounts`\n- `bucketBCounts`\n- `bucketCCounts`\n- `boundaryCriticalItems`",
130
+ "prompt": "Classify every file or module in scope into one of three buckets:\n\n**Bucket A \u2014 Literal translation**: Platform-agnostic business logic, data models, utilities, pure functions. These use no platform-specific APIs or libraries. Conversion is mechanical: translate the language syntax, follow target naming conventions, done. These will be delegated to subagents.\n\n**Bucket B \u2014 Library substitution**: Code that uses platform-specific libraries (networking, persistence, serialization, DI) but follows standard patterns. These need dependency mapping but the structure stays the same.\n\n**Bucket C \u2014 Platform-specific**: Code deeply tied to the platform (UI layer, lifecycle management, concurrency/threading, navigation, platform APIs). These need design decisions about target-platform idioms.\n\nFor each file or module, list:\n- File/module name\n- Bucket (A, B, or C)\n- One-line reason for classification\n- Dependencies it has on other files in scope (so we know conversion order)\n- Whether it is `boundaryCritical` for a non-trivial migration boundary\n- Which semantic boundaries it affects from `semanticBoundaryCandidates`\n- Whether it will require target-repo integration analysis\n\nBoundary-critical files must not be treated as blind mechanical translation just because the syntax looks simple. If a file materially affects a semantic boundary or destination-repo seam, keep it with main-agent review.\n\nSort the work items within each bucket by dependency order (convert dependencies first).\n\nGroup Bucket A files into parallel batches of 3-5 files each. Each batch should contain files with no cross-dependencies so subagents can work independently.\n\nGroup Bucket B and C files into sequential batches by dependency order.\n\nEach batch should have: `name` (short label), `bucket` (A, B, or C), and `files` (list of file paths).\n\nCapture:\n- `bucketABatches` (parallel batches for subagent delegation)\n- `bucketBCBatches` (sequential batches for main agent)\n- `bucketACounts`\n- `bucketBCounts`\n- `bucketCCounts`\n- `boundaryCriticalItems`",
109
131
  "requireConfirmation": true
110
132
  },
111
133
  {
@@ -113,27 +135,45 @@
113
135
  "title": "Phase 3a: Semantic Contract Inventory",
114
136
  "runCondition": {
115
137
  "or": [
116
- { "var": "conversionComplexity", "not_equals": "Small" },
117
- { "var": "adaptationProfile", "not_equals": "low" }
138
+ {
139
+ "var": "conversionComplexity",
140
+ "not_equals": "Small"
141
+ },
142
+ {
143
+ "var": "adaptationProfile",
144
+ "not_equals": "low"
145
+ }
118
146
  ]
119
147
  },
120
148
  "prompt": "Before planning implementation, create a compact semantic contract inventory for the non-trivial migration boundaries in scope.\n\nFocus on:\n- Public APIs\n- Externally consumed module boundaries\n- Lifecycle/state/concurrency/resource boundaries that callers rely on\n\nFor each boundary, record:\n- `boundary`: short identifier for the surface\n- `sourceSurface`: source API or construct\n- `keyGuarantees`: the caller-visible guarantees that must remain true\n- `targetConstruct`: chosen target type or pattern\n- `status`: `preserved`, `intentionally_changed`, or `uncertain`\n- `rationale`\n- `verificationPlan`\n\nUse examples of semantic dimensions as examples only, not a mandatory checklist: lifecycle/ownership, laziness vs eagerness, shared vs per-consumer behavior, cancellation/disposal, ordering/replay/buffering, failure behavior, threading/scheduling, or consistency/transaction guarantees.\n\nIf a boundary cannot be mapped confidently, mark it `uncertain` rather than guessing.\n\nCapture:\n- `semanticContractInventory`\n- `hasUncertainBoundaries`\n- `hasIntentionalBoundaryChanges`",
121
149
  "promptFragments": [
122
150
  {
123
151
  "id": "phase-3a-medium-focused",
124
- "when": { "var": "conversionComplexity", "equals": "Medium" },
152
+ "when": {
153
+ "var": "conversionComplexity",
154
+ "equals": "Medium"
155
+ },
125
156
  "text": "For Medium conversions, inventory only the non-trivial boundaries actually touched by the scoped work. Don't expand this into a whole-system audit."
126
157
  },
127
158
  {
128
159
  "id": "phase-3a-high-adaptation",
129
- "when": { "var": "adaptationProfile", "equals": "high" },
160
+ "when": {
161
+ "var": "adaptationProfile",
162
+ "equals": "high"
163
+ },
130
164
  "text": "For high-adaptation migrations, cover every touched non-trivial boundary and explicitly call out where the contract is likely to be preserved, intentionally changed, or under architectural pressure."
131
165
  }
132
166
  ],
133
167
  "requireConfirmation": {
134
168
  "or": [
135
- { "var": "hasUncertainBoundaries", "equals": true },
136
- { "var": "hasIntentionalBoundaryChanges", "equals": true }
169
+ {
170
+ "var": "hasUncertainBoundaries",
171
+ "equals": true
172
+ },
173
+ {
174
+ "var": "hasIntentionalBoundaryChanges",
175
+ "equals": true
176
+ }
137
177
  ]
138
178
  }
139
179
  },
@@ -142,20 +182,32 @@
142
182
  "title": "Phase 3b: Target Integration Analysis",
143
183
  "runCondition": {
144
184
  "or": [
145
- { "var": "conversionComplexity", "not_equals": "Small" },
146
- { "var": "adaptationProfile", "not_equals": "low" }
185
+ {
186
+ "var": "conversionComplexity",
187
+ "not_equals": "Small"
188
+ },
189
+ {
190
+ "var": "adaptationProfile",
191
+ "not_equals": "low"
192
+ }
147
193
  ]
148
194
  },
149
195
  "prompt": "For each touched non-trivial migration boundary, analyze how it should fit into the target repo.\n\nThis step is about destination-repo fit, not source semantic guarantees. `semanticContractInventory` defines what must remain true. This step defines where the migrated boundary belongs and what target seams it must satisfy.\n\nFocus only on touched boundaries, not the whole target repo.\n\nFor each boundary, record:\n- `boundary`\n- `targetArea`: target module/layer/package/file area it belongs in\n- `existingPattern`: nearest relevant target abstraction or convention\n- `integrationPoints`: callers, adapters, DI/state/navigation/lifecycle hooks, persistence/network seams, or other required target touchpoints\n- `constraints`: repo-specific rules this boundary must satisfy\n- `decision`: `reuse_as_is`, `reuse_with_adapter`, `extend_existing`, `introduce_new`, or `defer_pending_decision`\n- `fitAssessment`: why the nearest existing abstraction is appropriate, insufficient, or misleading\n- `integrationRisk`: `low`, `medium`, or `high`\n- `uncertain`: `true` or `false`\n- `evidenceFiles`: concrete target files observed\n- `evidenceSymbols`: concrete target symbols observed\n- `rationale`\n\nUse concrete evidence from the target repo. Existing target code is evidence, not authority.\n\nCapture:\n- `targetIntegrationAnalysis`\n- `hasUncertainIntegrationPoints`",
150
196
  "promptFragments": [
151
197
  {
152
198
  "id": "phase-3b-medium-focused",
153
- "when": { "var": "conversionComplexity", "equals": "Medium" },
199
+ "when": {
200
+ "var": "conversionComplexity",
201
+ "equals": "Medium"
202
+ },
154
203
  "text": "For Medium conversions, analyze only the touched boundaries that need real destination-repo decisions. Do not expand this into a broad target-architecture survey."
155
204
  },
156
205
  {
157
206
  "id": "phase-3b-high-adaptation",
158
- "when": { "var": "adaptationProfile", "equals": "high" },
207
+ "when": {
208
+ "var": "adaptationProfile",
209
+ "equals": "high"
210
+ },
159
211
  "text": "For high-adaptation migrations, compare at least one plausible alternative target area or seam for each important boundary and explain why the chosen fit is still the best one."
160
212
  }
161
213
  ],
@@ -204,20 +256,32 @@
204
256
  "title": "Phase 3f: Plan Platform-Specific Conversions",
205
257
  "runCondition": {
206
258
  "or": [
207
- { "var": "conversionComplexity", "not_equals": "Small" },
208
- { "var": "adaptationProfile", "not_equals": "low" }
259
+ {
260
+ "var": "conversionComplexity",
261
+ "not_equals": "Small"
262
+ },
263
+ {
264
+ "var": "adaptationProfile",
265
+ "not_equals": "low"
266
+ }
209
267
  ]
210
268
  },
211
269
  "prompt": "For Bucket B and Bucket C items, plan the conversion before writing code.\n\nUse both `semanticContractInventory` and `targetIntegrationAnalysis` as required inputs. If `adaptationProfile` is `high`, also use `architectureAdaptationPlan` as a required input. The implementation plan must preserve boundary contracts and fit the target repo's actual seams.\n\nFor Bucket B (library substitution):\n- Map each source dependency to its target-platform equivalent\n- If no equivalent exists, flag it and propose an alternative\n\nFor Bucket C (platform-specific):\n- Threading/concurrency model mapping\n- UI framework mapping\n- DI framework mapping\n- State management mapping\n- Error handling mapping\n- Navigation patterns\n- Lifecycle management approach\n- Testing framework mapping\n- Target module/layer placement and adapter seams\n\nFor anything with no clean target equivalent, propose an idiomatic solution and explain the tradeoff.\n\nBucket A items don't need a plan. They're mechanical translation handled by subagents.\n\nCapture:\n- `idiomMapping`\n- `dependencyMapping`\n- `tradeoffs`",
212
270
  "promptFragments": [
213
271
  {
214
272
  "id": "phase-3f-medium-focused",
215
- "when": { "var": "conversionComplexity", "equals": "Medium" },
216
- "text": "For Medium conversions, focus the plan on the items that actually need design decisions. Don't exhaustively map every dimension — only the ones relevant to the files in scope."
273
+ "when": {
274
+ "var": "conversionComplexity",
275
+ "equals": "Medium"
276
+ },
277
+ "text": "For Medium conversions, focus the plan on the items that actually need design decisions. Don't exhaustively map every dimension \u2014 only the ones relevant to the files in scope."
217
278
  },
218
279
  {
219
280
  "id": "phase-3f-high-adaptation",
220
- "when": { "var": "adaptationProfile", "equals": "high" },
281
+ "when": {
282
+ "var": "adaptationProfile",
283
+ "equals": "high"
284
+ },
221
285
  "text": "For high-adaptation migrations, state clearly what is translated directly, what is adapted through seams or adapters, and what is intentionally redesigned."
222
286
  }
223
287
  ],
@@ -230,11 +294,20 @@
230
294
  "and": [
231
295
  {
232
296
  "or": [
233
- { "var": "conversionComplexity", "not_equals": "Small" },
234
- { "var": "adaptationProfile", "not_equals": "low" }
297
+ {
298
+ "var": "conversionComplexity",
299
+ "not_equals": "Small"
300
+ },
301
+ {
302
+ "var": "adaptationProfile",
303
+ "not_equals": "low"
304
+ }
235
305
  ]
236
306
  },
237
- { "var": "bucketACounts", "not_equals": 0 }
307
+ {
308
+ "var": "bucketACounts",
309
+ "not_equals": 0
310
+ }
238
311
  ]
239
312
  },
240
313
  "promptBlocks": {
@@ -270,8 +343,14 @@
270
343
  "title": "Phase 5: Convert Bucket B & C (Main Agent)",
271
344
  "runCondition": {
272
345
  "or": [
273
- { "var": "conversionComplexity", "not_equals": "Small" },
274
- { "var": "adaptationProfile", "not_equals": "low" }
346
+ {
347
+ "var": "conversionComplexity",
348
+ "not_equals": "Small"
349
+ },
350
+ {
351
+ "var": "adaptationProfile",
352
+ "not_equals": "low"
353
+ }
275
354
  ]
276
355
  },
277
356
  "loop": {
@@ -331,14 +410,38 @@
331
410
  "title": "Verify Batch",
332
411
  "runCondition": {
333
412
  "or": [
334
- { "var": "bucketDriftDetected", "equals": true },
335
- { "var": "unexpectedDependency", "equals": true },
336
- { "var": "buildBroke", "equals": true },
337
- { "var": "contractDriftDetected", "equals": true },
338
- { "var": "contractUncertain", "equals": true },
339
- { "var": "integrationDriftDetected", "equals": true },
340
- { "var": "integrationUncertain", "equals": true },
341
- { "var": "architectureDriftDetected", "equals": true }
413
+ {
414
+ "var": "bucketDriftDetected",
415
+ "equals": true
416
+ },
417
+ {
418
+ "var": "unexpectedDependency",
419
+ "equals": true
420
+ },
421
+ {
422
+ "var": "buildBroke",
423
+ "equals": true
424
+ },
425
+ {
426
+ "var": "contractDriftDetected",
427
+ "equals": true
428
+ },
429
+ {
430
+ "var": "contractUncertain",
431
+ "equals": true
432
+ },
433
+ {
434
+ "var": "integrationDriftDetected",
435
+ "equals": true
436
+ },
437
+ {
438
+ "var": "integrationUncertain",
439
+ "equals": true
440
+ },
441
+ {
442
+ "var": "architectureDriftDetected",
443
+ "equals": true
444
+ }
342
445
  ]
343
446
  },
344
447
  "promptBlocks": {
@@ -370,9 +473,18 @@
370
473
  },
371
474
  "requireConfirmation": {
372
475
  "or": [
373
- { "var": "contractUncertain", "equals": true },
374
- { "var": "integrationUncertain", "equals": true },
375
- { "var": "architectureDriftDetected", "equals": true }
476
+ {
477
+ "var": "contractUncertain",
478
+ "equals": true
479
+ },
480
+ {
481
+ "var": "integrationUncertain",
482
+ "equals": true
483
+ },
484
+ {
485
+ "var": "architectureDriftDetected",
486
+ "equals": true
487
+ }
376
488
  ]
377
489
  }
378
490
  }
@@ -384,8 +496,14 @@
384
496
  "title": "Phase 6: Final Verification",
385
497
  "runCondition": {
386
498
  "or": [
387
- { "var": "conversionComplexity", "not_equals": "Small" },
388
- { "var": "adaptationProfile", "not_equals": "low" }
499
+ {
500
+ "var": "conversionComplexity",
501
+ "not_equals": "Small"
502
+ },
503
+ {
504
+ "var": "adaptationProfile",
505
+ "not_equals": "low"
506
+ }
389
507
  ]
390
508
  },
391
509
  "loop": {
@@ -401,7 +519,7 @@
401
519
  {
402
520
  "id": "phase-6a-full-build",
403
521
  "title": "Full Build and Integration Check",
404
- "prompt": "Run a full build or typecheck on the entire converted codebase both subagent-converted and main-agent-converted code together.\n\nCheck for:\n- Build/compile errors from cross-batch integration issues\n- Inconsistencies between subagent output and main agent output (naming, patterns)\n- Non-idiomatic patterns that slipped through\n- Missing error handling at module boundaries\n- Threading or concurrency issues across modules\n- Broken public API contracts\n- Contract inventory drift: every row in `semanticContractInventory` is still accounted for, no `uncertain` rows remain, preserved contracts still look preserved, and intentional changes are still justified\n- Target integration drift: code landed in the intended target layer/module, reuse/adaptation decisions still fit the observed target seams, and no unresolved target integration uncertainties remain\n- High-adaptation architecture drift: if `adaptationProfile` is `high`, the final code still matches `architectureAdaptationPlan` and any deviations are explicit and justified\n\nFix each issue. If a fix is a band-aid over a deeper mapping problem, go back and fix the mapping.\n\nCapture:\n- `fullBuildPassed`\n- `integrationIssues`\n- `issuesFixed`",
522
+ "prompt": "Run a full build or typecheck on the entire converted codebase \u2014 both subagent-converted and main-agent-converted code together.\n\nCheck for:\n- Build/compile errors from cross-batch integration issues\n- Inconsistencies between subagent output and main agent output (naming, patterns)\n- Non-idiomatic patterns that slipped through\n- Missing error handling at module boundaries\n- Threading or concurrency issues across modules\n- Broken public API contracts\n- Contract inventory drift: every row in `semanticContractInventory` is still accounted for, no `uncertain` rows remain, preserved contracts still look preserved, and intentional changes are still justified\n- Target integration drift: code landed in the intended target layer/module, reuse/adaptation decisions still fit the observed target seams, and no unresolved target integration uncertainties remain\n- High-adaptation architecture drift: if `adaptationProfile` is `high`, the final code still matches `architectureAdaptationPlan` and any deviations are explicit and justified\n\nFix each issue. If a fix is a band-aid over a deeper mapping problem, go back and fix the mapping.\n\nCapture:\n- `fullBuildPassed`\n- `integrationIssues`\n- `issuesFixed`",
405
523
  "requireConfirmation": false
406
524
  },
407
525
  {
@@ -422,15 +540,24 @@
422
540
  "promptFragments": [
423
541
  {
424
542
  "id": "phase-7-small-summary",
425
- "when": { "var": "conversionComplexity", "equals": "Small" },
426
- "text": "For Small conversions, keep the summary brief — just list what was converted, build status, and any issues."
543
+ "when": {
544
+ "var": "conversionComplexity",
545
+ "equals": "Small"
546
+ },
547
+ "text": "For Small conversions, keep the summary brief \u2014 just list what was converted, build status, and any issues."
427
548
  },
428
549
  {
429
550
  "id": "phase-7-full-summary",
430
551
  "when": {
431
552
  "or": [
432
- { "var": "conversionComplexity", "not_equals": "Small" },
433
- { "var": "adaptationProfile", "not_equals": "low" }
553
+ {
554
+ "var": "conversionComplexity",
555
+ "not_equals": "Small"
556
+ },
557
+ {
558
+ "var": "adaptationProfile",
559
+ "not_equals": "low"
560
+ }
434
561
  ]
435
562
  },
436
563
  "text": "Also include: bucket breakdown (A/B/C counts), delegation results (how many files delegated, subagent quality, any reclassified), key idiom mapping decisions, dependency substitutions, notable preserved contracts, notable target integration decisions, any intentional semantic changes, the selected adaptation approach, any architecture adaptation choices, and any boundaries needing manual review."
@@ -2,7 +2,14 @@
2
2
  "id": "document-creation-workflow",
3
3
  "name": "Document Creation Workflow",
4
4
  "version": "1.0.0",
5
- "description": "Create broad or comprehensive documentation spanning multiple components or systems. Suited for project READMEs, complete API documentation, user guides covering multiple features, and technical specifications. Uses complexity triage (Simple/Standard/Complex) to adapt rigor. For single, bounded subjects (one class, one integration), use scoped-documentation-workflow instead.",
5
+ "description": "Use this to create broad or comprehensive documentation spanning multiple components or systems project READMEs, complete API docs, user guides, or technical specifications.",
6
+ "about": "## Document Creation Workflow\n\nThis workflow guides you through creating new documentation from scratch -- ranging from a simple project README to a full technical specification spanning multiple systems. It automatically calibrates depth to match the complexity of your request: simple tasks go straight to writing, while complex documentation gets a full analysis-and-planning phase first.\n\n### What it produces\n\nA complete, saved documentation file ready for use. Depending on complexity, it may also include a quality review pass covering accuracy, completeness, audience fit, usability, and style consistency.\n\n### When to use it\n\n- You need to create a **new** document (not update an existing one -- see the Documentation Update workflow for that).\n- The document spans one or more systems, components, or audiences.\n- Examples: project READMEs, API reference docs, user guides, onboarding docs, technical specifications, architecture overviews.\n\n### When NOT to use it\n\n- You want to update or refresh an existing doc -- use the Documentation Update workflow instead.\n- You need tight scope discipline for a single class or mechanism -- the Scoped Documentation workflow is better suited.\n\n### How to get good results\n\n- Be specific about the document type and intended audience upfront. The workflow probes for these, but the clearer your initial goal, the less back-and-forth.\n- If your project has existing documentation or style conventions, mention them -- the workflow will follow them.\n- For complex documentation, the workflow asks a small number of targeted questions it cannot answer from the codebase. Answer these concisely to keep momentum.",
7
+ "examples": [
8
+ "Create a README for the payments-service repo with setup, config, and deployment instructions",
9
+ "Write a full API reference for the new notifications SDK, including all endpoints and error codes",
10
+ "Create a user guide for the self-serve onboarding flow targeting non-technical customers",
11
+ "Write a technical specification for the proposed event-sourcing migration"
12
+ ],
6
13
  "preconditions": [
7
14
  "User has a clear idea of the document type and purpose.",
8
15
  "Relevant project files or information are available for reference.",
@@ -31,8 +38,14 @@
31
38
  "title": "Phase 1: Project Analysis",
32
39
  "runCondition": {
33
40
  "or": [
34
- { "var": "docComplexity", "equals": "Standard" },
35
- { "var": "docComplexity", "equals": "Complex" }
41
+ {
42
+ "var": "docComplexity",
43
+ "equals": "Standard"
44
+ },
45
+ {
46
+ "var": "docComplexity",
47
+ "equals": "Complex"
48
+ }
36
49
  ]
37
50
  },
38
51
  "prompt": "Analyze the project to inform documentation strategy. Limit this analysis to 1500 words; prioritize documentation-relevant insights.\n\nCover:\n1. **Existing documentation landscape** — current docs, style patterns, gaps\n2. **Project architecture** — key components relevant to this document\n3. **User or developer workflows** — how documentation fits into user journeys\n4. **Technical constraints** — APIs, systems, integrations to document\n5. **Style conventions** — terminology, formatting, naming patterns to follow\n6. **Audience** — who will use this documentation and what they need to accomplish\n\nNote any complexity indicators that might warrant reclassifying `docComplexity` upward.",
@@ -43,8 +56,14 @@
43
56
  "title": "Phase 2: Targeted Requirements",
44
57
  "runCondition": {
45
58
  "or": [
46
- { "var": "docComplexity", "equals": "Standard" },
47
- { "var": "docComplexity", "equals": "Complex" }
59
+ {
60
+ "var": "docComplexity",
61
+ "equals": "Standard"
62
+ },
63
+ {
64
+ "var": "docComplexity",
65
+ "equals": "Complex"
66
+ }
48
67
  ]
49
68
  },
50
69
  "prompt": "Based on your project analysis, ask 2 to 4 targeted questions that you genuinely cannot answer from the codebase or project files.\n\nFocus on clarifications that materially affect content or structure:\n- Specific scope boundaries (what to include or exclude)\n- Audience-specific requirements not evident from the code\n- Constraints, templates, or organizational standards to follow\n- Integration requirements with existing documentation systems\n\nDo not ask questions the analysis already answered.",
@@ -55,15 +74,24 @@
55
74
  "title": "Phase 3: Content Plan",
56
75
  "runCondition": {
57
76
  "or": [
58
- { "var": "docComplexity", "equals": "Standard" },
59
- { "var": "docComplexity", "equals": "Complex" }
77
+ {
78
+ "var": "docComplexity",
79
+ "equals": "Standard"
80
+ },
81
+ {
82
+ "var": "docComplexity",
83
+ "equals": "Complex"
84
+ }
60
85
  ]
61
86
  },
62
87
  "prompt": "Create a content plan for this documentation in your notes.\n\nThe plan should cover:\n1. Document purpose and success criteria\n2. Target audience and their primary goals\n3. Section outline with one-line descriptions\n4. Writing strategy — tone, technical depth, key terminology\n5. Visual elements or code examples needed\n\nKeep the plan proportional to scope. The goal is a clear outline to execute against, not a heavyweight specification.",
63
88
  "promptFragments": [
64
89
  {
65
90
  "id": "phase-3-plan-complex",
66
- "when": { "var": "docComplexity", "equals": "Complex" },
91
+ "when": {
92
+ "var": "docComplexity",
93
+ "equals": "Complex"
94
+ },
67
95
  "text": "For Complex documentation, also include: integration strategy (how this doc connects to the existing documentation ecosystem), maintenance ownership, and any stakeholder review requirements."
68
96
  }
69
97
  ],
@@ -80,15 +108,24 @@
80
108
  "title": "Phase 5: Quality Review",
81
109
  "runCondition": {
82
110
  "or": [
83
- { "var": "docComplexity", "equals": "Standard" },
84
- { "var": "docComplexity", "equals": "Complex" }
111
+ {
112
+ "var": "docComplexity",
113
+ "equals": "Standard"
114
+ },
115
+ {
116
+ "var": "docComplexity",
117
+ "equals": "Complex"
118
+ }
85
119
  ]
86
120
  },
87
121
  "prompt": "Review the documentation you just wrote using this rubric. For each dimension, provide a one-sentence evidence statement and a verdict of `pass` or `needs-work`.\n\n1. **Accuracy** — Does the content correctly describe the actual project or system? *(Evidence: cite one verified fact.)*\n2. **Completeness** — Does it cover all planned sections? *(Evidence: list planned vs completed sections.)*\n3. **Audience fit** — Is the technical depth right for the target reader? *(Evidence: identify one audience-appropriate choice made.)*\n4. **Usability** — Could a reader actually accomplish their goal using this doc? *(Evidence: trace one user journey through the doc.)*\n5. **Consistency** — Does it match project conventions for style, terminology, and format? *(Evidence: cite one convention followed.)*\n\nIf any dimension is `needs-work`, fix the issue immediately and re-assert the dimension as `pass` in your notes before continuing.",
88
122
  "promptFragments": [
89
123
  {
90
124
  "id": "phase-5-quality-review-complex",
91
- "when": { "var": "docComplexity", "equals": "Complex" },
125
+ "when": {
126
+ "var": "docComplexity",
127
+ "equals": "Complex"
128
+ },
92
129
  "text": "Also review a sixth dimension:\n6. **Integration coherence** — Does the doc integrate correctly with the existing documentation ecosystem? *(Evidence: describe how it cross-links or relates to existing docs.)*"
93
130
  }
94
131
  ],
@@ -111,4 +148,4 @@
111
148
  "requireConfirmation": false
112
149
  }
113
150
  ]
114
- }
151
+ }
@@ -2,7 +2,14 @@
2
2
  "id": "documentation-update-workflow",
3
3
  "name": "Documentation Update & Maintenance Workflow",
4
4
  "version": "2.0.0",
5
- "description": "Update and maintain existing documentation. Uses git history to detect staleness, maps sections to current code, and systematically refreshes outdated content while preserving valuable accurate sections. For updating existing docs — not for creating new documentation.",
5
+ "description": "Use this to update and maintain existing documentation. Uses git history to detect staleness, maps sections to current code, and refreshes outdated content while preserving what's still accurate.",
6
+ "about": "## Documentation Update & Maintenance Workflow\n\nUse this when you have **existing** documentation that may be out of date and needs to be refreshed to match the current state of the codebase. The workflow uses git history as its primary evidence source: it checks when the docs were last committed, what changed in the relevant code since then, and classifies staleness before touching anything.\n\n### What it produces\n\nUpdated documentation files with stale or inaccurate sections corrected, missing coverage added, and removed content pruned. A completion summary is written to notes for future maintainers, including maintenance recommendations and sections at risk of going stale again quickly.\n\n### When to use it\n\n- A feature shipped and the docs were never updated.\n- You suspect a doc is outdated but aren't sure which parts.\n- You want a systematic, section-by-section audit rather than a quick edit.\n- The repo has git history covering both code and docs (the workflow degrades gracefully without git, but git history is the primary evidence source).\n\n### When NOT to use it\n\n- You are writing a doc from scratch -- use the Document Creation workflow instead.\n- You only need to fix a single known typo or sentence -- just edit the file directly.\n\n### How to get good results\n\n- Point the workflow at the specific documentation files and the code directories they describe.\n- The workflow will ask you to approve an update plan before making any edits -- review it carefully. This is the main checkpoint where you control scope.\n- If you want to defer lower-priority improvements, say so during plan review.",
7
+ "examples": [
8
+ "Update the API docs for the search service after last month's v3 endpoint changes",
9
+ "Refresh the developer onboarding guide -- it hasn't been updated since we migrated to Gradle 8",
10
+ "Audit the architecture decision records in docs/adr/ for accuracy against the current codebase",
11
+ "Update the GraphQL schema documentation to reflect recent breaking changes"
12
+ ],
6
13
  "preconditions": [
7
14
  "Target documentation files are accessible",
8
15
  "Agent has git access to the repository containing both docs and code",
@@ -97,4 +104,4 @@
97
104
  "requireConfirmation": false
98
105
  }
99
106
  ]
100
- }
107
+ }
@@ -2,7 +2,14 @@
2
2
  "id": "intelligent-test-case-generation",
3
3
  "name": "Test Case Generation from Tickets",
4
4
  "version": "1.0.0",
5
- "description": "Systematically extracts integration and end-to-end test cases from ticket requirements. Reads the ticket carefully, identifies boundary conditions and edge cases, traces affected code paths, and produces developer-readable test case descriptions ready for implementation.",
5
+ "description": "Use this to generate integration and end-to-end test cases from ticket requirements. Reads the ticket, traces affected code paths, identifies boundary conditions, and produces developer-readable test case descriptions.",
6
+ "about": "## Intelligent Test Case Generation\n\nThis workflow generates structured integration and end-to-end test cases directly from a ticket. It reads the ticket requirements, traces the affected code paths in the codebase, identifies boundary conditions and failure scenarios, and produces developer-readable test case descriptions that a developer can implement without guessing.\n\n**What it does:**\nThe workflow extracts every acceptance criterion from the ticket, traces which modules, endpoints, and integration boundaries are involved, identifies the existing test patterns in the repo (so generated cases match the team's style), then systematically generates happy path, boundary, and failure scenarios for each criterion. It checks coverage before writing, resolves ambiguities with you before generating anything uncertain, and finishes with a full test case list plus a coverage summary.\n\n**When to use it:**\n- When a ticket has clear acceptance criteria and you want comprehensive test coverage without manually reasoning through every edge case\n- When onboarding to a feature area and wanting to understand the expected behavior through its test scenarios\n- When a ticket spans multiple services or integration points and you need coverage across all of them\n- When preparing for a QA handoff or code review where test coverage must be explicitly demonstrated\n\n**What it produces:**\nNumbered test cases (TC-1, TC-2, ...) each with a title, acceptance criterion mapping, test type (Integration or E2E), risk level, preconditions, numbered test steps, expected result, and implementation notes. Cases are grouped by acceptance criterion and followed by a summary table. Open ambiguities and coverage gaps are disclosed explicitly.\n\n**How to get good results:**\nProvide the ticket in any standard format -- title, description, and acceptance criteria are enough. The workflow will trace the codebase itself. 
If the ticket has linked specs, API docs, or architecture diagrams, mention them. The more complete the acceptance criteria, the fewer clarifying questions the workflow will need to ask.",
7
+ "examples": [
8
+ "Generate test cases for ACEI-1591: expire cached conversations after 24 hours of inactivity",
9
+ "Write integration test scenarios for the ticket adding multi-factor authentication to the login flow",
10
+ "Generate E2E test cases for the checkout redesign ticket covering all payment method variations",
11
+ "Create test cases for the ticket migrating user profile storage from Postgres to the new profile service"
12
+ ],
6
13
  "preconditions": [
7
14
  "User provides a ticket (title, description, acceptance criteria) in any standard format.",
8
15
  "Agent has read access to the codebase for tracing affected paths and finding existing test patterns.",
@@ -31,7 +38,7 @@
31
38
  "var": "ambiguities",
32
39
  "not_equals": []
33
40
  },
34
- "prompt": "Before generating test scenarios, resolve the ambiguities you found.\n\nFor each ambiguity in `ambiguities`:\n1. State what is unclear and why it matters for test design\n2. Propose the most reasonable interpretation based on context\n3. Ask me to confirm, adjust, or provide the missing information\n\nKeep questions targeted. If the ticket, codebase, or docs can answer the question, answer it yourself first.\n\nIf the user's response significantly changes the scope or adds new acceptance criteria, revisit Phase 1 scenario identification before continuing do not carry stale scenarios forward.\n\nCapture:\n- `resolvedAmbiguities` -- list of ambiguities with chosen interpretation\n- `openAmbiguities` -- ambiguities the user still needs to resolve (initialize as empty)",
41
+ "prompt": "Before generating test scenarios, resolve the ambiguities you found.\n\nFor each ambiguity in `ambiguities`:\n1. State what is unclear and why it matters for test design\n2. Propose the most reasonable interpretation based on context\n3. Ask me to confirm, adjust, or provide the missing information\n\nKeep questions targeted. If the ticket, codebase, or docs can answer the question, answer it yourself first.\n\nIf the user's response significantly changes the scope or adds new acceptance criteria, revisit Phase 1 scenario identification before continuing \u2014 do not carry stale scenarios forward.\n\nCapture:\n- `resolvedAmbiguities` -- list of ambiguities with chosen interpretation\n- `openAmbiguities` -- ambiguities the user still needs to resolve (initialize as empty)",
35
42
  "requireConfirmation": true
36
43
  },
37
44
  {