@exaudeus/workrail 3.67.0 → 3.68.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140):
  1. package/dist/application/services/compiler/template-registry.js +10 -1
  2. package/dist/cli/commands/worktrain-init.js +1 -1
  3. package/dist/console-ui/assets/{index-tOl8Vowf.js → index-CyzltI6D.js} +1 -1
  4. package/dist/console-ui/index.html +1 -1
  5. package/dist/coordinators/modes/full-pipeline.js +4 -4
  6. package/dist/coordinators/modes/implement-shared.js +5 -5
  7. package/dist/coordinators/modes/implement.js +4 -4
  8. package/dist/coordinators/pr-review.js +4 -4
  9. package/dist/daemon/workflow-runner.d.ts +1 -0
  10. package/dist/daemon/workflow-runner.js +1 -0
  11. package/dist/manifest.json +25 -25
  12. package/dist/mcp/handlers/v2-workflow.js +1 -1
  13. package/dist/mcp/workflow-protocol-contracts.js +2 -2
  14. package/docs/authoring-v2.md +4 -4
  15. package/docs/changelog-recent.md +3 -3
  16. package/docs/configuration.md +1 -1
  17. package/docs/design/adaptive-coordinator-context-candidates.md +1 -1
  18. package/docs/design/adaptive-coordinator-context.md +1 -1
  19. package/docs/design/adaptive-coordinator-routing-candidates.md +18 -18
  20. package/docs/design/adaptive-coordinator-routing-review.md +1 -1
  21. package/docs/design/adaptive-coordinator-routing.md +34 -34
  22. package/docs/design/agent-cascade-protocol.md +2 -2
  23. package/docs/design/console-daemon-separation-discovery.md +323 -0
  24. package/docs/design/context-assembly-design-candidates.md +1 -1
  25. package/docs/design/context-assembly-implementation-plan.md +1 -1
  26. package/docs/design/context-assembly-layer.md +2 -2
  27. package/docs/design/context-assembly-review-findings.md +1 -1
  28. package/docs/design/coordinator-access-audit.md +293 -0
  29. package/docs/design/coordinator-architecture-audit.md +62 -0
  30. package/docs/design/coordinator-error-handling-audit.md +240 -0
  31. package/docs/design/coordinator-testability-audit.md +426 -0
  32. package/docs/design/daemon-architecture-discovery.md +1 -1
  33. package/docs/design/daemon-console-separation-discovery.md +242 -0
  34. package/docs/design/daemon-memory-audit.md +203 -0
  35. package/docs/design/design-candidates-console-daemon-separation.md +256 -0
  36. package/docs/design/design-candidates-discovery-loop-fix.md +141 -0
  37. package/docs/design/design-review-findings-console-daemon-separation.md +106 -0
  38. package/docs/design/design-review-findings-discovery-loop-fix.md +81 -0
  39. package/docs/design/discovery-loop-fix-candidates.md +161 -0
  40. package/docs/design/discovery-loop-fix-design-review.md +106 -0
  41. package/docs/design/discovery-loop-fix-validation.md +258 -0
  42. package/docs/design/discovery-loop-investigation-A.md +188 -0
  43. package/docs/design/discovery-loop-investigation-B.md +287 -0
  44. package/docs/design/exploration-workflow-candidates.md +205 -0
  45. package/docs/design/exploration-workflow-design-review.md +166 -0
  46. package/docs/design/exploration-workflow-discovery.md +443 -0
  47. package/docs/design/ide-context-files-candidates.md +231 -0
  48. package/docs/design/ide-context-files-design-review.md +85 -0
  49. package/docs/design/ide-context-files.md +615 -0
  50. package/docs/design/implementation-plan-discovery-loop-fix.md +199 -0
  51. package/docs/design/implementation-plan-queue-poll-rotation.md +102 -0
  52. package/docs/design/in-process-http-audit.md +190 -0
  53. package/docs/design/layer3b-ghost-nodes-design-candidates.md +2 -2
  54. package/docs/design/loadSessionNotes-candidates.md +108 -0
  55. package/docs/design/loadSessionNotes-test-coverage-discovery.md +297 -0
  56. package/docs/design/loadSessionNotes-test-coverage-session4.md +209 -0
  57. package/docs/design/loadSessionNotes-test-coverage-v3.md +321 -0
  58. package/docs/design/probe-session-design-candidates.md +261 -0
  59. package/docs/design/probe-session-phase0.md +490 -0
  60. package/docs/design/routines-guide.md +7 -7
  61. package/docs/design/session-metrics-attribution-candidates.md +250 -0
  62. package/docs/design/session-metrics-attribution-design-review.md +115 -0
  63. package/docs/design/session-metrics-attribution-discovery.md +319 -0
  64. package/docs/design/session-metrics-candidates.md +227 -0
  65. package/docs/design/session-metrics-design-review.md +104 -0
  66. package/docs/design/session-metrics-discovery.md +454 -0
  67. package/docs/design/spawn-session-debug.md +202 -0
  68. package/docs/design/trigger-validator-candidates.md +214 -0
  69. package/docs/design/trigger-validator-review.md +109 -0
  70. package/docs/design/trigger-validator-shaping-phase0.md +239 -0
  71. package/docs/design/trigger-validator.md +454 -0
  72. package/docs/design/v2-core-design-locks.md +2 -2
  73. package/docs/design/workflow-extension-points.md +15 -15
  74. package/docs/design/workflow-id-validation-at-startup.md +1 -1
  75. package/docs/design/workflow-id-validation-implementation-plan.md +2 -2
  76. package/docs/design/workflow-trigger-lifecycle-audit.md +175 -0
  77. package/docs/design/worktrain-task-queue-candidates.md +5 -5
  78. package/docs/design/worktrain-task-queue.md +4 -4
  79. package/docs/discovery/coordinator-script-design.md +1 -1
  80. package/docs/discovery/coordinator-ux-discovery.md +3 -3
  81. package/docs/discovery/simulation-report.md +1 -1
  82. package/docs/discovery/workflow-modernization-discovery.md +326 -0
  83. package/docs/discovery/workflow-selection-for-discovery-tasks.md +33 -33
  84. package/docs/discovery/worktrain-status-briefing.md +1 -1
  85. package/docs/discovery/wr-discovery-goal-reframing.md +1 -1
  86. package/docs/docker.md +1 -1
  87. package/docs/ideas/backlog.md +227 -0
  88. package/docs/ideas/third-party-workflow-setup-design-thinking.md +1 -1
  89. package/docs/integrations/claude-code.md +5 -5
  90. package/docs/integrations/firebender.md +1 -1
  91. package/docs/plans/agentic-orchestration-roadmap.md +2 -2
  92. package/docs/plans/mr-review-workflow-redesign.md +9 -9
  93. package/docs/plans/ui-ux-workflow-design-candidates.md +4 -4
  94. package/docs/plans/ui-ux-workflow-discovery.md +2 -2
  95. package/docs/plans/workflow-categories-candidates.md +8 -8
  96. package/docs/plans/workflow-categories-discovery.md +4 -4
  97. package/docs/plans/workflow-modernization-design.md +430 -0
  98. package/docs/plans/workflow-staleness-detection-candidates.md +11 -11
  99. package/docs/plans/workflow-staleness-detection-review.md +4 -4
  100. package/docs/plans/workflow-staleness-detection.md +9 -9
  101. package/docs/plans/workrail-platform-vision.md +3 -3
  102. package/docs/reference/agent-context-cleaner-snippet.md +1 -1
  103. package/docs/reference/agent-context-guidance.md +4 -4
  104. package/docs/reference/context-optimization.md +2 -2
  105. package/docs/roadmap/now-next-later.md +2 -2
  106. package/docs/roadmap/open-work-inventory.md +16 -16
  107. package/docs/workflows.md +31 -31
  108. package/package.json +1 -1
  109. package/spec/workflow-tags.json +47 -47
  110. package/workflows/adaptive-ticket-creation.json +16 -16
  111. package/workflows/architecture-scalability-audit.json +22 -22
  112. package/workflows/bug-investigation.agentic.v2.json +3 -3
  113. package/workflows/classify-task-workflow.json +1 -1
  114. package/workflows/coding-task-workflow-agentic.json +6 -6
  115. package/workflows/cross-platform-code-conversion.v2.json +8 -8
  116. package/workflows/document-creation-workflow.json +8 -8
  117. package/workflows/documentation-update-workflow.json +8 -8
  118. package/workflows/intelligent-test-case-generation.json +2 -2
  119. package/workflows/learner-centered-course-workflow.json +2 -2
  120. package/workflows/mr-review-workflow.agentic.v2.json +4 -4
  121. package/workflows/personal-learning-materials-creation-branched.json +8 -8
  122. package/workflows/presentation-creation.json +5 -5
  123. package/workflows/production-readiness-audit.json +1 -1
  124. package/workflows/relocation-workflow-us.json +31 -31
  125. package/workflows/routines/context-gathering.json +1 -1
  126. package/workflows/routines/design-review.json +1 -1
  127. package/workflows/routines/execution-simulation.json +1 -1
  128. package/workflows/routines/feature-implementation.json +3 -3
  129. package/workflows/routines/final-verification.json +1 -1
  130. package/workflows/routines/hypothesis-challenge.json +1 -1
  131. package/workflows/routines/ideation.json +1 -1
  132. package/workflows/routines/parallel-work-partitioning.json +3 -3
  133. package/workflows/routines/philosophy-alignment.json +2 -2
  134. package/workflows/routines/plan-analysis.json +1 -1
  135. package/workflows/routines/plan-generation.json +1 -1
  136. package/workflows/routines/tension-driven-design.json +6 -6
  137. package/workflows/scoped-documentation-workflow.json +26 -26
  138. package/workflows/ui-ux-design-workflow.json +14 -14
  139. package/workflows/workflow-diagnose-environment.json +1 -1
  140. package/workflows/workflow-for-workflows.json +1 -1
@@ -1,5 +1,5 @@
1
1
  {
2
- "id": "cross-platform-code-conversion",
2
+ "id": "wr.cross-platform-code-conversion",
3
3
  "name": "Cross-Platform Code Conversion",
4
4
  "version": "0.1.0",
5
5
  "metricsProfile": "coding",
@@ -79,7 +79,7 @@
79
79
  {
80
80
  "id": "phase-1-understand-source",
81
81
  "title": "Phase 1: Understand Source Code",
82
- "prompt": "Read and analyze the source code through a conversion lens what will be easy to convert, what will be hard, and why.\n\nMap out:\n- Architecture and module structure\n- Key patterns used (MVI, MVVM, dependency injection, etc.)\n- External dependencies and what they do\n- Entry points and public API surface\n- Platform coupling depth: is the code cleanly layered or is platform-specific code smeared throughout? This directly determines how much falls into easy vs. hard buckets.\n- Concurrency model: Coroutines, Combine, RxJS, async/await? This is often the single hardest mapping decision.\n- DI approach: Dagger/Hilt, Swinject, Koin? DI frameworks rarely map 1:1.\n- Test coverage shape: unit tests on business logic (convert easily), UI tests (likely rewrite), integration tests (depends on infra).\n- Shared code boundaries: is there already a shared/common module that might not need conversion at all?\n- Non-trivial migration boundaries: public APIs, externally consumed module boundaries, and lifecycle/state/concurrency/resource boundaries that callers depend on.\n- Caller-visible guarantees for those boundaries. Examples include lifecycle/ownership, laziness vs eagerness, shared vs per-consumer behavior, cancellation/disposal, ordering/replay/buffering, failure behavior, threading/scheduling, or consistency/transaction guarantees.\n- Adaptation depth: classify whether the migration is `low`, `moderate`, or `high` adaptation based on architectural mismatch, missing target-side equivalents, lifecycle/state/concurrency mismatch, and the amount of adapter or redesign work needed.\n\nIdentify which files define or materially affect those boundaries and which of them will require target-repo integration analysis.\n\nCapture:\n- `sourceArchitecture`\n- `dependencies`\n- `publicApiSurface`\n- `platformCouplingAssessment`\n- `concurrencyModel`\n- `testCoverageShape`\n- `semanticBoundaryCandidates`\n- `boundaryCriticalFiles`\n- `adaptationProfile`",
82
+ "prompt": "Read and analyze the source code through a conversion lens \u2014 what will be easy to convert, what will be hard, and why.\n\nMap out:\n- Architecture and module structure\n- Key patterns used (MVI, MVVM, dependency injection, etc.)\n- External dependencies and what they do\n- Entry points and public API surface\n- Platform coupling depth: is the code cleanly layered or is platform-specific code smeared throughout? This directly determines how much falls into easy vs. hard buckets.\n- Concurrency model: Coroutines, Combine, RxJS, async/await? This is often the single hardest mapping decision.\n- DI approach: Dagger/Hilt, Swinject, Koin? DI frameworks rarely map 1:1.\n- Test coverage shape: unit tests on business logic (convert easily), UI tests (likely rewrite), integration tests (depends on infra).\n- Shared code boundaries: is there already a shared/common module that might not need conversion at all?\n- Non-trivial migration boundaries: public APIs, externally consumed module boundaries, and lifecycle/state/concurrency/resource boundaries that callers depend on.\n- Caller-visible guarantees for those boundaries. Examples include lifecycle/ownership, laziness vs eagerness, shared vs per-consumer behavior, cancellation/disposal, ordering/replay/buffering, failure behavior, threading/scheduling, or consistency/transaction guarantees.\n- Adaptation depth: classify whether the migration is `low`, `moderate`, or `high` adaptation based on architectural mismatch, missing target-side equivalents, lifecycle/state/concurrency mismatch, and the amount of adapter or redesign work needed.\n\nIdentify which files define or materially affect those boundaries and which of them will require target-repo integration analysis.\n\nCapture:\n- `sourceArchitecture`\n- `dependencies`\n- `publicApiSurface`\n- `platformCouplingAssessment`\n- `concurrencyModel`\n- `testCoverageShape`\n- `semanticBoundaryCandidates`\n- `boundaryCriticalFiles`\n- `adaptationProfile`",
83
83
  "promptFragments": [
84
84
  {
85
85
  "id": "phase-1-small-light",
@@ -87,7 +87,7 @@
87
87
  "var": "conversionComplexity",
88
88
  "equals": "Small"
89
89
  },
90
- "text": "For Small conversions, keep this lightweight. A quick read of the files in scope is enough don't map the entire architecture. Focus on identifying any platform-specific code that would prevent a straight translation."
90
+ "text": "For Small conversions, keep this lightweight. A quick read of the files in scope is enough \u2014 don't map the entire architecture. Focus on identifying any platform-specific code that would prevent a straight translation."
91
91
  }
92
92
  ],
93
93
  "requireConfirmation": {
@@ -110,7 +110,7 @@
110
110
  }
111
111
  ]
112
112
  },
113
- "prompt": "For Small conversions, skip triage and planning just convert.\n\n- Translate the files to the target platform idiomatically\n- Follow target platform naming and structure conventions\n- Map any dependencies to target equivalents\n- Convert tests if they exist\n- Run build or typecheck to verify\n\nIf something turns out harder than expected (deep platform coupling, no clean dependency equivalent, or meaningful architectural mismatch), update `conversionComplexity` to `Medium`, update `adaptationProfile` to `moderate` or `high` based on the newly discovered mismatch, and stop. The full triage and planning pipeline will activate for the remaining work.\n\nCapture:\n- `filesConverted`\n- `buildPassed`\n- `conversionComplexity`\n- `adaptationProfile`",
113
+ "prompt": "For Small conversions, skip triage and planning \u2014 just convert.\n\n- Translate the files to the target platform idiomatically\n- Follow target platform naming and structure conventions\n- Map any dependencies to target equivalents\n- Convert tests if they exist\n- Run build or typecheck to verify\n\nIf something turns out harder than expected (deep platform coupling, no clean dependency equivalent, or meaningful architectural mismatch), update `conversionComplexity` to `Medium`, update `adaptationProfile` to `moderate` or `high` based on the newly discovered mismatch, and stop. The full triage and planning pipeline will activate for the remaining work.\n\nCapture:\n- `filesConverted`\n- `buildPassed`\n- `conversionComplexity`\n- `adaptationProfile`",
114
114
  "requireConfirmation": false
115
115
  },
116
116
  {
@@ -128,7 +128,7 @@
128
128
  }
129
129
  ]
130
130
  },
131
- "prompt": "Classify every file or module in scope into one of three buckets:\n\n**Bucket A Literal translation**: Platform-agnostic business logic, data models, utilities, pure functions. These use no platform-specific APIs or libraries. Conversion is mechanical: translate the language syntax, follow target naming conventions, done. These will be delegated to subagents.\n\n**Bucket B Library substitution**: Code that uses platform-specific libraries (networking, persistence, serialization, DI) but follows standard patterns. These need dependency mapping but the structure stays the same.\n\n**Bucket C Platform-specific**: Code deeply tied to the platform (UI layer, lifecycle management, concurrency/threading, navigation, platform APIs). These need design decisions about target-platform idioms.\n\nFor each file or module, list:\n- File/module name\n- Bucket (A, B, or C)\n- One-line reason for classification\n- Dependencies it has on other files in scope (so we know conversion order)\n- Whether it is `boundaryCritical` for a non-trivial migration boundary\n- Which semantic boundaries it affects from `semanticBoundaryCandidates`\n- Whether it will require target-repo integration analysis\n\nBoundary-critical files must not be treated as blind mechanical translation just because the syntax looks simple. If a file materially affects a semantic boundary or destination-repo seam, keep it with main-agent review.\n\nSort the work items within each bucket by dependency order (convert dependencies first).\n\nGroup Bucket A files into parallel batches of 3-5 files each. 
Each batch should contain files with no cross-dependencies so subagents can work independently.\n\nGroup Bucket B and C files into sequential batches by dependency order.\n\nEach batch should have: `name` (short label), `bucket` (A, B, or C), and `files` (list of file paths).\n\nCapture:\n- `bucketABatches` (parallel batches for subagent delegation)\n- `bucketBCBatches` (sequential batches for main agent)\n- `bucketACounts`\n- `bucketBCounts`\n- `bucketCCounts`\n- `boundaryCriticalItems`",
131
+ "prompt": "Classify every file or module in scope into one of three buckets:\n\n**Bucket A \u2014 Literal translation**: Platform-agnostic business logic, data models, utilities, pure functions. These use no platform-specific APIs or libraries. Conversion is mechanical: translate the language syntax, follow target naming conventions, done. These will be delegated to subagents.\n\n**Bucket B \u2014 Library substitution**: Code that uses platform-specific libraries (networking, persistence, serialization, DI) but follows standard patterns. These need dependency mapping but the structure stays the same.\n\n**Bucket C \u2014 Platform-specific**: Code deeply tied to the platform (UI layer, lifecycle management, concurrency/threading, navigation, platform APIs). These need design decisions about target-platform idioms.\n\nFor each file or module, list:\n- File/module name\n- Bucket (A, B, or C)\n- One-line reason for classification\n- Dependencies it has on other files in scope (so we know conversion order)\n- Whether it is `boundaryCritical` for a non-trivial migration boundary\n- Which semantic boundaries it affects from `semanticBoundaryCandidates`\n- Whether it will require target-repo integration analysis\n\nBoundary-critical files must not be treated as blind mechanical translation just because the syntax looks simple. If a file materially affects a semantic boundary or destination-repo seam, keep it with main-agent review.\n\nSort the work items within each bucket by dependency order (convert dependencies first).\n\nGroup Bucket A files into parallel batches of 3-5 files each. 
Each batch should contain files with no cross-dependencies so subagents can work independently.\n\nGroup Bucket B and C files into sequential batches by dependency order.\n\nEach batch should have: `name` (short label), `bucket` (A, B, or C), and `files` (list of file paths).\n\nCapture:\n- `bucketABatches` (parallel batches for subagent delegation)\n- `bucketBCBatches` (sequential batches for main agent)\n- `bucketACounts`\n- `bucketBCounts`\n- `bucketCCounts`\n- `boundaryCriticalItems`",
132
132
  "requireConfirmation": true
133
133
  },
134
134
  {
@@ -275,7 +275,7 @@
275
275
  "var": "conversionComplexity",
276
276
  "equals": "Medium"
277
277
  },
278
- "text": "For Medium conversions, focus the plan on the items that actually need design decisions. Don't exhaustively map every dimension only the ones relevant to the files in scope."
278
+ "text": "For Medium conversions, focus the plan on the items that actually need design decisions. Don't exhaustively map every dimension \u2014 only the ones relevant to the files in scope."
279
279
  },
280
280
  {
281
281
  "id": "phase-3f-high-adaptation",
@@ -520,7 +520,7 @@
520
520
  {
521
521
  "id": "phase-6a-full-build",
522
522
  "title": "Full Build and Integration Check",
523
- "prompt": "Run a full build or typecheck on the entire converted codebase both subagent-converted and main-agent-converted code together.\n\nCheck for:\n- Build/compile errors from cross-batch integration issues\n- Inconsistencies between subagent output and main agent output (naming, patterns)\n- Non-idiomatic patterns that slipped through\n- Missing error handling at module boundaries\n- Threading or concurrency issues across modules\n- Broken public API contracts\n- Contract inventory drift: every row in `semanticContractInventory` is still accounted for, no `uncertain` rows remain, preserved contracts still look preserved, and intentional changes are still justified\n- Target integration drift: code landed in the intended target layer/module, reuse/adaptation decisions still fit the observed target seams, and no unresolved target integration uncertainties remain\n- High-adaptation architecture drift: if `adaptationProfile` is `high`, the final code still matches `architectureAdaptationPlan` and any deviations are explicit and justified\n\nFix each issue. If a fix is a band-aid over a deeper mapping problem, go back and fix the mapping.\n\nCapture:\n- `fullBuildPassed`\n- `integrationIssues`\n- `issuesFixed`",
523
+ "prompt": "Run a full build or typecheck on the entire converted codebase \u2014 both subagent-converted and main-agent-converted code together.\n\nCheck for:\n- Build/compile errors from cross-batch integration issues\n- Inconsistencies between subagent output and main agent output (naming, patterns)\n- Non-idiomatic patterns that slipped through\n- Missing error handling at module boundaries\n- Threading or concurrency issues across modules\n- Broken public API contracts\n- Contract inventory drift: every row in `semanticContractInventory` is still accounted for, no `uncertain` rows remain, preserved contracts still look preserved, and intentional changes are still justified\n- Target integration drift: code landed in the intended target layer/module, reuse/adaptation decisions still fit the observed target seams, and no unresolved target integration uncertainties remain\n- High-adaptation architecture drift: if `adaptationProfile` is `high`, the final code still matches `architectureAdaptationPlan` and any deviations are explicit and justified\n\nFix each issue. If a fix is a band-aid over a deeper mapping problem, go back and fix the mapping.\n\nCapture:\n- `fullBuildPassed`\n- `integrationIssues`\n- `issuesFixed`",
524
524
  "requireConfirmation": false
525
525
  },
526
526
  {
@@ -545,7 +545,7 @@
545
545
  "var": "conversionComplexity",
546
546
  "equals": "Small"
547
547
  },
548
- "text": "For Small conversions, keep the summary brief just list what was converted, build status, and any issues."
548
+ "text": "For Small conversions, keep the summary brief \u2014 just list what was converted, build status, and any issues."
549
549
  },
550
550
  {
551
551
  "id": "phase-7-full-summary",
@@ -1,9 +1,9 @@
1
1
  {
2
- "id": "document-creation-workflow",
2
+ "id": "wr.document-creation",
3
3
  "name": "Document Creation Workflow",
4
4
  "version": "1.0.0",
5
5
  "metricsProfile": "coding",
6
- "description": "Use this to create broad or comprehensive documentation spanning multiple components or systems project READMEs, complete API docs, user guides, or technical specifications.",
6
+ "description": "Use this to create broad or comprehensive documentation spanning multiple components or systems \u2014 project READMEs, complete API docs, user guides, or technical specifications.",
7
7
  "about": "## Document Creation Workflow\n\nThis workflow guides you through creating new documentation from scratch -- ranging from a simple project README to a full technical specification spanning multiple systems. It automatically calibrates depth to match the complexity of your request: simple tasks go straight to writing, while complex documentation gets a full analysis-and-planning phase first.\n\n### What it produces\n\nA complete, saved documentation file ready for use. Depending on complexity, it may also include a quality review pass covering accuracy, completeness, audience fit, usability, and style consistency.\n\n### When to use it\n\n- You need to create a **new** document (not update an existing one -- see the Documentation Update workflow for that).\n- The document spans one or more systems, components, or audiences.\n- Examples: project READMEs, API reference docs, user guides, onboarding docs, technical specifications, architecture overviews.\n\n### When NOT to use it\n\n- You want to update or refresh an existing doc -- use the Documentation Update workflow instead.\n- You need tight scope discipline for a single class or mechanism -- the Scoped Documentation workflow is better suited.\n\n### How to get good results\n\n- Be specific about the document type and intended audience upfront. The workflow probes for these, but the clearer your initial goal, the less back-and-forth.\n- If your project has existing documentation or style conventions, mention them -- the workflow will follow them.\n- For complex documentation, the workflow asks a small number of targeted questions it cannot answer from the codebase. Answer these concisely to keep momentum.",
8
8
  "examples": [
9
9
  "Create a README for the payments-service repo with setup, config, and deployment instructions",
@@ -20,9 +20,9 @@
20
20
  "metaGuidance": [
21
21
  "NOTES-FIRST DURABILITY: use output.notesMarkdown as the primary durable record. Do NOT create CONTEXT.md, doc_spec.md, or content_plan.md as workflow memory.",
22
22
  "DISCOVER BEFORE ASKING: use tools to explore the project before asking clarification questions. Only ask what tools cannot answer.",
23
- "COMPLEXITY DRIVES BRANCHING: docComplexity=Simple uses the fast path; Standard/Complex uses the full path. If complexity changes during work, note it in notesMarkdown and adapt no retriage ceremony needed.",
23
+ "COMPLEXITY DRIVES BRANCHING: docComplexity=Simple uses the fast path; Standard/Complex uses the full path. If complexity changes during work, note it in notesMarkdown and adapt \u2014 no retriage ceremony needed.",
24
24
  "CONTENT-FIRST: the deliverable is the document, not planning artifacts. Keep planning proportional to scope.",
25
- "EVIDENCE-BASED QUALITY: each quality dimension in the review step requires a one-sentence evidence statement and a pass or needs-work verdict not a numeric score."
25
+ "EVIDENCE-BASED QUALITY: each quality dimension in the review step requires a one-sentence evidence statement and a pass or needs-work verdict \u2014 not a numeric score."
26
26
  ],
27
27
  "steps": [
28
28
  {
@@ -49,7 +49,7 @@
49
49
  }
50
50
  ]
51
51
  },
52
- "prompt": "Analyze the project to inform documentation strategy. Limit this analysis to 1500 words; prioritize documentation-relevant insights.\n\nCover:\n1. **Existing documentation landscape** current docs, style patterns, gaps\n2. **Project architecture** key components relevant to this document\n3. **User or developer workflows** how documentation fits into user journeys\n4. **Technical constraints** APIs, systems, integrations to document\n5. **Style conventions** terminology, formatting, naming patterns to follow\n6. **Audience** who will use this documentation and what they need to accomplish\n\nNote any complexity indicators that might warrant reclassifying `docComplexity` upward.",
52
+ "prompt": "Analyze the project to inform documentation strategy. Limit this analysis to 1500 words; prioritize documentation-relevant insights.\n\nCover:\n1. **Existing documentation landscape** \u2014 current docs, style patterns, gaps\n2. **Project architecture** \u2014 key components relevant to this document\n3. **User or developer workflows** \u2014 how documentation fits into user journeys\n4. **Technical constraints** \u2014 APIs, systems, integrations to document\n5. **Style conventions** \u2014 terminology, formatting, naming patterns to follow\n6. **Audience** \u2014 who will use this documentation and what they need to accomplish\n\nNote any complexity indicators that might warrant reclassifying `docComplexity` upward.",
53
53
  "requireConfirmation": false
54
54
  },
55
55
  {
@@ -85,7 +85,7 @@
85
85
  }
86
86
  ]
87
87
  },
88
- "prompt": "Create a content plan for this documentation in your notes.\n\nThe plan should cover:\n1. Document purpose and success criteria\n2. Target audience and their primary goals\n3. Section outline with one-line descriptions\n4. Writing strategy tone, technical depth, key terminology\n5. Visual elements or code examples needed\n\nKeep the plan proportional to scope. The goal is a clear outline to execute against, not a heavyweight specification.",
88
+ "prompt": "Create a content plan for this documentation in your notes.\n\nThe plan should cover:\n1. Document purpose and success criteria\n2. Target audience and their primary goals\n3. Section outline with one-line descriptions\n4. Writing strategy \u2014 tone, technical depth, key terminology\n5. Visual elements or code examples needed\n\nKeep the plan proportional to scope. The goal is a clear outline to execute against, not a heavyweight specification.",
89
89
  "promptFragments": [
90
90
  {
91
91
  "id": "phase-3-plan-complex",
@@ -119,7 +119,7 @@
119
119
  }
120
120
  ]
121
121
  },
122
- "prompt": "Review the documentation you just wrote using this rubric. For each dimension, provide a one-sentence evidence statement and a verdict of `pass` or `needs-work`.\n\n1. **Accuracy** Does the content correctly describe the actual project or system? *(Evidence: cite one verified fact.)*\n2. **Completeness** Does it cover all planned sections? *(Evidence: list planned vs completed sections.)*\n3. **Audience fit** Is the technical depth right for the target reader? *(Evidence: identify one audience-appropriate choice made.)*\n4. **Usability** Could a reader actually accomplish their goal using this doc? *(Evidence: trace one user journey through the doc.)*\n5. **Consistency** Does it match project conventions for style, terminology, and format? *(Evidence: cite one convention followed.)*\n\nIf any dimension is `needs-work`, fix the issue immediately and re-assert the dimension as `pass` in your notes before continuing.",
122
+ "prompt": "Review the documentation you just wrote using this rubric. For each dimension, provide a one-sentence evidence statement and a verdict of `pass` or `needs-work`.\n\n1. **Accuracy** \u2014 Does the content correctly describe the actual project or system? *(Evidence: cite one verified fact.)*\n2. **Completeness** \u2014 Does it cover all planned sections? *(Evidence: list planned vs completed sections.)*\n3. **Audience fit** \u2014 Is the technical depth right for the target reader? *(Evidence: identify one audience-appropriate choice made.)*\n4. **Usability** \u2014 Could a reader actually accomplish their goal using this doc? *(Evidence: trace one user journey through the doc.)*\n5. **Consistency** \u2014 Does it match project conventions for style, terminology, and format? *(Evidence: cite one convention followed.)*\n\nIf any dimension is `needs-work`, fix the issue immediately and re-assert the dimension as `pass` in your notes before continuing.",
123
123
  "promptFragments": [
124
124
  {
125
125
  "id": "phase-5-quality-review-complex",
@@ -127,7 +127,7 @@
127
127
  "var": "docComplexity",
128
128
  "equals": "Complex"
129
129
  },
130
- "text": "Also review a sixth dimension:\n6. **Integration coherence** Does the doc integrate correctly with the existing documentation ecosystem? *(Evidence: describe how it cross-links or relates to existing docs.)*"
130
+ "text": "Also review a sixth dimension:\n6. **Integration coherence** \u2014 Does the doc integrate correctly with the existing documentation ecosystem? *(Evidence: describe how it cross-links or relates to existing docs.)*"
131
131
  }
132
132
  ],
133
133
  "requireConfirmation": false
@@ -1,5 +1,5 @@
1
1
  {
2
- "id": "documentation-update-workflow",
2
+ "id": "wr.documentation-update",
3
3
  "name": "Documentation Update & Maintenance Workflow",
4
4
  "version": "2.0.0",
5
5
  "metricsProfile": "coding",
@@ -22,15 +22,15 @@
22
22
  "GIT-EVIDENCE-FIRST: staleness judgment must be grounded in actual git log output. Do not assert a doc is stale based on reading it alone. Run git log; record commit SHAs and messages as evidence.",
23
23
  "PRESERVATION-FIRST: keep accurate, well-written content unchanged. Only update what is demonstrably stale or incorrect. Targeted updates are better than wholesale rewrites.",
24
24
  "VERIFY AGAINST CODE: all updated technical content must be checked against current codebase state. Code examples and API references must match what is actually in the code today.",
25
- "DEGRADE AND DISCLOSE: if git history is unavailable or shallow for some paths, classify staleness as medium and note what evidence is missing. Never block proceed with what is available.",
25
+ "DEGRADE AND DISCLOSE: if git history is unavailable or shallow for some paths, classify staleness as medium and note what evidence is missing. Never block \u2014 proceed with what is available.",
26
26
  "SELF-EXECUTE: explore first with tools. Ask the user only what you genuinely cannot determine from the codebase and git history. The one real confirmation gate is the update plan before executing edits.",
27
- "LOOP DISCIPLINE: the update loop runs without per-section gates the plan was approved in phase-2. Only pause if a section requires changes beyond what the approved plan covers; note the deviation and ask."
27
+ "LOOP DISCIPLINE: the update loop runs without per-section gates \u2014 the plan was approved in phase-2. Only pause if a section requires changes beyond what the approved plan covers; note the deviation and ask."
28
28
  ],
29
29
  "steps": [
30
30
  {
31
31
  "id": "phase-0-assess",
32
32
  "title": "Phase 0: Assess Documentation & Establish Git Baseline",
33
- "prompt": "Locate the target documentation and establish an evidence-based staleness assessment before you decide anything.\n\n**Step 1: Locate and inventory target docs**\n- Identify all documentation files in scope\n- Note file formats, structure, and rough section organization\n- Infer the code paths these docs reference (scopePaths)\n\n**Step 2: Git baseline**\n- Run `git log -1 <docPath>` for each target doc to get the last commit SHA and date\n- Run `git log <lastCommitSha>..HEAD -- <scopePaths>` to get all code changes since the doc was last updated\n- For each commit, classify impact: API/breaking (new exports, changed interfaces, removed functions), behavioral (changed logic), config (schema/option changes), or minor (refactor, rename, test-only)\n\n**Step 3: Staleness classification (rubric-based)**\n\nScore these three dimensions:\n- **Impact**: any API/breaking changes? any behavioral or config changes?\n- **Volume**: how many commits changed the relevant scope since last doc update?\n- **Age**: how many days since the doc was last committed?\n\nDerive `stalenessLevel`:\n- `high`: any API/breaking impact, OR volume > 5 commits AND age > 90 days\n- `medium`: volume > 2 commits, OR age > 60 days, OR behavioral/config changes present\n- `low`: few changes, nothing impacting documented behavior, age < 60 days\n- If git history is unavailable: `medium` note what is missing\n\nDerive `updateUrgency`:\n- `high` staleness `immediate`\n- `medium` staleness `scheduled`\n- `low` staleness `monitor` if user did not request a forced update, document why and offer to exit\n\n**Capture:**\n- `targetDocPaths`, `scopePaths`\n- `gitLastDocCommitSha`, `gitLastDocCommitDate`\n- `stalenessLevel`, `updateUrgency`\n- `gitChangeSummary` prose summary of what changed and why it matters for the docs",
33
+ "prompt": "Locate the target documentation and establish an evidence-based staleness assessment before you decide anything.\n\n**Step 1: Locate and inventory target docs**\n- Identify all documentation files in scope\n- Note file formats, structure, and rough section organization\n- Infer the code paths these docs reference (scopePaths)\n\n**Step 2: Git baseline**\n- Run `git log -1 <docPath>` for each target doc to get the last commit SHA and date\n- Run `git log <lastCommitSha>..HEAD -- <scopePaths>` to get all code changes since the doc was last updated\n- For each commit, classify impact: API/breaking (new exports, changed interfaces, removed functions), behavioral (changed logic), config (schema/option changes), or minor (refactor, rename, test-only)\n\n**Step 3: Staleness classification (rubric-based)**\n\nScore these three dimensions:\n- **Impact**: any API/breaking changes? any behavioral or config changes?\n- **Volume**: how many commits changed the relevant scope since last doc update?\n- **Age**: how many days since the doc was last committed?\n\nDerive `stalenessLevel`:\n- `high`: any API/breaking impact, OR volume > 5 commits AND age > 90 days\n- `medium`: volume > 2 commits, OR age > 60 days, OR behavioral/config changes present\n- `low`: few changes, nothing impacting documented behavior, age < 60 days\n- If git history is unavailable: `medium` \u2014 note what is missing\n\nDerive `updateUrgency`:\n- `high` staleness \u2192 `immediate`\n- `medium` staleness \u2192 `scheduled`\n- `low` staleness \u2192 `monitor` \u2014 if user did not request a forced update, document why and offer to exit\n\n**Capture:**\n- `targetDocPaths`, `scopePaths`\n- `gitLastDocCommitSha`, `gitLastDocCommitDate`\n- `stalenessLevel`, `updateUrgency`\n- `gitChangeSummary` \u2014 prose summary of what changed and why it matters for the docs",
34
34
  "requireConfirmation": {
35
35
  "var": "updateUrgency",
36
36
  "equals": "monitor"
@@ -39,13 +39,13 @@
39
39
  {
40
40
  "id": "phase-1-analyze",
41
41
  "title": "Phase 1: Section-by-Section Gap Analysis",
42
- "prompt": "Now map each documentation section to current code and classify what needs to change.\n\nFor each section in the target docs:\n1. **Map to code** which files, functions, APIs, or behaviors does this section describe?\n2. **Assess accuracy** does the current code match what the section says? Check API signatures, config options, behavioral descriptions, examples, and file paths.\n3. **Classify the section action**:\n - `preserve` still accurate, well-written; keep unchanged\n - `update` needs correction or expansion; note specifically what changed\n - `remove` describes something that no longer exists\n\n4. **Assign update type** for sections marked `update`:\n - `corrective` fix inaccurate information\n - `additive` add missing coverage for new features\n - `expansive` expand thin explanations\n - `reductive` remove deprecated or removed content\n - `structural` reorganize while preserving content\n\n5. **Assign priority** for sections marked `update`:\n - `critical` inaccurate content that would cause errors or confusion for users\n - `important` missing or outdated content for significant features or workflows\n - `beneficial` improvements that add value but aren't blocking\n\n**Capture:**\n- `sectionInventory` list of all sections with: sectionId, action (preserve/update/remove), updateType, priority, and a one-line reason\n\nEvery section must be classified before moving on.",
42
+ "prompt": "Now map each documentation section to current code and classify what needs to change.\n\nFor each section in the target docs:\n1. **Map to code** \u2014 which files, functions, APIs, or behaviors does this section describe?\n2. **Assess accuracy** \u2014 does the current code match what the section says? Check API signatures, config options, behavioral descriptions, examples, and file paths.\n3. **Classify the section action**:\n - `preserve` \u2014 still accurate, well-written; keep unchanged\n - `update` \u2014 needs correction or expansion; note specifically what changed\n - `remove` \u2014 describes something that no longer exists\n\n4. **Assign update type** for sections marked `update`:\n - `corrective` \u2014 fix inaccurate information\n - `additive` \u2014 add missing coverage for new features\n - `expansive` \u2014 expand thin explanations\n - `reductive` \u2014 remove deprecated or removed content\n - `structural` \u2014 reorganize while preserving content\n\n5. **Assign priority** for sections marked `update`:\n - `critical` \u2014 inaccurate content that would cause errors or confusion for users\n - `important` \u2014 missing or outdated content for significant features or workflows\n - `beneficial` \u2014 improvements that add value but aren't blocking\n\n**Capture:**\n- `sectionInventory` \u2014 list of all sections with: sectionId, action (preserve/update/remove), updateType, priority, and a one-line reason\n\nEvery section must be classified before moving on.",
43
43
  "requireConfirmation": false
44
44
  },
45
45
  {
46
46
  "id": "phase-2-plan",
47
47
  "title": "Phase 2: Update Plan",
48
- "prompt": "Build the ordered update plan based on the section inventory, then confirm it with me before you start editing anything.\n\nFrom `sectionInventory`, create `updatePlan` as an ordered list:\n1. All `critical` updates first\n2. `important` updates next\n3. `beneficial` updates last (may defer if scope is large)\n\nFor each entry in the plan:\n- Section name and location\n- Update type (corrective / additive / expansive / reductive / structural)\n- Specific description of what to change and why\n- What content to preserve unchanged\n\nAlso note:\n- Total sections to update vs total sections to preserve\n- Any `remove` entries that need explicit deletion\n- Any sections that are `beneficial` you recommend deferring\n\n**This step requires confirmation** I need to review the plan before you make edits to the documentation files.\n\n**Capture:**\n- `updatePlan` ordered list as described above\n- `sectionsRemaining` total count of sections to update (for loop tracking)",
48
+ "prompt": "Build the ordered update plan based on the section inventory, then confirm it with me before you start editing anything.\n\nFrom `sectionInventory`, create `updatePlan` as an ordered list:\n1. All `critical` updates first\n2. `important` updates next\n3. `beneficial` updates last (may defer if scope is large)\n\nFor each entry in the plan:\n- Section name and location\n- Update type (corrective / additive / expansive / reductive / structural)\n- Specific description of what to change and why\n- What content to preserve unchanged\n\nAlso note:\n- Total sections to update vs total sections to preserve\n- Any `remove` entries that need explicit deletion\n- Any sections that are `beneficial` you recommend deferring\n\n**This step requires confirmation** \u2014 I need to review the plan before you make edits to the documentation files.\n\n**Capture:**\n- `updatePlan` \u2014 ordered list as described above\n- `sectionsRemaining` \u2014 total count of sections to update (for loop tracking)",
49
49
  "requireConfirmation": true
50
50
  },
51
51
  {
@@ -77,7 +77,7 @@
77
77
  {
78
78
  "id": "verify-section",
79
79
  "title": "Verify the Updated Section",
80
- "prompt": "Verify that the update to `currentSection` is correct before moving on.\n\nCheck:\n1. **Technical accuracy** does every technical claim match the current codebase? Check the code directly if needed.\n2. **Code examples** are all code blocks in this section syntactically valid and behaviorally correct against current APIs?\n3. **Preservation** what did you keep unchanged? Confirm the preserved content is still present and intact.\n4. **Cross-references** are any internal links pointing to or from this section still working?\n\nDecrement `sectionsRemaining` by 1.\n\nRecord findings in notes: what you changed, what you preserved, any issues found.\n\n**Capture:** updated `sectionsRemaining`",
80
+ "prompt": "Verify that the update to `currentSection` is correct before moving on.\n\nCheck:\n1. **Technical accuracy** \u2014 does every technical claim match the current codebase? Check the code directly if needed.\n2. **Code examples** \u2014 are all code blocks in this section syntactically valid and behaviorally correct against current APIs?\n3. **Preservation** \u2014 what did you keep unchanged? Confirm the preserved content is still present and intact.\n4. **Cross-references** \u2014 are any internal links pointing to or from this section still working?\n\nDecrement `sectionsRemaining` by 1.\n\nRecord findings in notes: what you changed, what you preserved, any issues found.\n\n**Capture:** updated `sectionsRemaining`",
81
81
  "requireConfirmation": false
82
82
  },
83
83
  {
@@ -95,7 +95,7 @@
95
95
  {
96
96
  "id": "phase-4-validate",
97
97
  "title": "Phase 4: End-to-End Validation",
98
- "prompt": "Read through all updated documentation as a fresh reader and validate it as a whole.\n\n1. **End-to-end consistency** read the docs in order. Is terminology consistent? Does the logical flow make sense? Do sections refer to each other correctly?\n\n2. **Technical accuracy pass** for any section you feel uncertain about, verify it against current code now. If verification reveals a remaining inaccuracy, note it explicitly: what is wrong, where it is, and what the correct information should be. Do not silently pass a section you are unsure about.\n\n3. **User journey test** walk through the key documented workflows from start to finish using only the documentation. Do setup instructions work? Are the most important use cases covered correctly?\n\n4. **Navigation and structure** are all cross-references working? Is the table of contents (if present) accurate? Can a user find what they need?\n\n5. **Completeness check** look back at the original gap analysis. Were all critical and important updates completed? Note explicitly if any were deferred.\n\nDocument what you found: any remaining issues, any sections that still need work, and your overall assessment of the documentation quality after the update.",
98
+ "prompt": "Read through all updated documentation as a fresh reader and validate it as a whole.\n\n1. **End-to-end consistency** \u2014 read the docs in order. Is terminology consistent? Does the logical flow make sense? Do sections refer to each other correctly?\n\n2. **Technical accuracy pass** \u2014 for any section you feel uncertain about, verify it against current code now. If verification reveals a remaining inaccuracy, note it explicitly: what is wrong, where it is, and what the correct information should be. Do not silently pass a section you are unsure about.\n\n3. **User journey test** \u2014 walk through the key documented workflows from start to finish using only the documentation. Do setup instructions work? Are the most important use cases covered correctly?\n\n4. **Navigation and structure** \u2014 are all cross-references working? Is the table of contents (if present) accurate? Can a user find what they need?\n\n5. **Completeness check** \u2014 look back at the original gap analysis. Were all critical and important updates completed? Note explicitly if any were deferred.\n\nDocument what you found: any remaining issues, any sections that still need work, and your overall assessment of the documentation quality after the update.",
99
99
  "requireConfirmation": false
100
100
  },
101
101
  {
@@ -1,5 +1,5 @@
1
1
  {
2
- "id": "intelligent-test-case-generation",
2
+ "id": "wr.intelligent-test-case-generation",
3
3
  "name": "Test Case Generation from Tickets",
4
4
  "version": "1.0.0",
5
5
  "metricsProfile": "research",
@@ -39,7 +39,7 @@
39
39
  "var": "ambiguities",
40
40
  "not_equals": []
41
41
  },
42
- "prompt": "Before generating test scenarios, resolve the ambiguities you found.\n\nFor each ambiguity in `ambiguities`:\n1. State what is unclear and why it matters for test design\n2. Propose the most reasonable interpretation based on context\n3. Ask me to confirm, adjust, or provide the missing information\n\nKeep questions targeted. If the ticket, codebase, or docs can answer the question, answer it yourself first.\n\nIf the user's response significantly changes the scope or adds new acceptance criteria, revisit Phase 1 scenario identification before continuing do not carry stale scenarios forward.\n\nCapture:\n- `resolvedAmbiguities` -- list of ambiguities with chosen interpretation\n- `openAmbiguities` -- ambiguities the user still needs to resolve (initialize as empty)",
42
+ "prompt": "Before generating test scenarios, resolve the ambiguities you found.\n\nFor each ambiguity in `ambiguities`:\n1. State what is unclear and why it matters for test design\n2. Propose the most reasonable interpretation based on context\n3. Ask me to confirm, adjust, or provide the missing information\n\nKeep questions targeted. If the ticket, codebase, or docs can answer the question, answer it yourself first.\n\nIf the user's response significantly changes the scope or adds new acceptance criteria, revisit Phase 1 scenario identification before continuing \u2014 do not carry stale scenarios forward.\n\nCapture:\n- `resolvedAmbiguities` -- list of ambiguities with chosen interpretation\n- `openAmbiguities` -- ambiguities the user still needs to resolve (initialize as empty)",
43
43
  "requireConfirmation": true
44
44
  },
45
45
  {
@@ -1,5 +1,5 @@
1
1
  {
2
- "id": "personal-learning-course-design",
2
+ "id": "wr.personal-learning-course-design",
3
3
  "name": "Personal Learning Course Design Workflow",
4
4
  "version": "1.0.0",
5
5
  "metricsProfile": "none",
@@ -37,7 +37,7 @@
37
37
  {
38
38
  "id": "select-design-path",
39
39
  "title": "Choose Your Learning Design Path",
40
- "prompt": "Based on your time constraints and learning design experience, select the approach that best fits your needs:\n\n**🚀 QUICK START PATH (3-5 days)**\n- **Best for**: First-time course designers, tight timelines, simple learning goals\n- **What you get**: Essential structure with clear objectives, basic assessment, and simple schedule\n- **Time investment**: 3-5 days to complete design process\n- **Result**: Functional learning plan that covers the basics effectively\n\n**⚖️ BALANCED PATH (1-2 weeks)**\n- **Best for**: Some learning design experience, moderate complexity goals, want good system without overwhelm\n- **What you get**: Solid instructional design plus engagement features, assessment strategy, and progress tracking\n- **Time investment**: 1-2 weeks to complete design process\n- **Result**: Comprehensive learning system with proven pedagogical principles\n\n**🎓 COMPREHENSIVE PATH (2-3 weeks)**\n- **Best for**: Complex learning goals, want professional-grade system, experienced with instructional design\n- **What you get**: Full pedagogical depth with spaced repetition, detailed accountability, and advanced monitoring\n- **Time investment**: 2-3 weeks to complete design process\n- **Result**: Professional-grade learning system with all advanced features\n\n**Please select your path:**\n- Type 'quick' for Quick Start Path (3-5 days)\n- Type 'balanced' for Balanced Path (1-2 weeks)\n- Type 'comprehensive' for Comprehensive Path (2-3 weeks)\n\nYour choice will customize the remaining steps to match your needs and time constraints.",
40
+ "prompt": "Based on your time constraints and learning design experience, select the approach that best fits your needs:\n\n**\ud83d\ude80 QUICK START PATH (3-5 days)**\n- **Best for**: First-time course designers, tight timelines, simple learning goals\n- **What you get**: Essential structure with clear objectives, basic assessment, and simple schedule\n- **Time investment**: 3-5 days to complete design process\n- **Result**: Functional learning plan that covers the basics effectively\n\n**\u2696\ufe0f BALANCED PATH (1-2 weeks)**\n- **Best for**: Some learning design experience, moderate complexity goals, want good system without overwhelm\n- **What you get**: Solid instructional design plus engagement features, assessment strategy, and progress tracking\n- **Time investment**: 1-2 weeks to complete design process\n- **Result**: Comprehensive learning system with proven pedagogical principles\n\n**\ud83c\udf93 COMPREHENSIVE PATH (2-3 weeks)**\n- **Best for**: Complex learning goals, want professional-grade system, experienced with instructional design\n- **What you get**: Full pedagogical depth with spaced repetition, detailed accountability, and advanced monitoring\n- **Time investment**: 2-3 weeks to complete design process\n- **Result**: Professional-grade learning system with all advanced features\n\n**Please select your path:**\n- Type 'quick' for Quick Start Path (3-5 days)\n- Type 'balanced' for Balanced Path (1-2 weeks)\n- Type 'comprehensive' for Comprehensive Path (2-3 weeks)\n\nYour choice will customize the remaining steps to match your needs and time constraints.",
41
41
  "agentRole": "You are a learning design consultant who helps users choose the right level of complexity for their learning design process. Guide them to select a path that matches their experience, time constraints, and learning goals without overwhelming them.",
42
42
  "guidance": [
43
43
  "Help users honestly assess their time availability and design experience",
@@ -1,6 +1,6 @@
1
1
  {
2
- "id": "mr-review-workflow-agentic",
3
- "name": "MR Review Workflow (Lean v2 Notes-First Evidence-Driven Reviewer Families)",
2
+ "id": "wr.mr-review",
3
+ "name": "MR Review Workflow (Lean v2 \u2022 Notes-First \u2022 Evidence-Driven Reviewer Families)",
4
4
  "version": "2.6.0",
5
5
  "description": "Lean v2 MR review workflow. Merges intake, missing-input gating, context gathering, and re-triage into one structured front phase, then drives review through a shared fact packet, parallel reviewer families, contradiction-driven synthesis, and evidence-first final validation.",
6
6
  "about": "## MR Review Workflow\n\nThis workflow conducts a structured, evidence-driven code review of a merge request or pull request. It is designed for cases where you want a thorough, audit-quality review rather than a quick glance -- particularly when the change touches critical surfaces, spans many files, or carries real production risk.\n\n**What it does:**\nThe workflow locates and bounds the review target, enriches it with PR context and ticket intent, classifies the change by risk and shape, then runs parallel \"reviewer family\" agents (covering correctness, architecture, runtime risk, tests/docs, and more) from a shared neutral fact packet. It reconciles contradictions between reviewer families, stress-tests the recommendation with adversarial validators, and produces a final handoff with severity-classified findings and ready-to-post MR comments.\n\n**When to use it:**\n- Before merging a PR that touches auth, data models, APIs, or critical paths\n- When you want independent perspectives on a change without the noise of an unstructured review\n- When the change is large or the reviewer is unfamiliar with the surrounding code\n- When you need a reproducible audit trail for compliance or team review processes\n\n**What it produces:**\nA final review recommendation (approve / request changes / needs discussion) with a confidence band, severity-graded findings (Critical / Major / Minor / Nit), ready-to-post MR comments, a coverage ledger showing which review domains were checked, and an honest disclosure of any context that could not be recovered.\n\n**How to get good results:**\nProvide the PR URL, branch name, or diff. The workflow can recover most context on its own -- ticket links, repo patterns, policy docs -- but if the change has non-obvious intent, a one-sentence description of the goal helps calibrate review sensitivity. The workflow will not post comments or approve/reject without explicit instruction.",
@@ -86,7 +86,7 @@
86
86
  {
87
87
  "id": "phase-0-understand-and-classify",
88
88
  "title": "Phase 0: Locate, Bound, Enrich & Classify",
89
- "prompt": "Build the review foundation in one pass.\n\nStep 1 Early exit / minimum inputs:\nBefore exploring, verify that the review target is real and inspectable. If the diff, changed files, or equivalent review material are completely absent and cannot be inferred with tools, ask for the minimum missing artifact and stop. Do NOT ask questions you can resolve with tools.\n\nStep 2 Locate and bound the review target:\nAttempt to determine the strongest available review target and boundary.\n\nAttempt to establish:\n- `reviewTargetKind` from the strongest available source such as PR/MR, branch, patch, diff, or local working tree changes\n- `reviewTargetSource` describing where the target came from\n- likely PR/MR identity when available (`prUrl`, `prNumber`)\n- likely base / ancestor reference (`baseCandidate`, `mergeBaseRef`) when available\n- whether the branch may include inherited or out-of-scope changes\n- `boundaryConfidence`: High / Medium / Low\n\nDo not over-prescribe your own investigation path. Use the strongest available evidence and record uncertainty honestly.\n\nStep 3 Enrich with context:\nRecover the strongest available intent and policy context from whatever sources are actually available.\n\nAttempt to recover:\n- MR title and purpose\n- ticket / issue / acceptance context (`ticketRefs`, `ticketContext`)\n- supporting docs / specs / rollout context (`supportingDocsFound`)\n- repo or user policy/convention context when it is likely to affect review judgment (`policySourcesFound`)\n- `contextConfidence`: High / Medium / Low\n\nStep 4 Review-surface hygiene:\nClassify the visible change into a minimal review surface.\n\nSet:\n- `coreReviewSurface`\n- `likelyNoiseOrMechanicalChurn`\n- `likelyInheritedOrOutOfScopeChanges`\n- `reviewSurfaceSummary`\n- `reviewScopeWarnings`\n\nThe goal is not a giant ledger. 
The goal is to avoid treating every visible changed file as equally worthy of deep review by default.\n\nStep 5 Classify the review:\nAfter exploration, classify the work.\n\nSet:\n- `reviewMode`: QUICK / STANDARD / THOROUGH\n- `riskLevel`: Low / Medium / High\n- `shapeProfile`: choose the best primary label from `isolated_change`, `crosscutting_change`, `mechanically_noisy_change`, or `ambiguous_boundary`\n- `changeTypeProfile`: choose the best primary label from `general_code_change`, `api_contract_change`, `data_model_or_migration`, `security_sensitive`, or `test_only`\n- `maxParallelism`: 0 / 3 / 5\n- `criticalSurfaceTouched`: true / false\n- `needsSimulation`: true / false\n- `needsBoundaryFollowup`: true / false\n- `needsContextFollowup`: true / false\n- `needsReviewerBundle`: true / false\n\nDecision guidance:\n- QUICK: very small, isolated, low-risk changes with little ambiguity\n- STANDARD: typical feature or bug-fix reviews with moderate ambiguity or moderate risk\n- THOROUGH: critical surfaces, architectural novelty, high risk, broad change sets, or strong need for independent reviewer perspectives\n\nMinimal routing guidance:\n- if `boundaryConfidence = Low`, bias toward boundary/context follow-up before strong recommendation confidence\n- if `changeTypeProfile = api_contract_change`, bias toward contract/consumer/backward-compatibility scrutiny\n- if `changeTypeProfile = data_model_or_migration`, bias toward rollout / compatibility / simulation scrutiny\n- if `changeTypeProfile = security_sensitive`, bias toward adversarial/runtime-risk scrutiny and lower tolerance for weak evidence\n- if `changeTypeProfile = test_only`, bias toward stronger false-positive suppression\n- if `shapeProfile = mechanically_noisy_change`, bias toward stronger noise filtering and lower appetite for style-only findings\n\nStep 6 Optional deeper context:\nIf `reviewMode` is STANDARD or THOROUGH and context remains incomplete, and delegation is available, spawn TWO WorkRail 
Executors SIMULTANEOUSLY running `routine-context-gathering` with focus=COMPLETENESS and focus=DEPTH. Synthesize both outputs before finishing this step.\n\nStep 7 Human-facing artifact:\nChoose `reviewDocPath` only if a live artifact will materially improve human readability. Default suggestion: `mr-review.md` at the project root. This artifact is optional and never canonical workflow state.\n\nFallback behavior:\n- if PR/MR is not found but a branch/diff is inspectable, continue with downgraded context confidence and disclose missing PR context later\n- if the branch is inspectable but merge-base / ancestor remains ambiguous, continue with downgraded boundary confidence, set `needsBoundaryFollowup = true`, and disclose the uncertainty later\n- if ticket or supporting docs are missing, continue with downgraded context confidence and avoid overclaiming intent-sensitive findings\n- if only a patch/diff is available, continue if it is inspectable, but keep lower confidence on intent/boundary-dependent conclusions\n- if the review target itself is missing, ask only for that missing artifact and stop\n\nSet these keys in the next `continue_workflow` call's `context` object:\n- `reviewTargetKind`\n- `reviewTargetSource`\n- `prUrl`\n- `prNumber`\n- `baseCandidate`\n- `mergeBaseRef`\n- `boundaryConfidence`\n- `contextConfidence`\n- `mrTitle`\n- `mrPurpose`\n- `ticketRefs`\n- `ticketContext`\n- `supportingDocsFound`\n- `policySourcesFound`\n- `accessibleContextSources`\n- `missingContextSources`\n- `focusAreas`\n- `changedFileCount`\n- `criticalSurfaceTouched`\n- `reviewMode`\n- `riskLevel`\n- `shapeProfile`\n- `changeTypeProfile`\n- `maxParallelism`\n- `reviewDocPath`\n- `contextSummary`\n- `candidateFiles`\n- `moduleRoots`\n- `contextUnknownCount`\n- `coverageGapCount`\n- `authorIntentUnclear`\n- `needsSimulation`\n- `needsBoundaryFollowup`\n- `needsContextFollowup`\n- `needsReviewerBundle`\n- `coreReviewSurface`\n- `likelyNoiseOrMechanicalChurn`\n- 
`likelyInheritedOrOutOfScopeChanges`\n- `reviewSurfaceSummary`\n- `reviewScopeWarnings`\n- `openQuestions`\n\nRules:\n- answer your own questions with tools whenever possible\n- only keep true human-decision questions in `openQuestions`\n- keep `openQuestions` bounded to the minimum necessary\n- classify AFTER exploring, not before\n- before leaving this phase, either establish the likely review boundary or explicitly record why you could not\n\nAlso set in the context object: one sentence describing what you are trying to accomplish (e.g. \"implement OAuth refresh token rotation\", \"review PR #47 before merge\"). This populates the session title in the Workspace console immediately.",
89
+ "prompt": "Build the review foundation in one pass.\n\nStep 1 \u2014 Early exit / minimum inputs:\nBefore exploring, verify that the review target is real and inspectable. If the diff, changed files, or equivalent review material are completely absent and cannot be inferred with tools, ask for the minimum missing artifact and stop. Do NOT ask questions you can resolve with tools.\n\nStep 2 \u2014 Locate and bound the review target:\nAttempt to determine the strongest available review target and boundary.\n\nAttempt to establish:\n- `reviewTargetKind` from the strongest available source such as PR/MR, branch, patch, diff, or local working tree changes\n- `reviewTargetSource` describing where the target came from\n- likely PR/MR identity when available (`prUrl`, `prNumber`)\n- likely base / ancestor reference (`baseCandidate`, `mergeBaseRef`) when available\n- whether the branch may include inherited or out-of-scope changes\n- `boundaryConfidence`: High / Medium / Low\n\nDo not over-prescribe your own investigation path. Use the strongest available evidence and record uncertainty honestly.\n\nStep 3 \u2014 Enrich with context:\nRecover the strongest available intent and policy context from whatever sources are actually available.\n\nAttempt to recover:\n- MR title and purpose\n- ticket / issue / acceptance context (`ticketRefs`, `ticketContext`)\n- supporting docs / specs / rollout context (`supportingDocsFound`)\n- repo or user policy/convention context when it is likely to affect review judgment (`policySourcesFound`)\n- `contextConfidence`: High / Medium / Low\n\nStep 4 \u2014 Review-surface hygiene:\nClassify the visible change into a minimal review surface.\n\nSet:\n- `coreReviewSurface`\n- `likelyNoiseOrMechanicalChurn`\n- `likelyInheritedOrOutOfScopeChanges`\n- `reviewSurfaceSummary`\n- `reviewScopeWarnings`\n\nThe goal is not a giant ledger. 
The goal is to avoid treating every visible changed file as equally worthy of deep review by default.\n\nStep 5 \u2014 Classify the review:\nAfter exploration, classify the work.\n\nSet:\n- `reviewMode`: QUICK / STANDARD / THOROUGH\n- `riskLevel`: Low / Medium / High\n- `shapeProfile`: choose the best primary label from `isolated_change`, `crosscutting_change`, `mechanically_noisy_change`, or `ambiguous_boundary`\n- `changeTypeProfile`: choose the best primary label from `general_code_change`, `api_contract_change`, `data_model_or_migration`, `security_sensitive`, or `test_only`\n- `maxParallelism`: 0 / 3 / 5\n- `criticalSurfaceTouched`: true / false\n- `needsSimulation`: true / false\n- `needsBoundaryFollowup`: true / false\n- `needsContextFollowup`: true / false\n- `needsReviewerBundle`: true / false\n\nDecision guidance:\n- QUICK: very small, isolated, low-risk changes with little ambiguity\n- STANDARD: typical feature or bug-fix reviews with moderate ambiguity or moderate risk\n- THOROUGH: critical surfaces, architectural novelty, high risk, broad change sets, or strong need for independent reviewer perspectives\n\nMinimal routing guidance:\n- if `boundaryConfidence = Low`, bias toward boundary/context follow-up before strong recommendation confidence\n- if `changeTypeProfile = api_contract_change`, bias toward contract/consumer/backward-compatibility scrutiny\n- if `changeTypeProfile = data_model_or_migration`, bias toward rollout / compatibility / simulation scrutiny\n- if `changeTypeProfile = security_sensitive`, bias toward adversarial/runtime-risk scrutiny and lower tolerance for weak evidence\n- if `changeTypeProfile = test_only`, bias toward stronger false-positive suppression\n- if `shapeProfile = mechanically_noisy_change`, bias toward stronger noise filtering and lower appetite for style-only findings\n\nStep 6 \u2014 Optional deeper context:\nIf `reviewMode` is STANDARD or THOROUGH and context remains incomplete, and delegation is available, spawn 
TWO WorkRail Executors SIMULTANEOUSLY running `routine-context-gathering` with focus=COMPLETENESS and focus=DEPTH. Synthesize both outputs before finishing this step.\n\nStep 7 \u2014 Human-facing artifact:\nChoose `reviewDocPath` only if a live artifact will materially improve human readability. Default suggestion: `mr-review.md` at the project root. This artifact is optional and never canonical workflow state.\n\nFallback behavior:\n- if PR/MR is not found but a branch/diff is inspectable, continue with downgraded context confidence and disclose missing PR context later\n- if the branch is inspectable but merge-base / ancestor remains ambiguous, continue with downgraded boundary confidence, set `needsBoundaryFollowup = true`, and disclose the uncertainty later\n- if ticket or supporting docs are missing, continue with downgraded context confidence and avoid overclaiming intent-sensitive findings\n- if only a patch/diff is available, continue if it is inspectable, but keep lower confidence on intent/boundary-dependent conclusions\n- if the review target itself is missing, ask only for that missing artifact and stop\n\nSet these keys in the next `continue_workflow` call's `context` object:\n- `reviewTargetKind`\n- `reviewTargetSource`\n- `prUrl`\n- `prNumber`\n- `baseCandidate`\n- `mergeBaseRef`\n- `boundaryConfidence`\n- `contextConfidence`\n- `mrTitle`\n- `mrPurpose`\n- `ticketRefs`\n- `ticketContext`\n- `supportingDocsFound`\n- `policySourcesFound`\n- `accessibleContextSources`\n- `missingContextSources`\n- `focusAreas`\n- `changedFileCount`\n- `criticalSurfaceTouched`\n- `reviewMode`\n- `riskLevel`\n- `shapeProfile`\n- `changeTypeProfile`\n- `maxParallelism`\n- `reviewDocPath`\n- `contextSummary`\n- `candidateFiles`\n- `moduleRoots`\n- `contextUnknownCount`\n- `coverageGapCount`\n- `authorIntentUnclear`\n- `needsSimulation`\n- `needsBoundaryFollowup`\n- `needsContextFollowup`\n- `needsReviewerBundle`\n- `coreReviewSurface`\n- `likelyNoiseOrMechanicalChurn`\n- 
`likelyInheritedOrOutOfScopeChanges`\n- `reviewSurfaceSummary`\n- `reviewScopeWarnings`\n- `openQuestions`\n\nRules:\n- answer your own questions with tools whenever possible\n- only keep true human-decision questions in `openQuestions`\n- keep `openQuestions` bounded to the minimum necessary\n- classify AFTER exploring, not before\n- before leaving this phase, either establish the likely review boundary or explicitly record why you could not\n\nAlso set in the context object: one sentence describing what you are trying to accomplish (e.g. \"implement OAuth refresh token rotation\", \"review PR #47 before merge\"). This populates the session title in the Workspace console immediately.",
90
90
  "requireConfirmation": {
91
91
  "or": [
92
92
  {
@@ -232,7 +232,7 @@
232
232
  {
233
233
  "id": "phase-4b-canonical-synthesis",
234
234
  "title": "Canonical Synthesis and Coverage Update",
235
- "prompt": "Synthesize all reviewer-family outputs and targeted follow-up into one canonical review state.\n\nPart A Compare against your hypothesis:\n- revisit `recommendationHypothesis`\n- what did the evidence confirm?\n- what did it challenge?\n- what changed your mind, what held firm, and what do you explicitly reject?\n\nPart B Synthesis decision table:\n- if 2+ reviewer families flag the same serious issue with the same severity, treat it as validated\n- if the same issue is flagged with different severities, default to the higher severity unless the lower-severity position includes specific counter-evidence\n- if one family flags an issue and others are silent, investigate it but do not automatically block unless it is clearly critical or security-sensitive\n- if one family says false positive and another says valid issue, require explicit main-agent adjudication in notes before finalization\n- if recommendation spread shows material disagreement, findings override recommendation until reconciled\n- if simulation reveals a new production risk, add a new finding and re-evaluate recommendation confidence\n\nPart C Coverage ledger rules:\n- move a domain from `uncertain` to `checked` only when evidence is materially adequate\n- keep a domain `uncertain` if disagreement or missing evidence still materially affects recommendation quality\n- mark `not_applicable` only when the MR genuinely does not engage that dimension\n- clear `contradicted` only when the contradiction is explicitly resolved by evidence or adjudication\n- clear `needs_followup` only when required follow-up has actually been completed or the domain is explicitly downgraded as non-material\n\nPart D Recommendation confidence rules:\n- set `recommendationConfidenceBand = High` only if no unresolved material contradictions remain, no important coverage domains remain uncertain, false-positive risk is not material, and the evidence is strong enough for the current mode\n- set 
`recommendationConfidenceBand = Medium` when one bounded uncertainty remains but the recommendation is still directionally justified\n- set `recommendationConfidenceBand = Low` when multiple viable interpretations remain, major contradictions are unresolved, or important coverage gaps still weaken the recommendation\n\nSet these keys in the next `continue_workflow` call's `context` object:\n- `reviewFindings`\n- `criticalFindingsCount`\n- `majorFindingsCount`\n- `minorFindingsCount`\n- `nitFindingsCount`\n- `recommendation`\n- `recommendationConfidenceBand`\n- `recommendationDriftDetected`\n- `coverageLedger`\n- `coverageUncertainCount`\n- `docCompletenessConcernCount`\n\nIf `reviewDocPath` exists, keep it aligned for human readability only. Notes/context remain workflow truth.",
235
+ "prompt": "Synthesize all reviewer-family outputs and targeted follow-up into one canonical review state.\n\nPart A \u2014 Compare against your hypothesis:\n- revisit `recommendationHypothesis`\n- what did the evidence confirm?\n- what did it challenge?\n- what changed your mind, what held firm, and what do you explicitly reject?\n\nPart B \u2014 Synthesis decision table:\n- if 2+ reviewer families flag the same serious issue with the same severity, treat it as validated\n- if the same issue is flagged with different severities, default to the higher severity unless the lower-severity position includes specific counter-evidence\n- if one family flags an issue and others are silent, investigate it but do not automatically block unless it is clearly critical or security-sensitive\n- if one family says false positive and another says valid issue, require explicit main-agent adjudication in notes before finalization\n- if recommendation spread shows material disagreement, findings override recommendation until reconciled\n- if simulation reveals a new production risk, add a new finding and re-evaluate recommendation confidence\n\nPart C \u2014 Coverage ledger rules:\n- move a domain from `uncertain` to `checked` only when evidence is materially adequate\n- keep a domain `uncertain` if disagreement or missing evidence still materially affects recommendation quality\n- mark `not_applicable` only when the MR genuinely does not engage that dimension\n- clear `contradicted` only when the contradiction is explicitly resolved by evidence or adjudication\n- clear `needs_followup` only when required follow-up has actually been completed or the domain is explicitly downgraded as non-material\n\nPart D \u2014 Recommendation confidence rules:\n- set `recommendationConfidenceBand = High` only if no unresolved material contradictions remain, no important coverage domains remain uncertain, false-positive risk is not material, and the evidence is strong enough for the current 
mode\n- set `recommendationConfidenceBand = Medium` when one bounded uncertainty remains but the recommendation is still directionally justified\n- set `recommendationConfidenceBand = Low` when multiple viable interpretations remain, major contradictions are unresolved, or important coverage gaps still weaken the recommendation\n\nSet these keys in the next `continue_workflow` call's `context` object:\n- `reviewFindings`\n- `criticalFindingsCount`\n- `majorFindingsCount`\n- `minorFindingsCount`\n- `nitFindingsCount`\n- `recommendation`\n- `recommendationConfidenceBand`\n- `recommendationDriftDetected`\n- `coverageLedger`\n- `coverageUncertainCount`\n- `docCompletenessConcernCount`\n\nIf `reviewDocPath` exists, keep it aligned for human readability only. Notes/context remain workflow truth.",
236
236
  "requireConfirmation": false
237
237
  },
238
238
  {
@@ -1,9 +1,9 @@
1
1
  {
2
- "id": "personal-learning-materials-creation-branched",
2
+ "id": "wr.personal-learning-materials",
3
3
  "name": "Personal Learning Materials Creation Workflow",
4
4
  "version": "1.1.0",
5
5
  "metricsProfile": "none",
6
- "description": "Use this to create learning materials for a course or subject. Adapts depth and format to your time budget Quick Start, Balanced, or Comprehensive.",
6
+ "description": "Use this to create learning materials for a course or subject. Adapts depth and format to your time budget \u2014 Quick Start, Balanced, or Comprehensive.",
7
7
  "about": "## Personal Learning Materials Creation Workflow\n\nUse this to create the actual study materials for a course or subject you are learning -- study guides, exercises, assessments, and spaced-repetition review materials. This workflow assumes you already have a learning plan or course design with defined objectives; it focuses on producing materials that directly support those objectives.\n\n### What it produces\n\nDepending on the path you choose:\n\n- **Quick Start (2-3 weeks)**: study guides and basic exercises for immediate use.\n- **Balanced (4-6 weeks)**: a complete learning system -- study guides, exercises, assessments, and spaced repetition materials.\n- **Comprehensive (8-12 weeks)**: a full learning ecosystem with interactive elements, effectiveness measurement, and a scalable update protocol.\n\n### When to use it\n\n- You have a learning plan and need to turn it into usable materials.\n- You are preparing for a certification, exam, or structured self-study program.\n- You want materials tailored to your specific objectives rather than relying entirely on off-the-shelf resources.\n\n### When NOT to use it\n\n- You haven't designed your learning course yet -- use the Personal Learning Course Design workflow first to define objectives and structure.\n- You need to design a course for others to take -- use the Learner-Centered Course workflow instead.\n\n### How to get good results\n\n- Select the path honestly based on available time. Starting with Quick Start and expanding later is better than committing to Comprehensive and abandoning it.\n- Have your learning objectives written out before starting -- the workflow maps every material directly to an objective.\n- Be specific about your preferred learning formats (text, diagrams, flashcards, practice problems) at the start.",
8
8
  "examples": [
9
9
  "Create study guides and exercises for my AWS Solutions Architect certification prep",
@@ -33,7 +33,7 @@
33
33
  {
34
34
  "id": "phase-0-select-thoroughness-path",
35
35
  "title": "Phase 0: Select Your Materials Creation Path",
36
- "prompt": "Choose your learning materials creation approach based on your time, goals, and quality needs:\n\n📚 **Quick Start Path**\n Timeline: 2-3 weeks (5-8 hours total)\n Materials: Study guides + basic exercises\n Best for: Time-constrained learners, getting started quickly\n Outcome: Functional materials for immediate use\n\n🎯 **Balanced Path**\n Timeline: 4-6 weeks (12-20 hours total)\n Materials: Study guides + exercises + assessments + spaced repetition\n Best for: Comprehensive learning support, professional quality\n Outcome: Complete learning system with proven effectiveness\n\n🏆 **Comprehensive Path**\n Timeline: 8-12 weeks (25-40 hours total)\n Materials: All types + interactive elements + full testing\n Best for: Professional educators, enterprise-grade projects\n Outcome: Optimized learning ecosystem with maximum effectiveness\n\nWhich path best matches your timeline and quality goals?",
36
+ "prompt": "Choose your learning materials creation approach based on your time, goals, and quality needs:\n\n\ud83d\udcda **Quick Start Path**\n\u2022 Timeline: 2-3 weeks (5-8 hours total)\n\u2022 Materials: Study guides + basic exercises\n\u2022 Best for: Time-constrained learners, getting started quickly\n\u2022 Outcome: Functional materials for immediate use\n\n\ud83c\udfaf **Balanced Path**\n\u2022 Timeline: 4-6 weeks (12-20 hours total)\n\u2022 Materials: Study guides + exercises + assessments + spaced repetition\n\u2022 Best for: Comprehensive learning support, professional quality\n\u2022 Outcome: Complete learning system with proven effectiveness\n\n\ud83c\udfc6 **Comprehensive Path**\n\u2022 Timeline: 8-12 weeks (25-40 hours total)\n\u2022 Materials: All types + interactive elements + full testing\n\u2022 Best for: Professional educators, enterprise-grade projects\n\u2022 Outcome: Optimized learning ecosystem with maximum effectiveness\n\nWhich path best matches your timeline and quality goals?",
37
37
  "agentRole": "You are a learning materials consultant specializing in helping users choose the right approach for their constraints and goals. Guide users toward the path that best fits their needs. Set the thoroughnessLevel context variable based on their selection.",
38
38
  "guidance": [
39
39
  "Help users make realistic choices based on their actual time availability",
@@ -49,7 +49,7 @@
49
49
  "equals": "Quick"
50
50
  },
51
51
  "title": "Phase 1: Essential Learning Plan Analysis (Quick Start)",
52
- "prompt": "Extract the core elements from your learning plan for rapid materials creation:\n\n**STEP 1: Core Objectives**\n Identify your 3-5 most important learning objectives\n Note success criteria for each objective\n Skip complex prerequisite analysis\n\n**STEP 2: Essential Materials Map**\n For each objective, identify if you need: study guide, basic exercises, or both\n Focus on immediate learning needs, not comprehensive coverage\n Note existing resources that can supplement your materials\n\n**STEP 3: Quick Resource Assessment**\n List available source materials (books, courses, notes)\n Identify 2-3 key resources for each objective\n Note time constraints and creation priorities\n\nGoal: Practical roadmap for essential materials creation in minimal time.",
52
+ "prompt": "Extract the core elements from your learning plan for rapid materials creation:\n\n**STEP 1: Core Objectives**\n\u2022 Identify your 3-5 most important learning objectives\n\u2022 Note success criteria for each objective\n\u2022 Skip complex prerequisite analysis\n\n**STEP 2: Essential Materials Map**\n\u2022 For each objective, identify if you need: study guide, basic exercises, or both\n\u2022 Focus on immediate learning needs, not comprehensive coverage\n\u2022 Note existing resources that can supplement your materials\n\n**STEP 3: Quick Resource Assessment**\n\u2022 List available source materials (books, courses, notes)\n\u2022 Identify 2-3 key resources for each objective\n\u2022 Note time constraints and creation priorities\n\nGoal: Practical roadmap for essential materials creation in minimal time.",
53
53
  "agentRole": "You are an efficient learning analyst focused on rapid materials development. Help users identify core needs quickly without over-analysis. Emphasize practical, immediately actionable insights.",
54
54
  "guidance": [
55
55
  "Keep analysis focused and action-oriented",
@@ -83,7 +83,7 @@
83
83
  "equals": "Balanced"
84
84
  },
85
85
  "title": "Phase 1: Comprehensive Learning Plan Analysis (Balanced)",
86
- "prompt": "Analyze your learning plan to guide professional-quality materials creation:\n\n**STEP 1: Objective Architecture**\n Extract all learning objectives with success criteria\n Identify prerequisite relationships between objectives\n Note assessment strategies for each objective\n Map objectives to modules and time allocations\n\n**STEP 2: Materials Requirements Matrix**\n For each objective, determine needed materials: study guides, exercises, assessments\n Identify concepts requiring multiple reinforcement approaches\n Note which objectives need spaced repetition support\n Flag areas requiring practical application or hands-on practice\n\n**STEP 3: Resource Integration Plan**\n Evaluate existing resources for quality and coverage\n Identify gaps where custom materials are essential\n Plan integration between created materials and external resources\n Design quality standards for materials consistency\n\nGoal: Strategic foundation for professional learning materials system.",
86
+ "prompt": "Analyze your learning plan to guide professional-quality materials creation:\n\n**STEP 1: Objective Architecture**\n\u2022 Extract all learning objectives with success criteria\n\u2022 Identify prerequisite relationships between objectives\n\u2022 Note assessment strategies for each objective\n\u2022 Map objectives to modules and time allocations\n\n**STEP 2: Materials Requirements Matrix**\n\u2022 For each objective, determine needed materials: study guides, exercises, assessments\n\u2022 Identify concepts requiring multiple reinforcement approaches\n\u2022 Note which objectives need spaced repetition support\n\u2022 Flag areas requiring practical application or hands-on practice\n\n**STEP 3: Resource Integration Plan**\n\u2022 Evaluate existing resources for quality and coverage\n\u2022 Identify gaps where custom materials are essential\n\u2022 Plan integration between created materials and external resources\n\u2022 Design quality standards for materials consistency\n\nGoal: Strategic foundation for professional learning materials system.",
87
87
  "agentRole": "You are a professional instructional designer specializing in systematic materials development. Help users create comprehensive yet practical plans that balance quality with efficiency. Focus on proven instructional design principles.",
88
88
  "guidance": [
89
89
  "Apply instructional design best practices systematically",
@@ -112,7 +112,7 @@
112
112
  "equals": "Comprehensive"
113
113
  },
114
114
  "title": "Phase 1: Expert Learning Plan Analysis (Comprehensive)",
115
- "prompt": "Conduct thorough analysis of learning architecture for enterprise-grade materials:\n\n**STEP 1: Learning System Architecture**\n Map complete learning objective hierarchy with dependencies\n Analyze cognitive load and complexity progression\n Identify multiple learning pathways and individual differences\n Design assessment strategy aligned with learning taxonomies\n\n**STEP 2: Advanced Materials Strategy**\n Determine optimal material types for each learning objective\n Plan multi-modal approach for different learning styles\n Design integration points for spaced repetition and active recall\n Identify opportunities for interactive and adaptive elements\n\n**STEP 3: Quality & Effectiveness Framework**\n Establish criteria for materials effectiveness measurement\n Plan user testing and feedback integration\n Design continuous improvement and iteration protocols\n Create scalability and maintenance considerations\n\nGoal: Strategic foundation for optimized, enterprise-grade learning ecosystem.",
115
+ "prompt": "Conduct thorough analysis of learning architecture for enterprise-grade materials:\n\n**STEP 1: Learning System Architecture**\n\u2022 Map complete learning objective hierarchy with dependencies\n\u2022 Analyze cognitive load and complexity progression\n\u2022 Identify multiple learning pathways and individual differences\n\u2022 Design assessment strategy aligned with learning taxonomies\n\n**STEP 2: Advanced Materials Strategy**\n\u2022 Determine optimal material types for each learning objective\n\u2022 Plan multi-modal approach for different learning styles\n\u2022 Design integration points for spaced repetition and active recall\n\u2022 Identify opportunities for interactive and adaptive elements\n\n**STEP 3: Quality & Effectiveness Framework**\n\u2022 Establish criteria for materials effectiveness measurement\n\u2022 Plan user testing and feedback integration\n\u2022 Design continuous improvement and iteration protocols\n\u2022 Create scalability and maintenance considerations\n\nGoal: Strategic foundation for optimized, enterprise-grade learning ecosystem.",
116
116
  "agentRole": "You are an expert learning systems architect with deep expertise in advanced instructional design and learning optimization. Guide users in creating sophisticated materials that maximize learning effectiveness through evidence-based approaches.",
117
117
  "guidance": [
118
118
  "Apply advanced learning science principles and research",
@@ -141,7 +141,7 @@
141
141
  "equals": "Quick"
142
142
  },
143
143
  "title": "Phase 2: Efficient Materials Strategy (Quick Start)",
144
- "prompt": "Create a focused strategy for essential materials creation:\n\n**STEP 1: Format Selection**\n Choose 1-2 primary formats based on your learning style\n Prioritize formats you can create quickly (text-based, simple templates)\n Plan minimal but consistent formatting approach\n Focus on immediate usability over visual polish\n\n**STEP 2: Creation Workflow**\n Design simple templates for study guides and exercises\n Plan batch creation approach to maximize efficiency\n Set realistic quality standards (functional over perfect)\n Create basic organization system for easy access\n\n**STEP 3: Quality Framework**\n Establish minimum viable product standards\n Plan quick self-review process\n Design simple feedback collection for future improvement\n Focus on completion over perfection\n\nGoal: Practical strategy for rapid materials creation without sacrificing core functionality.",
144
+ "prompt": "Create a focused strategy for essential materials creation:\n\n**STEP 1: Format Selection**\n\u2022 Choose 1-2 primary formats based on your learning style\n\u2022 Prioritize formats you can create quickly (text-based, simple templates)\n\u2022 Plan minimal but consistent formatting approach\n\u2022 Focus on immediate usability over visual polish\n\n**STEP 2: Creation Workflow**\n\u2022 Design simple templates for study guides and exercises\n\u2022 Plan batch creation approach to maximize efficiency\n\u2022 Set realistic quality standards (functional over perfect)\n\u2022 Create basic organization system for easy access\n\n**STEP 3: Quality Framework**\n\u2022 Establish minimum viable product standards\n\u2022 Plan quick self-review process\n\u2022 Design simple feedback collection for future improvement\n\u2022 Focus on completion over perfection\n\nGoal: Practical strategy for rapid materials creation without sacrificing core functionality.",
145
145
  "agentRole": "You are an efficiency expert specializing in rapid content creation. Help users design streamlined approaches that maximize output while maintaining essential quality. Focus on practical, time-saving strategies.",
146
146
  "guidance": [
147
147
  "Emphasize efficiency and speed over perfection",
@@ -170,7 +170,7 @@
170
170
  "equals": "Comprehensive"
171
171
  },
172
172
  "title": "Phase 2: Advanced Materials Strategy (Comprehensive)",
173
- "prompt": "Develop sophisticated strategy for enterprise-grade materials:\n\n**STEP 1: Multi-Modal Format Strategy**\n Design format variety to engage different learning modes\n Plan advanced visual elements, interactive components, adaptive features\n Create sophisticated template system with consistent branding\n Consider accessibility, mobile optimization, and universal design\n\n**STEP 2: Integration Architecture**\n Plan seamless connections between all material types\n Design advanced spaced repetition integration with learning analytics\n Create sophisticated cross-referencing and linking systems\n Plan for collaborative features and social learning elements\n\n**STEP 3: Quality Excellence Framework**\n Establish enterprise-grade quality standards and measurement\n Design comprehensive user testing and feedback integration\n Plan continuous optimization based on learning effectiveness data\n Create scalable maintenance and update protocols\n\nGoal: Strategic foundation for learning materials that optimize effectiveness through sophisticated design.",
173
+ "prompt": "Develop sophisticated strategy for enterprise-grade materials:\n\n**STEP 1: Multi-Modal Format Strategy**\n\u2022 Design format variety to engage different learning modes\n\u2022 Plan advanced visual elements, interactive components, adaptive features\n\u2022 Create sophisticated template system with consistent branding\n\u2022 Consider accessibility, mobile optimization, and universal design\n\n**STEP 2: Integration Architecture**\n\u2022 Plan seamless connections between all material types\n\u2022 Design advanced spaced repetition integration with learning analytics\n\u2022 Create sophisticated cross-referencing and linking systems\n\u2022 Plan for collaborative features and social learning elements\n\n**STEP 3: Quality Excellence Framework**\n\u2022 Establish enterprise-grade quality standards and measurement\n\u2022 Design comprehensive user testing and feedback integration\n\u2022 Plan continuous optimization based on learning effectiveness data\n\u2022 Create scalable maintenance and update protocols\n\nGoal: Strategic foundation for learning materials that optimize effectiveness through sophisticated design.",
174
174
  "agentRole": "You are a learning systems architect with expertise in enterprise-grade materials design. Help users create sophisticated strategies that maximize learning effectiveness through advanced features and optimization.",
175
175
  "guidance": [
176
176
  "Apply advanced instructional design and learning optimization principles",
@@ -1,5 +1,5 @@
1
1
  {
2
- "id": "presentation-creation",
2
+ "id": "wr.presentation-creation",
3
3
  "name": "Presentation Creation Workflow",
4
4
  "version": "1.0.0",
5
5
  "metricsProfile": "none",
@@ -64,9 +64,9 @@
64
64
  "promptBlocks": {
65
65
  "goal": "Define the core message and argument structure that will guide every slide and talking point. Using your audience profile from the previous step, build a content strategy grounded in their specific needs.",
66
66
  "constraints": [
67
- "Start with one core message a single, memorable sentence. Everything else should support it.",
67
+ "Start with one core message \u2014 a single, memorable sentence. Everything else should support it.",
68
68
  "Supporting arguments should directly address the audience motivations and pain points you identified.",
69
- "The narrative arc should feel natural for this audience in this context not a generic template unless it actually fits."
69
+ "The narrative arc should feel natural for this audience in this context \u2014 not a generic template unless it actually fits."
70
70
  ],
71
71
  "procedure": [
72
72
  "State your core message: one clear, memorable sentence that captures the single most important thing you want the audience to take away.",
@@ -93,7 +93,7 @@
93
93
  "constraints": [
94
94
  "One key idea per slide. If a slide is trying to say two things, split it.",
95
95
  "Plan for pacing: balance information-dense slides with breathing room and interaction moments.",
96
- "Think about how the slides will work if someone reads them later without you titles should be informative, not just labels."
96
+ "Think about how the slides will work if someone reads them later without you \u2014 titles should be informative, not just labels."
97
97
  ],
98
98
  "procedure": [
99
99
  "Opening (1-3 slides): attention-grabbing hook, context framing, and agenda or roadmap.",
@@ -117,7 +117,7 @@
117
117
  "id": "content-development",
118
118
  "title": "Content Development",
119
119
  "promptBlocks": {
120
- "goal": "Write the actual presentation content slide text and speaker notes following the approved outline. Every piece of content should be grounded in your audience profile and core message.",
120
+ "goal": "Write the actual presentation content \u2014 slide text and speaker notes \u2014 following the approved outline. Every piece of content should be grounded in your audience profile and core message.",
121
121
  "constraints": [
122
122
  "Write for the ear, not the eye. Slide text should be sparse; speaker notes should sound natural when spoken aloud.",
123
123
  "Use active voice and concrete language. Replace abstractions with specific examples.",
@@ -1,5 +1,5 @@
1
1
  {
2
- "id": "production-readiness-audit",
2
+ "id": "wr.production-readiness-audit",
3
3
  "name": "Production Readiness Audit",
4
4
  "version": "0.1.0",
5
5
  "metricsProfile": "research",