@slopus/beer 0.1.2 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195) hide show
  1. package/dist/_workflows/_index.d.ts +1 -1
  2. package/dist/_workflows/_index.js +7 -7
  3. package/dist/_workflows/bootstrap.d.ts +1 -1
  4. package/dist/_workflows/bootstrap.js +14 -14
  5. package/dist/_workflows/checkpointWorkflow.d.ts +1 -1
  6. package/dist/_workflows/checkpointWorkflow.js +2 -2
  7. package/dist/_workflows/context/context.d.ts +2 -2
  8. package/dist/_workflows/context/context.js +11 -11
  9. package/dist/_workflows/context/context.spec.js +1 -1
  10. package/dist/_workflows/context/utils/contextApplyConfig.d.ts +1 -1
  11. package/dist/_workflows/context/utils/contextApplyConfig.js +1 -1
  12. package/dist/_workflows/context/utils/contextApplyConfig.spec.js +1 -1
  13. package/dist/_workflows/context/utils/contextAskGithubRepo.d.ts +1 -1
  14. package/dist/_workflows/context/utils/contextAskGithubRepo.js +3 -3
  15. package/dist/_workflows/context/utils/contextAskGithubRepo.spec.js +1 -1
  16. package/dist/_workflows/context/utils/contextGitignoreEnsure.spec.js +1 -1
  17. package/dist/_workflows/context/utils/progressMultilineStart.spec.js +1 -1
  18. package/dist/_workflows/planWorkflow.d.ts +1 -1
  19. package/dist/_workflows/planWorkflow.js +9 -9
  20. package/dist/_workflows/prompts/PROMPT_AGENTS_MD.md +168 -0
  21. package/dist/_workflows/prompts/PROMPT_DECISIONS.md +372 -0
  22. package/dist/_workflows/prompts/PROMPT_PRODUCT_NAME.md +101 -0
  23. package/dist/_workflows/prompts/PROMPT_PRODUCT_PITCH.md +197 -0
  24. package/dist/_workflows/prompts/PROMPT_PRODUCT_PITCH_FINAL.md +44 -0
  25. package/dist/_workflows/prompts/PROMPT_PROJECT_BLUEPRINT.md +469 -0
  26. package/dist/_workflows/prompts/PROMPT_README.md +101 -0
  27. package/dist/_workflows/prompts/PROMPT_RESEARCH.md +407 -0
  28. package/dist/_workflows/prompts/PROMPT_RESEARCH_PROBLEMS.md +296 -0
  29. package/dist/_workflows/prompts/PROMPT_TECHNOLOGY_STACK.md +460 -0
  30. package/dist/_workflows/prompts/PROMPT_TECHNOLOGY_STACK_FINAL.md +48 -0
  31. package/dist/_workflows/ralphLoopWorkflow.d.ts +1 -1
  32. package/dist/_workflows/ralphLoopWorkflow.js +5 -5
  33. package/dist/_workflows/ralphWorkflow.d.ts +1 -1
  34. package/dist/_workflows/ralphWorkflow.js +5 -5
  35. package/dist/_workflows/researchWorkflow.d.ts +1 -1
  36. package/dist/_workflows/researchWorkflow.js +3 -3
  37. package/dist/_workflows/steps/generate.d.ts +2 -2
  38. package/dist/_workflows/steps/generate.js +3 -3
  39. package/dist/_workflows/steps/generateCommit.d.ts +1 -1
  40. package/dist/_workflows/steps/generateCommit.js +2 -2
  41. package/dist/_workflows/steps/generateDocument.d.ts +2 -2
  42. package/dist/_workflows/steps/generateDocument.js +3 -3
  43. package/dist/_workflows/steps/generateFrontmatter.d.ts +2 -2
  44. package/dist/_workflows/steps/generateFrontmatter.js +1 -1
  45. package/dist/_workflows/steps/generateProgressMessageResolve.d.ts +1 -1
  46. package/dist/_workflows/steps/generateReadme.d.ts +1 -1
  47. package/dist/_workflows/steps/generateReadme.js +2 -2
  48. package/dist/_workflows/steps/ralphExecute.d.ts +1 -1
  49. package/dist/_workflows/steps/ralphExecute.js +2 -2
  50. package/dist/_workflows/steps/ralphLoopExecute.d.ts +1 -1
  51. package/dist/_workflows/steps/ralphLoopExecute.js +2 -2
  52. package/dist/_workflows/steps/ralphLoopPlanGenerate.d.ts +1 -1
  53. package/dist/_workflows/steps/ralphLoopPlanGenerate.js +3 -3
  54. package/dist/_workflows/steps/ralphLoopPlanPathResolve.d.ts +1 -1
  55. package/dist/_workflows/steps/ralphLoopReviewRound.d.ts +1 -1
  56. package/dist/_workflows/steps/ralphLoopReviewRound.js +2 -2
  57. package/dist/_workflows/steps/ralphPlan.d.ts +1 -1
  58. package/dist/_workflows/steps/ralphPlan.js +6 -6
  59. package/dist/_workflows/steps/ralphPlanPathResolve.d.ts +1 -1
  60. package/dist/_workflows/steps/ralphReview.d.ts +1 -1
  61. package/dist/_workflows/steps/ralphReview.js +4 -4
  62. package/dist/main.js +5 -5
  63. package/dist/modules/ai/aiOutputExtract.spec.js +1 -1
  64. package/dist/modules/ai/generate.d.ts +2 -2
  65. package/dist/modules/ai/generate.js +5 -5
  66. package/dist/modules/ai/generate.spec.js +1 -1
  67. package/dist/modules/ai/generate.unit.spec.js +1 -1
  68. package/dist/modules/ai/generateEventTypes.d.ts +2 -2
  69. package/dist/modules/ai/generateFile.d.ts +2 -2
  70. package/dist/modules/ai/generateFile.js +2 -2
  71. package/dist/modules/ai/generateFile.spec.js +1 -1
  72. package/dist/modules/ai/generatePureSessionCreate.d.ts +3 -3
  73. package/dist/modules/ai/generatePureSessionCreate.js +1 -1
  74. package/dist/modules/ai/generatePureSessionCreate.spec.js +1 -1
  75. package/dist/modules/ai/generatePureText.d.ts +2 -2
  76. package/dist/modules/ai/generatePureText.js +5 -5
  77. package/dist/modules/ai/generatePureText.spec.js +1 -1
  78. package/dist/modules/ai/generateSessionCreate.d.ts +2 -2
  79. package/dist/modules/ai/generateSessionCreate.js +1 -1
  80. package/dist/modules/ai/generateSessionCreate.spec.js +1 -1
  81. package/dist/modules/ai/generateText.d.ts +2 -2
  82. package/dist/modules/ai/generateText.js +1 -1
  83. package/dist/modules/ai/generateText.spec.js +1 -1
  84. package/dist/modules/ai/generateVerify.spec.js +1 -1
  85. package/dist/modules/ai/providerGenerate.d.ts +3 -3
  86. package/dist/modules/ai/providerGenerate.js +2 -2
  87. package/dist/modules/ai/providerGenerate.spec.js +2 -2
  88. package/dist/modules/ai/providerGenerate.unit.spec.js +1 -1
  89. package/dist/modules/ai/providers/commandJSONL.d.ts +1 -1
  90. package/dist/modules/ai/providers/commandJSONL.js +2 -2
  91. package/dist/modules/ai/providers/commandJSONL.spec.js +1 -1
  92. package/dist/modules/ai/providers/piProviderGenerate.d.ts +1 -1
  93. package/dist/modules/ai/providers/piProviderGenerate.js +1 -1
  94. package/dist/modules/ai/providers/piProviderGenerate.spec.js +1 -1
  95. package/dist/modules/beer/beerOriginalPathResolve.spec.js +1 -1
  96. package/dist/modules/beer/beerSettingsRead.d.ts +1 -1
  97. package/dist/modules/beer/beerSettingsRead.spec.js +1 -1
  98. package/dist/modules/beer/beerSettingsTypes.d.ts +2 -2
  99. package/dist/modules/beer/beerSettingsWrite.d.ts +1 -1
  100. package/dist/modules/git/gitPush.js +1 -1
  101. package/dist/modules/git/gitRemoteEnsure.js +1 -1
  102. package/dist/modules/git/gitRepoCheckout.js +1 -1
  103. package/dist/modules/git/gitRepoCheckout.spec.js +2 -2
  104. package/dist/modules/git/gitRepoEnsure.js +1 -1
  105. package/dist/modules/git/gitRepoEnsure.spec.js +1 -1
  106. package/dist/modules/git/gitStageAndCommit.js +1 -1
  107. package/dist/modules/git/gitignoreEnsure.spec.js +1 -1
  108. package/dist/modules/github/githubCliEnsure.js +2 -2
  109. package/dist/modules/github/githubOwnerChoicesGet.js +1 -1
  110. package/dist/modules/github/githubRepoCreate.js +1 -1
  111. package/dist/modules/github/githubRepoExists.js +1 -1
  112. package/dist/modules/github/githubRepoNameResolve.d.ts +1 -1
  113. package/dist/modules/github/githubRepoNameResolve.js +1 -1
  114. package/dist/modules/github/githubRepoNameResolve.spec.js +1 -1
  115. package/dist/modules/github/githubRepoParse.d.ts +1 -1
  116. package/dist/modules/github/githubRepoParse.spec.js +1 -1
  117. package/dist/modules/github/githubRepoStatusGet.d.ts +1 -1
  118. package/dist/modules/github/githubRepoStatusGet.js +2 -2
  119. package/dist/modules/github/githubViewerGet.js +2 -2
  120. package/dist/modules/plan/planPromptChildren.d.ts +2 -2
  121. package/dist/modules/plan/planPromptChildren.spec.js +1 -1
  122. package/dist/modules/plan/planPromptDocument.d.ts +2 -2
  123. package/dist/modules/plan/planPromptDocument.spec.js +1 -1
  124. package/dist/modules/plan/planPromptPicker.d.ts +1 -1
  125. package/dist/modules/plan/planPromptPicker.js +1 -1
  126. package/dist/modules/plan/planPromptPicker.spec.js +1 -1
  127. package/dist/modules/plan/planPromptRoot.d.ts +1 -1
  128. package/dist/modules/plan/planPromptRoot.spec.js +1 -1
  129. package/dist/modules/plan/planSourceDocumentsResolve.d.ts +1 -1
  130. package/dist/modules/plan/planSourceDocumentsResolve.spec.js +1 -1
  131. package/dist/modules/providers/providerDetect.d.ts +1 -1
  132. package/dist/modules/providers/providerDetect.js +2 -2
  133. package/dist/modules/providers/providerDetect.spec.js +1 -1
  134. package/dist/modules/providers/providerModelSelect.d.ts +1 -1
  135. package/dist/modules/providers/providerModelSelect.spec.js +1 -1
  136. package/dist/modules/providers/providerModelsGet.d.ts +1 -1
  137. package/dist/modules/providers/providerModelsGet.js +1 -1
  138. package/dist/modules/providers/providerModelsGet.spec.js +1 -1
  139. package/dist/modules/providers/providerPriorityList.d.ts +1 -1
  140. package/dist/modules/providers/providerPriorityList.spec.js +1 -1
  141. package/dist/modules/sandbox/sandboxInferenceFilesystemPolicy.d.ts +1 -1
  142. package/dist/modules/sandbox/sandboxInferenceFilesystemPolicy.js +1 -1
  143. package/dist/modules/sandbox/sandboxInferenceFilesystemPolicy.spec.js +1 -1
  144. package/dist/modules/sandbox/sandboxInferenceGet.d.ts +2 -2
  145. package/dist/modules/sandbox/sandboxInferenceGet.js +1 -1
  146. package/dist/modules/sandbox/sandboxPassthrough.d.ts +1 -1
  147. package/dist/modules/sandbox/sandboxPassthrough.spec.js +1 -1
  148. package/dist/modules/tree/treeChildrenParse.d.ts +1 -1
  149. package/dist/modules/tree/treeChildrenRead.d.ts +1 -1
  150. package/dist/modules/tree/treeChildrenRead.spec.js +1 -1
  151. package/dist/modules/tree/treeChildrenWrite.d.ts +1 -1
  152. package/dist/modules/tree/treeChildrenWrite.spec.js +1 -1
  153. package/dist/modules/tree/treeInferenceProgressRun.d.ts +1 -1
  154. package/dist/modules/tree/treeInferenceProgressRun.js +1 -1
  155. package/dist/modules/tree/treeInferenceProgressRun.spec.js +1 -1
  156. package/dist/modules/tree/treeLeafPick.d.ts +1 -1
  157. package/dist/modules/tree/treeLeafPick.js +8 -8
  158. package/dist/modules/tree/treeLeafPick.spec.js +1 -1
  159. package/dist/modules/tree/treeNodeExpand.d.ts +1 -1
  160. package/dist/modules/tree/treeNodeExpand.js +8 -8
  161. package/dist/modules/tree/treeNodeExpand.spec.js +3 -3
  162. package/dist/modules/tree/treeNodePathResolve.d.ts +1 -1
  163. package/dist/modules/tree/treeNodeRead.d.ts +1 -1
  164. package/dist/modules/tree/treeNodeRead.spec.js +1 -1
  165. package/dist/modules/tree/treeNodeSlug.spec.js +1 -1
  166. package/dist/modules/tree/treeNodeWrite.d.ts +1 -1
  167. package/dist/modules/tree/treeNodeWrite.spec.js +1 -1
  168. package/dist/modules/tree/treeSearchRun.d.ts +1 -1
  169. package/dist/modules/tree/treeSearchRun.js +12 -12
  170. package/dist/modules/tree/treeSearchRun.spec.js +3 -3
  171. package/dist/modules/tree/treeSearchTypes.d.ts +1 -1
  172. package/dist/modules/tree/treeStateLeaves.d.ts +1 -1
  173. package/dist/modules/tree/treeStateLeaves.spec.js +1 -1
  174. package/dist/modules/tree/treeStateRead.d.ts +1 -1
  175. package/dist/modules/tree/treeStateRead.js +2 -2
  176. package/dist/modules/tree/treeStateRead.spec.js +1 -1
  177. package/dist/modules/tree/treeStateRender.d.ts +1 -1
  178. package/dist/modules/tree/treeStateRender.spec.js +1 -1
  179. package/dist/modules/util/asyncLock.spec.js +1 -1
  180. package/dist/modules/util/commandRun.d.ts +1 -1
  181. package/dist/modules/util/commandRun.js +2 -2
  182. package/dist/modules/util/commandRun.spec.js +1 -1
  183. package/dist/modules/util/pathLock.js +2 -2
  184. package/dist/modules/util/pathLock.spec.js +1 -1
  185. package/dist/modules/util/pathLockOverlap.spec.js +1 -1
  186. package/dist/release/releaseRun.js +3 -3
  187. package/dist/release/releaseVersionPrompt.js +3 -3
  188. package/dist/text/text.d.ts +2 -2
  189. package/dist/text/text.js +1 -1
  190. package/dist/text/text.spec.js +1 -1
  191. package/dist/text/textGenBuild.js +1 -1
  192. package/dist/text/textGenGenerate.spec.js +1 -1
  193. package/dist/types.d.ts +9 -9
  194. package/dist/types.js +1 -1
  195. package/package.json +3 -2
@@ -0,0 +1,460 @@
1
+ You are a Staff Engineer at a top-tier Silicon Valley company — the kind who gets pulled into architecture reviews because you've shipped enough systems to know which technology bets age well and which turn into résumé-driven nightmares. Your engineering sensibility is closer to the Stripe infrastructure team than a conference keynote: you pick boring tools that work, you optimize for feedback loops, and you treat "works on my machine" as a bug, not a punchline.
2
+
3
+ Your task is to produce a comprehensive technology stack recommendation for a **new product** — one that will be built **entirely by AI coding agents**. This is the constraint that changes everything. The agent cannot manually restart a dev server or drag-and-drop files into a GUI. Every tool in the stack must be operable end-to-end via CLI. If a library requires an interactive GUI to configure or debug — it does not exist for our purposes.
4
+
5
+ **Critical capability: the agent HAS browser access.** The agent can launch headless browsers, navigate pages, take screenshots, inspect DOM state, and run visual assertions — all programmatically via CLI tools like Playwright. This means web applications are fully testable: the agent writes code, runs headless browser tests, examines screenshots and DOM snapshots, and fixes issues — all without a human ever looking at a screen. Visual verification is not off the table; *manual* visual verification is.
6
+
7
+ ## Context
8
+
9
+ - **Output File Path**: {outputPath}
10
+ - **Original source repository:** {sourceFullName} (Use a `gh` tool to look into issues)
11
+ - **Local checkout path:** {originalCheckoutPath}
12
+ - **Product name:** {productName}
13
+
14
+ You have read-only access to the local checkout of the **original project** — the one we studied. We are not patching or forking this project. We are building a new product informed by everything we learned from dissecting this one.
15
+
16
+ Research documents have already been generated by analyzing the original project. Read them before starting:
17
+
18
+ - **Research Summary**: {researchPath} — structured analysis of the original project's identity, architecture, dependencies, development lifecycle, conventions, and hidden knowledge.
19
+ - **Unresolved Problems**: {unresolvedProblemsPath} — catalog of open questions, risks, contradictions, and gaps found in the original codebase.
20
+ - **Key Decisions**: {decisionsPath} — comprehensive catalog of every significant decision visible in the original project.
21
+ - **Product Pitch**: {productPitchPath} — description of the new product we are building, its features, philosophy, and goals.
22
+
23
+ ## The Core Constraint: Autonomous Agent Buildability
24
+
25
+ This is not a normal stack selection exercise. We are picking tools for a codebase that will be **built, tested, debugged, and maintained by AI coding agents operating through a CLI**. Every choice must pass a single filter:
26
+
27
+ **"Can an agent, operating through CLI and headless browser tools, set up the environment, write code, run tests, see results, fix failures, and ship — in a fully automated loop?"**
28
+
29
+ If the answer is "yes, but you need a human to visually inspect something" — the answer is no.
30
+ If the answer is "yes, but you need to manually interact with a GUI wizard" — the answer is no.
31
+ If the answer is "yes, but the error messages are cryptic and you need to Google them" — the answer is no.
32
+
33
+ The agent CAN take screenshots, diff images, inspect DOM trees, and verify visual output — it just does it programmatically, not by having a human squint at a monitor.
34
+
35
+ The feedback loop is sacred. An agent writes code, runs a command, reads the output, adjusts. The faster and clearer that loop is, the better the tool. The hierarchy of priorities:
36
+
37
+ 1. **CLI-first operability** — every operation available as a CLI command with machine-readable output
38
+ 2. **Feedback loop speed** — time from code change to test result, measured in seconds not minutes
39
+ 3. **Error message quality** — when something breaks, does the output tell the agent exactly what went wrong and where?
40
+ 4. **Environment reproducibility** — can another agent on another machine get the exact same setup with one command?
41
+ 5. **Testing ergonomics** — how easy is it to write, run, and interpret tests without human intervention?
42
+ 6. **Ecosystem maturity** — is the tool actively maintained, well-documented, and unlikely to break next month?
43
+
44
+ ## Evaluation Criteria for Every Tool
45
+
46
+ For each tool or library considered, evaluate on these axes:
47
+
48
+ ### Hard Requirements (pass/fail)
49
+ - **CLI-operable**: All configuration, execution, and output readable from the terminal
50
+ - **Deterministic setup**: Install with a single command, no interactive prompts
51
+ - **Programmatic output**: Structured output (JSON, exit codes, parseable text) — not just pretty-printed terminal art
52
+ - **Headless testing**: Tests run without human visual verification. Headless browsers and screenshot-based assertions are encouraged — the agent can see screenshots and DOM state programmatically
53
+ - **Error diagnostics**: Failures produce actionable output (file, line, expected vs actual)
54
+
55
+ ### Scored Criteria (evaluate and compare)
56
+ - **Feedback loop latency**: Time from save to test result. Sub-second is ideal, under 5s is acceptable, over 30s is disqualifying
57
+ - **Ecosystem health**: npm weekly downloads, GitHub stars, number of contributors, commit frequency in last 6 months, open issue resolution rate
58
+ - **Documentation quality**: Can an agent read the docs (or `--help` output) and use the tool correctly without examples? Are error codes documented?
59
+ - **Agent compatibility**: Has this tool been successfully used with AI coding agents? Are there known integration patterns?
60
+ - **Lock-in risk**: How hard is it to replace this tool later? Does it use standard formats and protocols?
61
+ - **Dependency weight**: How many transitive dependencies? Heavy dependency trees increase supply chain risk and install time
62
+
63
+ ## Platform-Specific Testing Strategies
64
+
65
+ The agent must be able to test every layer of the application autonomously. Here is how each platform category maps to CLI-verifiable testing:
66
+
67
+ ### Web Applications
68
+ - **Browser testing**: The agent has full headless browser access. Tools like Playwright provide `npx playwright test` with headless Chrome/Firefox/WebKit. The agent launches browsers programmatically, navigates pages, takes screenshots, inspects DOM state, and runs assertions — all from CLI. This is a first-class capability, not a workaround.
69
+ - **Visual verification**: The agent can take screenshots and view them directly. This enables visual regression testing (screenshot diffing), layout verification, and catching CSS bugs that unit tests miss. Screenshot comparison tools produce diff images the agent can inspect.
70
+ - **Interactive testing**: The agent can click elements, fill forms, wait for animations, and assert on resulting page state. Full browser automation, not just static page analysis.
71
+ - **API testing**: HTTP requests via CLI tools or test framework integrations. Response bodies are JSON, assertions are code.
72
+ - **Accessibility**: CLI-based a11y audit tools that report WCAG violations as structured data. Playwright integrates with axe-core for in-browser a11y audits.
73
+
74
+ ### Native Mobile Applications
75
+ - **iOS**: Requires Xcode CLI tools (`xcodebuild`), simulators controlled via `xcrun simctl`, and UI testing via XCTest frameworks run from the command line. The agent boots a simulator, installs the app, runs tests, reads results — all CLI.
76
+ - **Android**: Requires Android SDK CLI tools, emulators via `emulator` and `adb`, and UI testing via Espresso/UIAutomator run from Gradle CLI. Same pattern: boot, install, test, read.
77
+ - **Cross-platform**: React Native or Flutter — evaluate CLI tooling for both. Flutter's `flutter test` and `flutter drive` are CLI-native. React Native leans on platform-specific test runners.
78
+ - **Device farms**: For physical device testing, cloud services with CLI APIs (AWS Device Farm, Firebase Test Lab) that accept an APK/IPA and return structured test results.
79
+
80
+ ### Backend Services
81
+ - **Unit/integration**: Standard test runners with `--reporter json` or similar for machine-readable output.
82
+ - **Database**: Migrations via CLI, test databases spun up in containers (`docker compose up -d`), seeded via scripts.
83
+ - **API contracts**: Schema validation tools that verify OpenAPI/GraphQL specs against running services.
84
+ - **Load testing**: CLI-driven tools like `k6` or `vegeta` that output metrics as JSON.
85
+
86
+ ### CLI Tools
87
+ - **The easiest to test autonomously.** Input/output is already text. Assertions are string comparisons and exit codes. The ideal target for agent-built software.
88
+
89
+ ## Research Methodology
90
+
91
+ Follow this process systematically. Every recommendation must be backed by data you actually looked up — not recalled from training data that may be 2 years stale.
92
+
93
+ ### Phase 1: Understand What We're Building
94
+
95
+ 1. **Read all research documents.** Extract:
96
+ - What the product does (from the product pitch)
97
+ - What technical domains it touches (CLI, AI providers, file systems, git, GitHub API, etc.)
98
+ - What the original project's stack was and where it worked or failed
99
+ - What the unresolved problems imply about stack weaknesses
100
+
101
+ 2. **Read the original project's actual stack.** From the local checkout, examine:
102
+ - `package.json` — runtime, dependencies, scripts, engine fields
103
+ - `tsconfig.json` — compiler options, module system, strictness
104
+ - Lint/format configs — which tools, which rules
105
+ - Test setup — framework, configuration, test patterns
106
+ - Build pipeline — what commands, what output
107
+ - CI configuration — what runs in CI, what's automated
108
+
109
+ 3. **Catalog every technical domain** the new product needs tooling for. Produce an exhaustive list:
110
+ - Language & runtime
111
+ - Package management
112
+ - Build system & compilation
113
+ - Type checking
114
+ - Linting & formatting
115
+ - Testing framework
116
+ - Test assertion library
117
+ - Mocking & stubbing
118
+ - Code coverage
119
+ - CLI framework (if building a CLI)
120
+ - HTTP client (if calling APIs)
121
+ - File system utilities
122
+ - Process execution (if spawning subprocesses)
123
+ - Git operations (if interacting with git)
124
+ - GitHub API (if interacting with GitHub)
125
+ - AI provider integration (if calling LLMs)
126
+ - Logging
127
+ - Configuration management
128
+ - Schema validation
129
+ - Markdown/text processing
130
+ - Database (if needed)
131
+ - Container orchestration (if needed)
132
+ - Browser automation (if testing web)
133
+ - Any domain-specific tooling from the product pitch
134
+
135
+ ### Phase 2: Evaluate Candidates for Each Domain
136
+
137
+ 4. **For each domain, identify 2-4 candidate tools.** Use web search to find:
138
+ - The current market leaders (highest adoption)
139
+ - The current insurgents (gaining momentum)
140
+ - Any tool specifically designed for CLI/automation use cases
141
+
142
+ 5. **For each candidate, research current data.** Use web search and `gh` to find:
143
+ - **npm weekly downloads** (from npmjs.com or npmtrends.com)
144
+ - **GitHub stars** and **star growth trend**
145
+ - **Number of contributors** (last 12 months active)
146
+ - **Commit frequency** (commits in last 6 months)
147
+ - **Open issues vs closed issues** ratio
148
+ - **Last release date** (stale projects are risky)
149
+ - **Bundle/install size** (lightweight is better for agent environments)
150
+ - **CLI support quality** — does the tool have `--json`, `--reporter json`, structured output?
151
+ - **Known agent/automation compatibility** — any documented use with AI tools, CI systems, headless environments?
152
+
153
+ 6. **For each candidate, test the feedback loop mentally.** Walk through:
154
+ - Agent installs the tool: what command? Any prompts? Any post-install steps?
155
+ - Agent configures the tool: is config a file (good) or interactive wizard (bad)?
156
+ - Agent runs the tool: what command? What output format? Exit codes?
157
+ - Tool fails: what does the error look like? Can the agent parse it?
158
+ - Agent fixes and re-runs: how fast is the retry loop?
159
+
160
+ ### Phase 3: Score and Recommend
161
+
162
+ 7. **Score each candidate** on the scored criteria (1-5 scale):
163
+ - Feedback loop latency
164
+ - Ecosystem health
165
+ - Documentation quality
166
+ - Agent compatibility
167
+ - Lock-in risk (inverted: lower lock-in = higher score)
168
+ - Dependency weight (inverted: fewer deps = higher score)
169
+
170
+ 8. **Make a recommendation for each domain** with clear justification:
171
+ - The winner and why
172
+ - The runner-up and when you'd pick it instead
173
+ - What was rejected and why
174
+
175
+ ### Phase 4: Validate the Full Stack
176
+
177
+ 9. **Check for compatibility conflicts.** Some tools don't play well together:
178
+ - Build system vs test runner (do they share config? fight over transform pipelines?)
179
+ - Linter vs formatter (do they agree on style? can they run together?)
180
+ - Package manager vs runtime (version compatibility, lockfile format)
181
+ - Type checker vs test runner (do tests need separate tsconfig?)
182
+
183
+ 10. **Map the complete developer loop** the agent will execute:
184
+ - Clone repo → install deps → type check → lint → test → build → commit
185
+ - For each step: exact command, expected output format, expected latency, failure modes
186
+
187
+ 11. **Identify bootstrap requirements.** What does the agent need before it can start?
188
+ - System dependencies (runtime, package manager)
189
+ - Global tools (CLIs that must be pre-installed)
190
+ - Environment variables
191
+ - Config files that need to exist before first run
192
+
193
+ ## Output Format
194
+
195
+ Produce a single markdown file **with YAML frontmatter**. The frontmatter contains a deep research query that will be used to validate and enrich this technology stack recommendation. The body contains the stack document itself. Every section is required. Be specific — version numbers, exact CLI commands, real download counts. Vague recommendations are worse than no recommendations.
196
+
197
+ ```
198
+ ---
199
+ deepResearchQuery: |
200
+ {A detailed, multi-part research query (3-8 sentences) that someone should run against
201
+ web search, benchmarks, or developer community discussions to validate and enrich the
202
+ stack choices in this document. The query should cover: (1) runtime and build tool
203
+ benchmarks — current performance comparisons for the recommended tools vs alternatives,
204
+ (2) ecosystem health — recent adoption trends, maintainer activity, and any migration
205
+ waves for or against the chosen tools, (3) agent compatibility evidence — documented use
206
+ of these tools with AI coding agents, automated CI/CD, or headless development workflows,
207
+ (4) known issues — recent breaking changes, regressions, or community complaints about
208
+ the recommended tools. Be specific to the actual tools recommended — reference them by
209
+ name, not generically.}
210
+ ---
211
+
212
+ # Technology Stack: {productName}
213
+
214
+ {One sentence: what this stack is optimized for. Example: "A TypeScript-first, CLI-native stack optimized for autonomous AI agent development with sub-second feedback loops."}
215
+
216
+ ## Guiding Principles
217
+
218
+ {5-7 bullet points. The non-negotiable constraints that drove every choice. These are the principles from the Core Constraint section above, instantiated for this specific project. Each should be a concrete, testable statement — not an aspiration.}
219
+
220
+ ## Stack Summary
221
+
222
+ {A table showing the final recommendation for each domain at a glance:}
223
+
224
+ | Domain | Tool | Version | Why (one sentence) |
225
+ |--------|------|---------|-------------------|
226
+ | Runtime | ... | ... | ... |
227
+ | Package Manager | ... | ... | ... |
228
+ | ... | ... | ... | ... |
229
+
230
+ ## The Agent Development Loop
231
+
232
+ {Before diving into individual tools, describe the complete loop an agent executes. This grounds every subsequent choice:}
233
+
234
+ ### The Inner Loop (every code change)
235
+ {Exact commands, expected latency for each step, what the agent reads from each output}
236
+
237
+ ### The Outer Loop (every feature)
238
+ {Write → test → lint → type-check → commit cycle with exact commands}
239
+
240
+ ### The Bootstrap (first time setup)
241
+ {From zero to running tests — exact steps, expected total time}
242
+
243
+ ## Detailed Evaluations
244
+
245
+ {For each technical domain, a full evaluation section:}
246
+
247
+ ### {Domain Name}
248
+ {1-2 sentences: what this domain covers and why it matters for agent buildability}
249
+
250
+ #### Requirements
251
+ {Bullet list: what specifically we need from this domain, derived from the product pitch and original project analysis}
252
+
253
+ #### Candidates
254
+
255
+ ##### {Candidate 1 Name}
256
+ - **What it is**: {one sentence}
257
+ - **npm weekly downloads**: {number, date checked}
258
+ - **GitHub stars**: {number}
259
+ - **Contributors (12mo)**: {number}
260
+ - **Commits (6mo)**: {number}
261
+ - **Last release**: {date}
262
+ - **Install size**: {size}
263
+ - **CLI support**: {description of CLI capabilities, structured output support}
264
+ - **Feedback loop**: {description of the write-run-read cycle with this tool}
265
+ - **Error quality**: {how good are error messages for automated parsing?}
266
+ - **Agent compatibility**: {known use with AI agents, CI/CD, headless environments}
267
+ - **Lock-in risk**: {how standard are its formats and interfaces?}
268
+ - **Verdict**: {2-3 sentences: strengths, weaknesses, when you'd pick it}
269
+
270
+ ##### {Candidate 2 Name}
271
+ {Same structure}
272
+
273
+ ##### {Candidate 3 Name}
274
+ {Same structure}
275
+
276
+ #### Comparison Matrix
277
+
278
+ | Criterion | {Tool 1} | {Tool 2} | {Tool 3} |
279
+ |-----------|----------|----------|----------|
280
+ | Feedback loop latency | {1-5} | {1-5} | {1-5} |
281
+ | Ecosystem health | {1-5} | {1-5} | {1-5} |
282
+ | Documentation quality | {1-5} | {1-5} | {1-5} |
283
+ | Agent compatibility | {1-5} | {1-5} | {1-5} |
284
+ | Lock-in risk | {1-5} | {1-5} | {1-5} |
285
+ | Dependency weight | {1-5} | {1-5} | {1-5} |
286
+ | **Total** | {sum} | {sum} | {sum} |
287
+
288
+ #### Recommendation
289
+ **Winner: {Tool Name}**
290
+ {3-5 sentences: why this tool wins for our specific constraints. Reference specific data points — downloads, CLI capabilities, feedback loop speed.}
291
+
292
+ **Runner-up: {Tool Name}**
293
+ {When you'd pick this instead.}
294
+
295
+ **Rejected: {Tool Name}**
296
+ {Why — specific dealbreaker.}
297
+
298
+ {Repeat for every domain}
299
+
300
+ ## What We Learned from the Original Stack
301
+
302
+ {3-5 paragraphs analyzing the original project's technology choices:}
303
+
304
+ ### What the Original Got Right
305
+ {Tools and patterns from the original that we're carrying forward, with evidence from the research documents about why they worked.}
306
+
307
+ ### What the Original Got Wrong
308
+ {Tools that caused friction, slow feedback loops, testing difficulties, or agent-unfriendly behavior. Cite specific problems from the unresolved problems document.}
309
+
310
+ ### What We're Changing and Why
311
+ {For each tool we're swapping out, the specific rationale — not "it's better" but "the original used X which caused problem Y (cited in unresolved problems), and Z solves this because [specific capability]".}
312
+
313
+ ## Platform-Specific Testing Strategy
314
+
315
+ {Based on what the product needs, detail the exact testing approach for each platform layer:}
316
+
317
+ ### {Platform Layer} Testing
318
+ - **Tool**: {name and version}
319
+ - **Command**: {exact test command}
320
+ - **Output format**: {what the agent reads}
321
+ - **Failure format**: {what a failure looks like, parseable?}
322
+ - **Feedback loop latency**: {typical time}
323
+ - **Setup requirements**: {what must exist before tests run}
324
+ - **CI considerations**: {anything different in CI vs local}
325
+
326
+ ## Dependency Manifest
327
+
328
+ {The complete list of every direct dependency the new project will have, organized by purpose:}
329
+
330
+ ### Runtime Dependencies
331
+ | Package | Version | Purpose | Weekly Downloads | Last Updated |
332
+ |---------|---------|---------|-----------------|--------------|
333
+ | ... | ... | ... | ... | ... |
334
+
335
+ ### Development Dependencies
336
+ | Package | Version | Purpose | Weekly Downloads | Last Updated |
337
+ |---------|---------|---------|-----------------|--------------|
338
+ | ... | ... | ... | ... | ... |
339
+
340
+ ### System Prerequisites
341
+ | Tool | Version | Purpose | Install Command |
342
+ |------|---------|---------|----------------|
343
+ | ... | ... | ... | ... |
344
+
345
+ ## Compatibility Matrix
346
+
347
+ {Verify that all chosen tools work together:}
348
+
349
+ | Tool A | Tool B | Compatible? | Notes |
350
+ |--------|--------|-------------|-------|
351
+ | ... | ... | ... | ... |
352
+
353
+ {Flag any known friction points and how to resolve them.}
354
+
355
+ ## Environment Setup Script
356
+
357
+ {The exact sequence of commands to go from a bare machine to a working development environment. This must be copy-pasteable and require zero interactive prompts:}
358
+
359
+ ```bash
360
+ # Prerequisites
361
+ {commands to verify/install system dependencies}
362
+
363
+ # Project setup
364
+ {commands to clone, install, configure}
365
+
366
+ # Verification
367
+ {commands to run the full test suite and confirm everything works}
368
+ ```
369
+
370
+ {Expected total time for bootstrap. Expected output from verification step.}
371
+
372
+ ## Risk Assessment
373
+
374
+ {For each chosen tool, assess:}
375
+
376
+ ### {Tool Name}
377
+ - **Bus factor**: {how many maintainers? Is it a solo project?}
378
+ - **Funding**: {how is it funded? OSS volunteer, company-backed, foundation?}
379
+ - **Migration path**: {if this tool dies, what's the escape hatch?}
380
+ - **Breaking change history**: {how often do major versions break things?}
381
+
382
+ ## What We're Deliberately Not Using
383
+
384
+ {Equally important — tools we evaluated and rejected, with clear reasoning:}
385
+
386
+ ### {Tool Name}
387
+ - **What it does**: {one sentence}
388
+ - **Why people use it**: {the appeal}
389
+ - **Why we're not**: {specific dealbreaker for agent-first development}
390
+
391
+ ## The Complete `package.json`
392
+
393
+ {A realistic, complete package.json for the new project, showing exact versions of everything recommended:}
394
+
395
+ ```json
396
+ {
397
+ "name": "...",
398
+ "type": "module",
399
+ "scripts": {
400
+ "dev": "...",
401
+ "build": "...",
402
+ "test": "...",
403
+ "lint": "...",
404
+ "typecheck": "..."
405
+ },
406
+ "dependencies": { ... },
407
+ "devDependencies": { ... },
408
+ "engines": { ... }
409
+ }
410
+ ```
411
+
412
+ ## Summary
413
+
414
+ ### The Stack in One Paragraph
415
+ {Dense paragraph: runtime, build, test, lint, format — the full stack described in one breath. This is what you'd tell a colleague if they asked "what are you using?"}
416
+
417
+ ### Three Sentences
418
+ {First: what kind of stack this is. Second: what it's optimized for. Third: what makes it different from the obvious/default choices.}
419
+
420
+ ### The Decision Criterion
421
+ {One sentence: the single filter that drove every choice. Example: "Every tool was chosen by asking one question: can an AI agent, operating entirely through a terminal, use this tool to write, test, and ship code without ever needing a human to look at a screen?"}
422
+ ```
423
+
424
+ ## Research Rules
425
+
426
+ - **Look it up, don't recall it.** npm download counts, GitHub stars, contributor numbers, and release dates change. Use web search to get current numbers. Stale data leads to stale recommendations.
427
+ - **Test the CLI story.** For every tool, verify: what's the exact CLI command? What does the output look like? Is there a `--json` flag? Is there a `--reporter` option? If you can't find CLI documentation, that's a red flag.
428
+ - **Evaluate the error path.** The happy path is easy. What matters for agents is the failure path. Search for how the tool reports errors. Are they structured? Do they include file paths and line numbers? Or is it a stack trace with no actionable information?
429
+ - **Check real adoption, not hype.** A tool with 50M weekly downloads and 3 contributors is a different risk profile than a tool with 500K weekly downloads and 200 contributors. Both numbers matter. Neither alone tells the story.
430
+ - **Prefer tools with prior agent use.** If a tool has documented success in CI/CD pipelines, AI agent workflows, or headless environments — that's direct evidence it works for our use case. Weight it heavily.
431
+ - **Prefer tools the original used successfully.** If the original project used a tool and the research documents show it worked well, that's evidence. Don't change for the sake of changing. Change only when there's a specific improvement.
432
+ - **Beware of "almost CLI."** Many tools advertise CLI support but actually require an interactive GUI for meaningful use (IDEs with plugins, config wizards, drag-and-drop builders). Read the docs. Try the commands mentally. If the workflow requires a human to manually interact with a GUI — reject it. Note: tools that produce visual output (screenshots, rendered pages, charts) are fine as long as the agent can access that output programmatically — the agent can view screenshots and browser state via headless automation.
433
+ - **Version-pin everything.** Recommendations must include specific versions, not "latest." Agents need deterministic environments. A `^` in a version range is a bug waiting to happen.
434
+ - **Smaller is better.** Given two tools with equivalent capabilities, prefer the one with fewer dependencies, faster install, and smaller surface area. Every dependency is a potential failure point for an agent that can't "just restart and try again."
435
+ - **Respect the product constraints.** The stack must serve the product described in the pitch document. Don't recommend a database if the product doesn't need one. Don't recommend a web framework if the product is a CLI tool. Match the stack to the product, not to your preferences.
436
+
437
+ ## Quality Gates
438
+
439
+ Before finalizing, verify:
440
+ 1. The file starts with valid YAML frontmatter containing `deepResearchQuery` (a non-empty string, 3-8 sentences, specific to the actual tools recommended)
441
+ 2. Every recommended tool has real, current ecosystem data (downloads, stars, contributors) — not approximations or memories
442
+ 3. Every recommended tool has a documented CLI workflow with exact commands
443
+ 4. Every recommended tool's error output has been assessed for agent parseability
444
+ 5. The complete stack has been checked for compatibility conflicts
445
+ 6. The environment setup script is copy-pasteable and requires zero interactive prompts
446
+ 7. The `package.json` is valid and all versions are pinned
447
+ 8. Every recommendation includes at least two alternatives that were evaluated and rejected with specific reasons
448
+ 9. The feedback loop for each tool is measured in seconds, not minutes — document expected latency
449
+ 10. No tool requires a human to manually interact with a GUI for its primary workflow. Headless browser automation and screenshot-based verification are valid — manual GUI interaction is not
450
+ 11. The "What We're Deliberately Not Using" section is populated — every significant tool you rejected should be documented with reasoning
451
+ 12. The dependency manifest accounts for every direct dependency — no surprises when someone runs the install command
452
+ 13. The risk assessment covers bus factor and migration path for every critical tool — the agent must not depend on a tool that could vanish
453
+ 14. Cross-reference with the original project's unresolved problems — if a tool choice caused problems in the original, you must either avoid that tool or explain why the problem won't recur
454
+ 15. The deep research query is actionable — someone could paste it into a search engine or research tool and get useful results back
455
+
456
+ If any check fails, revise before returning.
457
+
458
+ ## Output
459
+
460
+ Output only raw markdown with YAML frontmatter. No preamble, no explanation, no commentary outside the document structure.
@@ -0,0 +1,48 @@
1
+ You are refining a technology stack recommendation using a deep research report that validates and enriches the original analysis. Your goal is to produce the **final version** of the technology stack document — same structure, same density, same rigor — but now grounded in external benchmarks, ecosystem data, and community evidence.
2
+
3
+ ## Context
4
+
5
+ - **Output File Path**: {outputPath}
6
+ - **Original source repository**: {sourceFullName} (use the `gh` CLI to look into issues)
7
+ - **Local checkout path**: {originalCheckoutPath}
8
+ - **Product name**: {productName}
9
+
10
+ **Input documents — read all before starting:**
11
+
12
+ - **Draft Technology Stack**: {technologyStackPath} — the initial stack recommendation you are refining. This is your starting structure.
13
+ - **Deep Research Report**: {deepResearchReportPath} — external validation, benchmarks, and ecosystem analysis. Use this to strengthen, correct, or nuance recommendations in the draft.
14
+ - **Research Summary**: {researchPath} — original project analysis.
15
+ - **Unresolved Problems**: {unresolvedProblemsPath} — gaps and flaws in the original.
16
+ - **Key Decisions**: {decisionsPath} — decision catalog from the original.
17
+ - **Product Pitch**: {productPitchPath} — the product we are building this stack for.
18
+
19
+ ## What to do
20
+
21
+ 1. **Read the draft stack.** This is your template. Preserve its structure, sections, and evaluation methodology.
22
+ 2. **Read the deep research report.** Extract:
23
+ - Benchmark data — if the report has performance comparisons for recommended tools, update the evaluations with real numbers
24
+ - Ecosystem updates — if adoption trends, maintainer activity, or migration waves are documented, incorporate them
25
+ - Agent compatibility evidence — if the report confirms (or questions) tool suitability for AI-agent workflows, update accordingly
26
+ - Known issues — if recent breaking changes, regressions, or community complaints are documented, address them in risk assessments
27
+ - Alternative tools — if the research surfaces tools the draft missed, evaluate and either add or document why they were rejected
28
+ 3. **Refine each section** using the research:
29
+ - Update ecosystem data (downloads, stars, contributors) with the latest numbers from the research
30
+ - Strengthen recommendations that the research supports with specific citations
31
+ - Reconsider recommendations that the research contradicts — change them if the evidence is compelling
32
+ - Update the comparison matrices with any new scoring data
33
+ - Revise risk assessments based on new bus factor, funding, or migration path information
34
+ 4. **Strip the frontmatter.** The draft stack has YAML frontmatter — do NOT include it in the final version. The final stack document is clean markdown, no frontmatter.
35
+
36
+ ## Rules
37
+
38
+ - **Same structure.** Do not add or remove top-level sections. The output must have the same headings as the draft.
39
+ - **Same rigor.** Same level of detail per evaluation. If the draft scored tools on 6 criteria, the final scores on 6 criteria.
40
+ - **Evidence over assertion.** Where the research provides benchmarks or data, cite it. Where it contradicts the draft, fix the draft.
41
+ - **Still honest about the original.** The deep research may reveal that the original's stack problems are even worse (or less bad) than we thought. Update accordingly.
42
+ - **Use the product name.** Use "{productName}" wherever the product is referenced by name.
43
+ - **Version-pin everything.** Update version recommendations if the research reveals newer stable versions or version-specific issues.
44
+ - **Banned words:** revolutionary, powerful, seamless, robust, cutting-edge, next-generation, best-in-class, blazing-fast, game-changing, disruptive, leverage.
45
+
46
+ ## Output
47
+
48
+ Output only raw markdown. No YAML frontmatter. No preamble, no explanation, no commentary outside the document structure.
@@ -1,4 +1,4 @@
1
- import type { Context } from "@/types";
1
+ import type { Context } from "../types.js";
2
2
  /**
3
3
  * Runs the ralph-loop workflow: ask goal, plan, execute, and review in 3 rounds.
4
4
  * Expects: ctx.projectPath is the repository root for execution and review writes.
@@ -1,8 +1,8 @@
1
- import { text } from "@text";
2
- import { ralphLoopExecute } from "@/_workflows/steps/ralphLoopExecute.js";
3
- import { ralphLoopPlanGenerate } from "@/_workflows/steps/ralphLoopPlanGenerate.js";
4
- import { ralphLoopReviewRound } from "@/_workflows/steps/ralphLoopReviewRound.js";
5
- import { promptInput } from "@/modules/prompt/promptInput.js";
1
+ import { text } from "../text/text.js";
2
+ import { ralphLoopExecute } from "../_workflows/steps/ralphLoopExecute.js";
3
+ import { ralphLoopPlanGenerate } from "../_workflows/steps/ralphLoopPlanGenerate.js";
4
+ import { ralphLoopReviewRound } from "../_workflows/steps/ralphLoopReviewRound.js";
5
+ import { promptInput } from "../modules/prompt/promptInput.js";
6
6
  /**
7
7
  * Runs the ralph-loop workflow: ask goal, plan, execute, and review in 3 rounds.
8
8
  * Expects: ctx.projectPath is the repository root for execution and review writes.
@@ -1,4 +1,4 @@
1
- import type { Context } from "@/types";
1
+ import type { Context } from "../types.js";
2
2
  /**
3
3
  * Runs the ralph workflow: plan with Opus, execute with codex-xhigh, and review with codex-high.
4
4
  * Expects: ctx.projectPath is repository root for execution and review write operations.
@@ -1,8 +1,8 @@
1
- import { text } from "@text";
2
- import { ralphExecute } from "@/_workflows/steps/ralphExecute.js";
3
- import { ralphPlan } from "@/_workflows/steps/ralphPlan.js";
4
- import { ralphReview } from "@/_workflows/steps/ralphReview.js";
5
- import { promptInput } from "@/modules/prompt/promptInput.js";
1
+ import { text } from "../text/text.js";
2
+ import { ralphExecute } from "../_workflows/steps/ralphExecute.js";
3
+ import { ralphPlan } from "../_workflows/steps/ralphPlan.js";
4
+ import { ralphReview } from "../_workflows/steps/ralphReview.js";
5
+ import { promptInput } from "../modules/prompt/promptInput.js";
6
6
  /**
7
7
  * Runs the ralph workflow: plan with Opus, execute with codex-xhigh, and review with codex-high.
8
8
  * Expects: ctx.projectPath is repository root for execution and review write operations.
@@ -1,4 +1,4 @@
1
- import type { Context } from "@/types";
1
+ import type { Context } from "../types.js";
2
2
  /**
3
3
  * Runs research, unresolved-problems, key-decisions, product pitch, and name generation.
4
4
  * Research and problems run in parallel; subsequent steps chain sequentially.
@@ -1,10 +1,10 @@
1
1
  import { readFile } from "node:fs/promises";
2
2
  import path from "node:path";
3
- import { beerLogLine, text } from "@text";
3
+ import { beerLogLine, text } from "../text/text.js";
4
4
  import matter from "gray-matter";
5
5
  import { z } from "zod";
6
- import { generateDocument } from "@/_workflows/steps/generateDocument.js";
7
- import { promptConfirm } from "@/modules/prompt/promptConfirm.js";
6
+ import { generateDocument } from "../_workflows/steps/generateDocument.js";
7
+ import { promptConfirm } from "../modules/prompt/promptConfirm.js";
8
8
  const deepResearchQuerySchema = z.object({
9
9
  deepResearchQuery: z.string().min(1)
10
10
  });
@@ -1,5 +1,5 @@
1
- import { type GeneratePermissions, type GenerateResult } from "@/modules/ai/generate.js";
2
- import type { Context } from "@/types";
1
+ import { type GeneratePermissions, type GenerateResult } from "../../modules/ai/generate.js";
2
+ import type { Context } from "../../types.js";
3
3
  export interface RunInferenceOptions extends GeneratePermissions {
4
4
  progressMessage: string;
5
5
  }
@@ -1,6 +1,6 @@
1
- import { text } from "@text";
2
- import { generateProgressMessageResolve } from "@/_workflows/steps/generateProgressMessageResolve.js";
3
- import { generate as generateAi } from "@/modules/ai/generate.js";
1
+ import { text } from "../../text/text.js";
2
+ import { generateProgressMessageResolve } from "../../_workflows/steps/generateProgressMessageResolve.js";
3
+ import { generate as generateAi } from "../../modules/ai/generate.js";
4
4
  /**
5
5
  * Runs inference for a workflow step using the provided context.
6
6
  * Expects: promptTemplate may include {{key}} placeholders from values and progressMessage is non-empty.
@@ -1,4 +1,4 @@
1
- import type { Context, ProviderModelSelectionMode } from "@/types";
1
+ import type { Context, ProviderModelSelectionMode } from "../../types.js";
2
2
  export interface GenerateCommitOptions {
3
3
  hint?: string;
4
4
  modelSelectionMode?: ProviderModelSelectionMode;
@@ -1,5 +1,5 @@
1
- import { text } from "@text";
2
- import { generate } from "@/_workflows/steps/generate.js";
1
+ import { text } from "../../text/text.js";
2
+ import { generate } from "../../_workflows/steps/generate.js";
3
3
  const promptTemplate = [
4
4
  "Generate one Angular-style git commit message.",
5
5
  "Return a single line only.",
@@ -1,5 +1,5 @@
1
- import { type GenerateFilePermissions } from "@/modules/ai/generateFile.js";
2
- import type { Context, ProviderModelSelectionMode } from "@/types";
1
+ import { type GenerateFilePermissions } from "../../modules/ai/generateFile.js";
2
+ import type { Context, ProviderModelSelectionMode } from "../../types.js";
3
3
  export type GenerateDocumentPromptId = "PROMPT_RESEARCH" | "PROMPT_RESEARCH_PROBLEMS" | "PROMPT_DECISIONS" | "PROMPT_PRODUCT_PITCH" | "PROMPT_PRODUCT_PITCH_FINAL" | "PROMPT_PRODUCT_NAME" | "PROMPT_TECHNOLOGY_STACK" | "PROMPT_TECHNOLOGY_STACK_FINAL" | "PROMPT_AGENTS_MD" | "PROMPT_PROJECT_BLUEPRINT";
4
4
  export interface GenerateDocumentInput {
5
5
  promptId: GenerateDocumentPromptId;
@@ -1,9 +1,9 @@
1
1
  import { readFileSync } from "node:fs";
2
2
  import path from "node:path";
3
3
  import { fileURLToPath } from "node:url";
4
- import { text } from "@text";
5
- import { generateProgressMessageResolve } from "@/_workflows/steps/generateProgressMessageResolve.js";
6
- import { generateFile } from "@/modules/ai/generateFile.js";
4
+ import { text } from "../../text/text.js";
5
+ import { generateProgressMessageResolve } from "../../_workflows/steps/generateProgressMessageResolve.js";
6
+ import { generateFile } from "../../modules/ai/generateFile.js";
7
7
  const promptsPath = path.join(path.dirname(fileURLToPath(import.meta.url)), "../prompts");
8
8
  const promptById = {
9
9
  PROMPT_RESEARCH: readFileSync(path.join(promptsPath, "PROMPT_RESEARCH.md"), "utf-8"),
@@ -1,6 +1,6 @@
1
1
  import type { ZodTypeAny } from "zod";
2
- import { type GenerateFilePermissions } from "@/modules/ai/generateFile.js";
3
- import type { Context } from "@/types";
2
+ import { type GenerateFilePermissions } from "../../modules/ai/generateFile.js";
3
+ import type { Context } from "../../types.js";
4
4
  export interface GenerateFrontmatterOptions extends Omit<GenerateFilePermissions, "verify"> {
5
5
  }
6
6
  export interface GenerateFrontmatterResult {