ace-interview-prep 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. package/README.md +87 -38
  2. package/dist/commands/dispute.js +226 -0
  3. package/dist/commands/feedback.js +57 -22
  4. package/dist/commands/generate.js +31 -13
  5. package/dist/commands/reset.js +60 -27
  6. package/dist/commands/score.js +41 -26
  7. package/dist/commands/setup.js +38 -6
  8. package/dist/commands/test.js +19 -11
  9. package/dist/index.js +33 -3
  10. package/dist/lib/categories.js +12 -4
  11. package/dist/lib/llm.js +13 -8
  12. package/dist/lib/scaffold.js +1 -11
  13. package/dist/lib/scorecard.js +32 -1
  14. package/dist/prompts/question-brainstorm.md +2 -2
  15. package/dist/prompts/question-generate.md +37 -10
  16. package/dist/prompts/test-dispute.md +54 -0
  17. package/dist/templates/web-components/Component.test.tsx.hbs +16 -0
  18. package/dist/templates/web-components/Component.tsx.hbs +29 -0
  19. package/package.json +1 -1
  20. package/dist/templates/web-components/component.test.ts.hbs +0 -11
  21. package/dist/templates/web-components/component.ts.hbs +0 -22
  22. package/dist/templates/web-components/index.html.hbs +0 -12
  23. package/questions/design-be/url-shortener/README.md +0 -23
  24. package/questions/design-be/url-shortener/notes.md +0 -27
  25. package/questions/design-be/url-shortener/scorecard.json +0 -1
  26. package/questions/design-fe/news-feed/README.md +0 -22
  27. package/questions/design-fe/news-feed/notes.md +0 -27
  28. package/questions/design-fe/news-feed/scorecard.json +0 -1
  29. package/questions/design-full/google-docs/README.md +0 -22
  30. package/questions/design-full/google-docs/notes.md +0 -27
  31. package/questions/design-full/google-docs/scorecard.json +0 -1
  32. package/questions/js-ts/debounce/README.md +0 -86
  33. package/questions/js-ts/debounce/scorecard.json +0 -9
  34. package/questions/js-ts/debounce/solution.test.ts +0 -128
  35. package/questions/js-ts/debounce/solution.ts +0 -4
  36. package/questions/leetcode-algo/two-sum/README.md +0 -58
  37. package/questions/leetcode-algo/two-sum/scorecard.json +0 -1
  38. package/questions/leetcode-algo/two-sum/solution.test.ts +0 -55
  39. package/questions/leetcode-algo/two-sum/solution.ts +0 -4
  40. package/questions/leetcode-ds/lru-cache/README.md +0 -70
  41. package/questions/leetcode-ds/lru-cache/scorecard.json +0 -1
  42. package/questions/leetcode-ds/lru-cache/solution.test.ts +0 -82
  43. package/questions/leetcode-ds/lru-cache/solution.ts +0 -14
  44. package/questions/react-apps/todo-app/App.test.tsx +0 -130
  45. package/questions/react-apps/todo-app/App.tsx +0 -10
  46. package/questions/react-apps/todo-app/README.md +0 -23
  47. package/questions/react-apps/todo-app/scorecard.json +0 -9
  48. package/questions/web-components/star-rating/README.md +0 -45
  49. package/questions/web-components/star-rating/component.test.ts +0 -64
  50. package/questions/web-components/star-rating/component.ts +0 -28
  51. package/questions/web-components/star-rating/index.html +0 -14
  52. package/questions/web-components/star-rating/scorecard.json +0 -9
package/README.md CHANGED
@@ -1,10 +1,9 @@
1
1
  # ace
2
2
 
3
3
  [![npm version](https://img.shields.io/npm/v/ace-interview-prep.svg)](https://www.npmjs.com/package/ace-interview-prep)
4
- [![CI](https://github.com/neel/ace-interview-prep/actions/workflows/ci.yml/badge.svg)](https://github.com/neel/ace-interview-prep/actions/workflows/ci.yml)
5
4
  [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
6
5
 
7
- A CLI tool for staff-engineer-level frontend interview preparation. Scaffolds questions with test cases, tracks progress with scorecards, and provides LLM-powered feedback.
6
+ A CLI tool for interview prep focusing on frontend. Scaffolds questions with test cases, tracks progress with scorecards, and provides LLM-powered feedback.
8
7
 
9
8
  ## Install
10
9
 
@@ -12,12 +11,6 @@ A CLI tool for staff-engineer-level frontend interview preparation. Scaffolds qu
12
11
  npm install -g ace-interview-prep
13
12
  ```
14
13
 
15
- Or run directly with npx:
16
-
17
- ```bash
18
- npx ace-interview-prep help
19
- ```
20
-
21
14
  ## Quick Start
22
15
 
23
16
  ### 1. Configure API Keys
@@ -27,30 +20,50 @@ ace setup
27
20
  ```
28
21
 
29
22
  Stores your OpenAI / Anthropic API keys in `~/.ace/config.json` (one-time, works across all workspaces).
23
+ If both keys are valid, `ace setup` prompts you to choose a default provider (`openai` or `anthropic`).
30
24
 
31
25
  ```bash
32
26
  # Non-interactive
33
27
  ace setup --openai-key sk-... --anthropic-key sk-ant-...
28
+
29
+ # Set default provider explicitly when both keys are configured
30
+ ace setup --openai-key sk-... --anthropic-key sk-ant-... --default-provider anthropic
34
31
  ```
35
32
 
36
33
  ### 2. Initialize a Workspace
37
34
 
38
- Navigate to any folder where you want to practice:
35
+ Navigate to any folder where you want to practice, then run:
39
36
 
40
37
  ```bash
41
38
  ace init
42
39
  ```
43
40
 
44
- Creates a `questions/` directory and vitest config files. Then install the test dependencies:
41
+ `ace init` bootstraps the workspace and installs dependencies for you. It:
42
+
43
+ ```bash
44
+ # Creates/updates workspace files
45
+ # - questions/
46
+ # - package.json (adds test scripts + devDependencies if missing)
47
+ # - tsconfig.json
48
+ # - vitest.config.ts
49
+ # - vitest.setup.ts
50
+ #
51
+ # Installs dependencies automatically via npm install
52
+ ```
53
+
54
+ If you need to regenerate workspace files:
45
55
 
46
56
  ```bash
47
- npm install vitest happy-dom @testing-library/jest-dom
57
+ ace init --force
48
58
  ```
49
59
 
50
60
  ### 3. Practice
51
61
 
52
62
  ```bash
53
- # Generate a question via LLM
63
+ # Generate a question interactively (prompts for category, difficulty, topic)
64
+ ace generate
65
+
66
+ # Or pass flags to skip prompts
54
67
  ace generate --topic "debounce" --category js-ts --difficulty medium
55
68
 
56
69
  # Interactive brainstorm mode
@@ -61,43 +74,77 @@ ace add
61
74
 
62
75
  # List all questions
63
76
  ace list
77
+ ```
78
+
79
+ ### 4. Test, Review, Track
64
80
 
81
+ All commands below work in three modes:
82
+
83
+ - **Interactive** — run with no arguments to pick from a selectable list
84
+ - **Direct** — pass a slug to target a specific question
85
+ - **All** — pass `--all` to operate on every question
86
+
87
+ ```bash
65
88
  # Run tests
66
- ace test debounce
67
- ace test # run all
68
- ace test --watch # watch mode
89
+ ace test # pick from list
90
+ ace test debounce # specific question
91
+ ace test --all # run all tests
92
+ ace test --watch # watch mode (with --all)
69
93
 
70
- # Get LLM feedback
71
- ace feedback debounce
94
+ # Get LLM feedback on your solution
95
+ ace feedback # pick from list
96
+ ace feedback debounce # specific question
97
+ ace feedback --all # review all questions (confirms each one)
72
98
 
73
99
  # View scorecard
74
- ace score debounce
100
+ ace score # pick from list
101
+ ace score debounce # specific question
102
+ ace score --all # show all scorecards
103
+
104
+ # Reset a question to its stub
105
+ ace reset # pick from list
106
+ ace reset debounce # specific question
107
+ ace reset --all # reset everything (with confirmation)
108
+ ```
109
+
110
+ ### 5. Dispute Potentially Incorrect Tests
111
+
112
+ Use this when your implementation appears correct but a generated test assertion might be wrong.
75
113
 
76
- # Reset a question
77
- ace reset debounce
114
+ ```bash
115
+ # Dispute interactively (pick a question)
116
+ ace dispute
117
+
118
+ # Dispute a specific question
119
+ ace dispute debounce
120
+
121
+ # Optional: force a provider for dispute analysis
122
+ ace dispute debounce --provider anthropic
78
123
  ```
79
124
 
125
+ If the verdict says the test is incorrect (or ambiguous), ace can apply a corrected test file and re-run tests.
126
+
80
127
  ## Question Categories
81
128
 
82
- | Category | Slug | Type |
83
- |----------|------|------|
84
- | JS/TS Puzzles | `js-ts` | Coding |
85
- | Web Components | `web-components` | Coding |
86
- | React Web Apps | `react-apps` | Coding |
87
- | LeetCode Data Structures | `leetcode-ds` | Coding |
88
- | LeetCode Algorithms | `leetcode-algo` | Coding |
89
- | System Design — Frontend | `design-fe` | Design |
90
- | System Design — Backend | `design-be` | Design |
91
- | System Design — Full Stack | `design-full` | Design |
129
+ | Category | Slug | Type | Focus |
130
+ |----------|------|------|-------|
131
+ | JS/TS Puzzles | `js-ts` | Coding | Closures, async patterns, type utilities |
132
+ | React Components | `web-components` | Coding | Props, events, composition, reusable UI |
133
+ | React Web Apps | `react-apps` | Coding | Hooks, state, routing, full features |
134
+ | LeetCode Data Structures | `leetcode-ds` | Coding | Trees, graphs, heaps, hash maps |
135
+ | LeetCode Algorithms | `leetcode-algo` | Coding | DP, greedy, two pointers, sorting |
136
+ | System Design — Frontend | `design-fe` | Design | Component architecture, state, rendering |
137
+ | System Design — Backend | `design-be` | Design | APIs, databases, caching, queues |
138
+ | System Design — Full Stack | `design-full` | Design | End-to-end systems, trade-offs |
92
139
 
93
140
  ## How It Works
94
141
 
95
- 1. **Pick a question** from the dashboard (`ace list`) or generate one (`ace generate`).
142
+ 1. **Generate a question** — run `ace generate` and follow the prompts (category, difficulty, topic), or use `ace generate --brainstorm` for an interactive design session.
96
143
  2. **Open the question folder** — read `README.md` for the problem statement.
97
- 3. **Write your solution** in the solution file (`solution.ts`, `App.tsx`, `component.ts`, or `notes.md`).
98
- 4. **Run tests** with `ace test <slug>` to check your work.
99
- 5. **Get feedback** with `ace feedback <slug>` for an LLM-powered code or design review.
100
- 6. **Track progress** with `ace score <slug>` and `ace list`.
144
+ 3. **Write your solution** in the solution file (`solution.ts`, `App.tsx`, `Component.tsx`, or `notes.md`).
145
+ 4. **Run tests** with `ace test` to pick a question and check your work.
146
+ 5. **Get feedback** with `ace feedback` for an LLM-powered code or design review.
147
+ 6. **Track progress** with `ace score` and `ace list`.
101
148
 
102
149
  ## Configuration
103
150
 
@@ -107,11 +154,13 @@ ace reset debounce
107
154
  - `~/.ace/.env` — fallback (dotenv format)
108
155
  - Environment variables — final fallback
109
156
 
110
- **Workspace** — each workspace gets its own `questions/` directory and test config.
157
+ Typical `~/.ace/config.json` keys:
111
158
 
112
- ## Seed Questions
159
+ - `OPENAI_API_KEY`
160
+ - `ANTHROPIC_API_KEY`
161
+ - `default_provider` (set automatically or via `ace setup --default-provider ...`)
113
162
 
114
- Ships with 8 starter questions (one per category) so you can begin practicing immediately after install.
163
+ **Workspace** — each workspace gets its own `questions/` directory and test config.
115
164
 
116
165
  ## Development
117
166
 
@@ -0,0 +1,226 @@
1
+ import fs from "node:fs";
2
+ import path from "node:path";
3
+ import { spawnSync } from "node:child_process";
4
+ import prompts from "prompts";
5
+ import chalk from "chalk";
6
+ import { findQuestion, readScorecard, writeScorecard, promptForSlug } from "../lib/scorecard.js";
7
+ import { CATEGORIES, isDesignCategory } from "../lib/categories.js";
8
+ import { chat, requireProvider } from "../lib/llm.js";
9
+ import { resolveWorkspaceRoot, isWorkspaceInitialized } from "../lib/paths.js";
10
+ const PROMPTS_DIR = path.resolve(import.meta.dirname, "../prompts");
11
+ function loadPrompt(name) {
12
+ return fs.readFileSync(path.join(PROMPTS_DIR, name), "utf-8");
13
+ }
14
+ function extractJSON(text) {
15
+ const match = text.match(/```json\s*([\s\S]*?)```/);
16
+ if (match) return match[1].trim();
17
+ const jsonMatch = text.match(/\{[\s\S]*\}/);
18
+ if (jsonMatch) return jsonMatch[0];
19
+ return text;
20
+ }
21
+ function parseArgs(args) {
22
+ let slug;
23
+ let provider;
24
+ for (let i = 0; i < args.length; i++) {
25
+ const arg = args[i];
26
+ if (arg === "--provider" && args[i + 1]) {
27
+ provider = args[++i];
28
+ } else if (!arg.startsWith("--")) {
29
+ slug = arg;
30
+ }
31
+ }
32
+ return { slug, provider };
33
+ }
34
+ function runTestsAndCapture(projectRoot, questionDir) {
35
+ const result = spawnSync("npx", ["vitest", "run", questionDir], {
36
+ cwd: projectRoot,
37
+ encoding: "utf-8"
38
+ });
39
+ const stdout = result.stdout || "";
40
+ const stderr = result.stderr || "";
41
+ const output = [stdout, stderr].filter(Boolean).join("\n");
42
+ const exitCode = typeof result.status === "number" ? result.status : 1;
43
+ return { output, exitCode };
44
+ }
45
+ async function run(args) {
46
+ const projectRoot = resolveWorkspaceRoot();
47
+ if (!isWorkspaceInitialized(projectRoot)) {
48
+ console.error(chalk.red("\nError: Workspace not initialized."));
49
+ console.error(chalk.dim("Run `ace init` in this folder first.\n"));
50
+ process.exit(1);
51
+ }
52
+ const parsed = parseArgs(args);
53
+ let selectedSlug = parsed.slug;
54
+ if (!selectedSlug) {
55
+ selectedSlug = await promptForSlug();
56
+ if (!selectedSlug) return;
57
+ }
58
+ const question = findQuestion(selectedSlug);
59
+ if (!question) {
60
+ console.error(chalk.red(`Question not found: ${selectedSlug}`));
61
+ return;
62
+ }
63
+ if (isDesignCategory(question.category)) {
64
+ console.log(chalk.yellow(`"${selectedSlug}" is a system design question \u2014 no tests to dispute.`));
65
+ return;
66
+ }
67
+ const config = CATEGORIES[question.category];
68
+ const readmePath = path.join(question.dir, "README.md");
69
+ const readme = fs.existsSync(readmePath) ? fs.readFileSync(readmePath, "utf-8") : "";
70
+ if (!readme.trim()) {
71
+ console.error(chalk.red("No README.md found for this question."));
72
+ return;
73
+ }
74
+ let solutionContent = "";
75
+ for (const f of config.solutionFiles) {
76
+ const fp = path.join(question.dir, f);
77
+ if (fs.existsSync(fp)) {
78
+ const content = fs.readFileSync(fp, "utf-8");
79
+ solutionContent += `
80
+ --- ${f} ---
81
+ ${content}
82
+ `;
83
+ }
84
+ }
85
+ if (!solutionContent.trim() || solutionContent.includes("// TODO: implement")) {
86
+ console.error(chalk.yellow("Solution appears to be the default stub. Write your solution first!"));
87
+ return;
88
+ }
89
+ let testContent = "";
90
+ let testFilePath = "";
91
+ for (const f of config.testFiles) {
92
+ const fp = path.join(question.dir, f);
93
+ if (fs.existsSync(fp)) {
94
+ testContent += fs.readFileSync(fp, "utf-8");
95
+ testFilePath = fp;
96
+ }
97
+ }
98
+ if (!testContent.trim()) {
99
+ console.error(chalk.red("No test file found for this question."));
100
+ return;
101
+ }
102
+ console.log(chalk.cyan(`
103
+ Running tests for "${selectedSlug}" to capture failures...
104
+ `));
105
+ const initialRun = runTestsAndCapture(projectRoot, question.dir);
106
+ const testOutput = initialRun.output;
107
+ if (initialRun.exitCode === 0) {
108
+ console.log(chalk.green("All tests are passing \u2014 nothing to dispute."));
109
+ return;
110
+ }
111
+ const provider = requireProvider(parsed.provider);
112
+ console.log(chalk.yellow("\nFailing tests detected. Sending to LLM for analysis...\n"));
113
+ const systemPrompt = loadPrompt("test-dispute.md");
114
+ const userContent = `## Problem Statement
115
+ ${readme}
116
+
117
+ ## Solution Code
118
+ ${solutionContent}
119
+
120
+ ## Test File
121
+ ${testContent}
122
+
123
+ ## Test Failure Output
124
+ \`\`\`
125
+ ${testOutput}
126
+ \`\`\``;
127
+ const messages = [
128
+ { role: "system", content: systemPrompt },
129
+ { role: "user", content: userContent }
130
+ ];
131
+ const spinner = ["|", "/", "-", "\\"];
132
+ let spinIdx = 0;
133
+ const interval = setInterval(() => {
134
+ process.stdout.write(`\r${chalk.cyan(spinner[spinIdx++ % spinner.length])} Analyzing...`);
135
+ }, 120);
136
+ let response;
137
+ try {
138
+ response = await chat(provider, messages, true);
139
+ } finally {
140
+ clearInterval(interval);
141
+ process.stdout.write("\r" + " ".repeat(30) + "\r");
142
+ }
143
+ let result;
144
+ try {
145
+ result = JSON.parse(extractJSON(response));
146
+ } catch {
147
+ console.error(chalk.red("Failed to parse LLM response."));
148
+ console.log(chalk.dim(response));
149
+ return;
150
+ }
151
+ const verdictColors = {
152
+ test_incorrect: chalk.green,
153
+ solution_incorrect: chalk.red,
154
+ ambiguous: chalk.yellow
155
+ };
156
+ const verdictLabels = {
157
+ test_incorrect: "Test is incorrect",
158
+ solution_incorrect: "Solution has a bug",
159
+ ambiguous: "Ambiguous specification"
160
+ };
161
+ const color = verdictColors[result.verdict] || chalk.white;
162
+ const label = verdictLabels[result.verdict] || result.verdict;
163
+ console.log(chalk.bold(`
164
+ Verdict: ${color(label)}
165
+ `));
166
+ console.log(result.summary);
167
+ console.log(chalk.dim("\n--- Details ---\n"));
168
+ console.log(result.details);
169
+ if (result.failingTests?.length) {
170
+ console.log(chalk.dim("\n--- Per-Test Breakdown ---\n"));
171
+ for (const t of result.failingTests) {
172
+ const tColor = verdictColors[t.verdict] || chalk.white;
173
+ console.log(` ${tColor("\u25CF")} ${chalk.bold(t.testName)}: ${tColor(verdictLabels[t.verdict] || t.verdict)}`);
174
+ console.log(` ${t.explanation}`);
175
+ if (t.fixedAssertion) {
176
+ console.log(chalk.dim(` Fix: ${t.fixedAssertion}`));
177
+ }
178
+ }
179
+ }
180
+ if ((result.verdict === "test_incorrect" || result.verdict === "ambiguous") && result.fixedTestCode) {
181
+ console.log("");
182
+ const { confirm } = await prompts({
183
+ type: "confirm",
184
+ name: "confirm",
185
+ message: "Apply the corrected test file?",
186
+ initial: true
187
+ });
188
+ if (confirm) {
189
+ fs.writeFileSync(testFilePath, result.fixedTestCode, "utf-8");
190
+ console.log(chalk.green(`
191
+ Test file updated: ${path.relative(projectRoot, testFilePath)}`));
192
+ console.log(chalk.cyan("\nRe-running tests to verify...\n"));
193
+ const verifyRun = runTestsAndCapture(projectRoot, question.dir);
194
+ const verifyOutput = verifyRun.output;
195
+ console.log(verifyOutput);
196
+ const scorecard = readScorecard(question.category, selectedSlug);
197
+ if (scorecard) {
198
+ const passMatch = verifyOutput.match(/(\d+)\s+passed/);
199
+ const failMatch = verifyOutput.match(/(\d+)\s+failed/);
200
+ const passed = passMatch ? parseInt(passMatch[1], 10) : 0;
201
+ const failed = failMatch ? parseInt(failMatch[1], 10) : 0;
202
+ const total = passed + failed;
203
+ if (total > 0) {
204
+ if (scorecard.attempts.length === 0) {
205
+ scorecard.attempts.push({ attempt: 1, testsTotal: 0, testsPassed: 0, llmScore: null });
206
+ }
207
+ const current = scorecard.attempts[scorecard.attempts.length - 1];
208
+ current.testsTotal = total;
209
+ current.testsPassed = passed;
210
+ scorecard.status = passed === total ? "solved" : "attempted";
211
+ writeScorecard(question.category, selectedSlug, scorecard);
212
+ const resultColor = passed === total ? chalk.green : chalk.yellow;
213
+ console.log(resultColor(`Scorecard updated: ${passed}/${total} tests passed`));
214
+ }
215
+ }
216
+ } else {
217
+ console.log(chalk.dim("No changes made."));
218
+ }
219
+ } else if (result.verdict === "solution_incorrect" && result.hint) {
220
+ console.log(chalk.dim("\n--- Hint ---\n"));
221
+ console.log(chalk.yellow(result.hint));
222
+ }
223
+ }
224
+ export {
225
+ run
226
+ };
@@ -1,7 +1,8 @@
1
1
  import fs from "node:fs";
2
2
  import path from "node:path";
3
+ import prompts from "prompts";
3
4
  import chalk from "chalk";
4
- import { findQuestion, readScorecard, writeScorecard } from "../lib/scorecard.js";
5
+ import { findQuestion, readScorecard, writeScorecard, getAllQuestions, promptForSlug } from "../lib/scorecard.js";
5
6
  import { CATEGORIES, isDesignCategory } from "../lib/categories.js";
6
7
  import { chatStream, requireProvider } from "../lib/llm.js";
7
8
  import { resolveWorkspaceRoot, isWorkspaceInitialized } from "../lib/paths.js";
@@ -12,35 +13,25 @@ function loadPrompt(name) {
12
13
  function parseArgs(args) {
13
14
  let slug;
14
15
  let provider;
16
+ let all = false;
15
17
  for (let i = 0; i < args.length; i++) {
16
18
  const arg = args[i];
17
19
  if (arg === "--provider" && args[i + 1]) {
18
20
  provider = args[++i];
21
+ } else if (arg === "--all" || arg === "all") {
22
+ all = true;
19
23
  } else if (!arg.startsWith("--")) {
20
24
  slug = arg;
21
25
  }
22
26
  }
23
- return { slug, provider };
27
+ return { slug, provider, all };
24
28
  }
25
- async function run(args) {
26
- const root = resolveWorkspaceRoot();
27
- if (!isWorkspaceInitialized(root)) {
28
- console.error(chalk.red("\nError: Workspace not initialized."));
29
- console.error(chalk.dim("Run `ace init` in this folder first.\n"));
30
- process.exit(1);
31
- }
32
- const parsed = parseArgs(args);
33
- if (!parsed.slug) {
34
- console.error(chalk.red("Missing question slug."));
35
- console.error(chalk.dim("Usage: npm run ace feedback <slug>"));
36
- return;
37
- }
38
- const question = findQuestion(parsed.slug);
29
+ async function runFeedbackForSlug(slug, provider) {
30
+ const question = findQuestion(slug);
39
31
  if (!question) {
40
- console.error(chalk.red(`Question not found: ${parsed.slug}`));
32
+ console.error(chalk.red(`Question not found: ${slug}`));
41
33
  return;
42
34
  }
43
- const provider = requireProvider(parsed.provider);
44
35
  const config = CATEGORIES[question.category];
45
36
  const isDesign = isDesignCategory(question.category);
46
37
  const readmePath = path.join(question.dir, "README.md");
@@ -53,7 +44,7 @@ async function run(args) {
53
44
  const notes = fs.existsSync(notesPath) ? fs.readFileSync(notesPath, "utf-8") : "";
54
45
  if (!notes.trim() || notes.includes("<!-- List the core features")) {
55
46
  console.error(chalk.yellow("Notes file appears to be empty. Write your design notes first!"));
56
- console.error(chalk.dim(`Edit: questions/${question.category}/${parsed.slug}/notes.md`));
47
+ console.error(chalk.dim(`Edit: questions/${question.category}/${slug}/notes.md`));
57
48
  return;
58
49
  }
59
50
  const designSubType = question.category === "design-fe" ? "frontend" : question.category === "design-be" ? "backend" : "full-stack";
@@ -102,7 +93,7 @@ ${solutionContent}
102
93
  ${testContent}`;
103
94
  }
104
95
  console.log(chalk.cyan(`
105
- --- LLM ${isDesign ? "Design" : "Code"} Review: ${parsed.slug} ---`));
96
+ --- LLM ${isDesign ? "Design" : "Code"} Review: ${slug} ---`));
106
97
  console.log(chalk.dim(`Provider: ${provider}
107
98
  `));
108
99
  const messages = [
@@ -116,7 +107,7 @@ ${testContent}`;
116
107
  fullResponse += chunk;
117
108
  }
118
109
  console.log("\n");
119
- const scorecard = readScorecard(question.category, parsed.slug);
110
+ const scorecard = readScorecard(question.category, slug);
120
111
  if (scorecard) {
121
112
  scorecard.llmFeedback = fullResponse;
122
113
  const scoreMatch = fullResponse.match(/Overall.*?(\d+(?:\.\d+)?)\s*\/\s*5/i);
@@ -124,10 +115,54 @@ ${testContent}`;
124
115
  const lastAttempt = scorecard.attempts[scorecard.attempts.length - 1];
125
116
  lastAttempt.llmScore = parseFloat(scoreMatch[1]);
126
117
  }
127
- writeScorecard(question.category, parsed.slug, scorecard);
118
+ writeScorecard(question.category, slug, scorecard);
128
119
  console.log(chalk.dim("Feedback saved to scorecard."));
129
120
  }
130
121
  }
122
+ async function run(args) {
123
+ const root = resolveWorkspaceRoot();
124
+ if (!isWorkspaceInitialized(root)) {
125
+ console.error(chalk.red("\nError: Workspace not initialized."));
126
+ console.error(chalk.dim("Run `ace init` in this folder first.\n"));
127
+ process.exit(1);
128
+ }
129
+ const parsed = parseArgs(args);
130
+ const provider = requireProvider(parsed.provider);
131
+ if (parsed.all) {
132
+ const questions = getAllQuestions();
133
+ if (questions.length === 0) {
134
+ console.log(chalk.yellow("No questions found. Create one first with `ace generate` or `ace add`."));
135
+ return;
136
+ }
137
+ console.log(chalk.cyan(`
138
+ Running feedback for ${questions.length} question(s)...
139
+ `));
140
+ for (let i = 0; i < questions.length; i++) {
141
+ const q = questions[i];
142
+ console.log(chalk.bold(`
143
+ [${i + 1}/${questions.length}] ${q.slug}`));
144
+ const { confirm } = await prompts({
145
+ type: "confirm",
146
+ name: "confirm",
147
+ message: `Run feedback for "${q.slug}"?`,
148
+ initial: true
149
+ });
150
+ if (!confirm) {
151
+ console.log(chalk.dim("Skipped."));
152
+ continue;
153
+ }
154
+ await runFeedbackForSlug(q.slug, provider);
155
+ }
156
+ console.log(chalk.green("\nCompleted feedback for all questions."));
157
+ return;
158
+ }
159
+ let selectedSlug = parsed.slug;
160
+ if (!selectedSlug) {
161
+ selectedSlug = await promptForSlug();
162
+ if (!selectedSlug) return;
163
+ }
164
+ await runFeedbackForSlug(selectedSlug, provider);
165
+ }
131
166
  export {
132
167
  run
133
168
  };
@@ -56,6 +56,9 @@ Question type: ${categoryConfig.type}`;
56
56
  return;
57
57
  }
58
58
  const slug = parsed.slug || slugify(parsed.title || topic);
59
+ if (parsed.solutionCode) {
60
+ console.log(chalk.dim("Note: Discarded LLM solutionCode; using signature-based stub."));
61
+ }
59
62
  const questionDir = scaffoldQuestion({
60
63
  title: parsed.title || topic,
61
64
  slug,
@@ -63,8 +66,7 @@ Question type: ${categoryConfig.type}`;
63
66
  difficulty,
64
67
  description: parsed.description || "",
65
68
  signature: parsed.signature,
66
- testCode: parsed.testCode,
67
- solutionCode: parsed.solutionCode
69
+ testCode: parsed.testCode
68
70
  });
69
71
  console.log(chalk.green(`
70
72
  Created: questions/${category}/${slug}/`));
@@ -116,7 +118,11 @@ async function brainstormMode(provider) {
116
118
  type: "select",
117
119
  name: "category",
118
120
  message: "Which category?",
119
- choices: CATEGORY_SLUGS.map((s) => ({ title: CATEGORIES[s].name, value: s }))
121
+ choices: CATEGORY_SLUGS.map((s) => ({
122
+ title: CATEGORIES[s].name,
123
+ description: CATEGORIES[s].hint,
124
+ value: s
125
+ }))
120
126
  });
121
127
  const { difficulty } = await prompts({
122
128
  type: "select",
@@ -158,6 +164,9 @@ ${brainstormSummary}`
158
164
  return;
159
165
  }
160
166
  const slug = parsed.slug || slugify(parsed.title || "brainstorm-question");
167
+ if (parsed.solutionCode) {
168
+ console.log(chalk.dim("Note: Discarded LLM solutionCode; using signature-based stub."));
169
+ }
161
170
  const questionDir = scaffoldQuestion({
162
171
  title: parsed.title,
163
172
  slug,
@@ -165,8 +174,7 @@ ${brainstormSummary}`
165
174
  difficulty,
166
175
  description: parsed.description || "",
167
176
  signature: parsed.signature,
168
- testCode: parsed.testCode,
169
- solutionCode: parsed.solutionCode
177
+ testCode: parsed.testCode
170
178
  });
171
179
  console.log(chalk.green(`
172
180
  Created: questions/${category}/${slug}/`));
@@ -187,22 +195,23 @@ async function run(args) {
187
195
  await brainstormMode(provider);
188
196
  return;
189
197
  }
190
- if (!parsed.topic) {
191
- console.error(chalk.red("Missing --topic. Use --brainstorm for interactive mode."));
192
- console.error(chalk.dim('Example: npm run ace generate -- --topic "debounce" --category js-ts --difficulty medium'));
193
- return;
194
- }
195
198
  let category = parsed.category;
196
199
  let difficulty = parsed.difficulty;
200
+ let topic = parsed.topic;
197
201
  if (!category) {
198
202
  const result = await prompts({
199
203
  type: "select",
200
204
  name: "category",
201
205
  message: "Which category?",
202
- choices: CATEGORY_SLUGS.map((s) => ({ title: CATEGORIES[s].name, value: s }))
206
+ choices: CATEGORY_SLUGS.map((s) => ({
207
+ title: CATEGORIES[s].name,
208
+ description: CATEGORIES[s].hint,
209
+ value: s
210
+ }))
203
211
  });
204
212
  category = result.category;
205
213
  }
214
+ if (!category) return;
206
215
  if (!difficulty) {
207
216
  const result = await prompts({
208
217
  type: "select",
@@ -216,8 +225,17 @@ async function run(args) {
216
225
  });
217
226
  difficulty = result.difficulty;
218
227
  }
219
- if (!category || !difficulty) return;
220
- await directMode(provider, parsed.topic, category, difficulty);
228
+ if (!difficulty) return;
229
+ if (!topic) {
230
+ const result = await prompts({
231
+ type: "text",
232
+ name: "topic",
233
+ message: "What topic do you want to practice?"
234
+ });
235
+ topic = result.topic;
236
+ }
237
+ if (!topic) return;
238
+ await directMode(provider, topic, category, difficulty);
221
239
  }
222
240
  export {
223
241
  run