wiggum-cli 0.5.4 → 0.6.0
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- package/README.md +57 -12
- package/dist/ai/conversation/conversation-manager.d.ts +84 -0
- package/dist/ai/conversation/conversation-manager.d.ts.map +1 -0
- package/dist/ai/conversation/conversation-manager.js +159 -0
- package/dist/ai/conversation/conversation-manager.js.map +1 -0
- package/dist/ai/conversation/index.d.ts +8 -0
- package/dist/ai/conversation/index.d.ts.map +1 -0
- package/dist/ai/conversation/index.js +8 -0
- package/dist/ai/conversation/index.js.map +1 -0
- package/dist/ai/conversation/spec-generator.d.ts +62 -0
- package/dist/ai/conversation/spec-generator.d.ts.map +1 -0
- package/dist/ai/conversation/spec-generator.js +267 -0
- package/dist/ai/conversation/spec-generator.js.map +1 -0
- package/dist/ai/conversation/url-fetcher.d.ts +26 -0
- package/dist/ai/conversation/url-fetcher.d.ts.map +1 -0
- package/dist/ai/conversation/url-fetcher.js +145 -0
- package/dist/ai/conversation/url-fetcher.js.map +1 -0
- package/dist/cli.d.ts.map +1 -1
- package/dist/cli.js +44 -34
- package/dist/cli.js.map +1 -1
- package/dist/commands/init.d.ts +1 -0
- package/dist/commands/init.d.ts.map +1 -1
- package/dist/commands/init.js +12 -4
- package/dist/commands/init.js.map +1 -1
- package/dist/commands/new.d.ts +11 -1
- package/dist/commands/new.d.ts.map +1 -1
- package/dist/commands/new.js +102 -43
- package/dist/commands/new.js.map +1 -1
- package/dist/commands/run.js +3 -3
- package/dist/commands/run.js.map +1 -1
- package/dist/generator/config.d.ts.map +1 -1
- package/dist/generator/config.js +2 -0
- package/dist/generator/config.js.map +1 -1
- package/dist/repl/command-parser.d.ts +79 -0
- package/dist/repl/command-parser.d.ts.map +1 -0
- package/dist/repl/command-parser.js +107 -0
- package/dist/repl/command-parser.js.map +1 -0
- package/dist/repl/index.d.ts +8 -0
- package/dist/repl/index.d.ts.map +1 -0
- package/dist/repl/index.js +8 -0
- package/dist/repl/index.js.map +1 -0
- package/dist/repl/repl-loop.d.ts +30 -0
- package/dist/repl/repl-loop.d.ts.map +1 -0
- package/dist/repl/repl-loop.js +201 -0
- package/dist/repl/repl-loop.js.map +1 -0
- package/dist/repl/session-state.d.ts +35 -0
- package/dist/repl/session-state.d.ts.map +1 -0
- package/dist/repl/session-state.js +25 -0
- package/dist/repl/session-state.js.map +1 -0
- package/package.json +1 -1
- package/src/ai/conversation/conversation-manager.ts +230 -0
- package/src/ai/conversation/index.ts +23 -0
- package/src/ai/conversation/spec-generator.ts +327 -0
- package/src/ai/conversation/url-fetcher.ts +180 -0
- package/src/cli.ts +47 -34
- package/src/commands/init.ts +20 -4
- package/src/commands/new.ts +121 -44
- package/src/commands/run.ts +3 -3
- package/src/generator/config.ts +2 -0
- package/src/repl/command-parser.ts +149 -0
- package/src/repl/index.ts +23 -0
- package/src/repl/repl-loop.ts +269 -0
- package/src/repl/session-state.ts +59 -0
package/src/ai/conversation/conversation-manager.ts
@@ -0,0 +1,230 @@
+/**
+ * Conversation Manager
+ * Manages multi-turn AI conversations for spec generation
+ */
+
+import { generateText, streamText } from 'ai';
+import { getModel, isReasoningModel, type AIProvider } from '../providers.js';
+import type { ScanResult } from '../../scanner/types.js';
+
+/**
+ * Message type for AI SDK
+ */
+type AIMessage = {
+  role: 'user' | 'assistant' | 'system';
+  content: string;
+};
+
+/**
+ * Conversation message
+ */
+export interface ConversationMessage {
+  role: 'user' | 'assistant' | 'system';
+  content: string;
+}
+
+/**
+ * Conversation context
+ */
+export interface ConversationContext {
+  codebaseSummary?: string;
+  references: Array<{ source: string; content: string }>;
+}
+
+/**
+ * Conversation manager options
+ */
+export interface ConversationManagerOptions {
+  provider: AIProvider;
+  model: string;
+  systemPrompt?: string;
+}
+
+/**
+ * Format scan result into a concise codebase summary
+ */
+function formatCodebaseSummary(scanResult: ScanResult): string {
+  const { stack } = scanResult;
+
+  const parts: string[] = [];
+
+  if (stack.framework) {
+    parts.push(`Framework: ${stack.framework.name}${stack.framework.version ? ` v${stack.framework.version}` : ''}`);
+  }
+
+  if (stack.testing?.unit) {
+    parts.push(`Unit Testing: ${stack.testing.unit.name}`);
+  }
+
+  if (stack.testing?.e2e) {
+    parts.push(`E2E Testing: ${stack.testing.e2e.name}`);
+  }
+
+  if (stack.styling) {
+    parts.push(`Styling: ${stack.styling.name}`);
+  }
+
+  if (stack.packageManager) {
+    parts.push(`Package Manager: ${stack.packageManager.name}`);
+  }
+
+  return parts.join('\n');
+}
+
+/**
+ * Manages a multi-turn conversation with an AI model
+ */
+export class ConversationManager {
+  private messages: ConversationMessage[] = [];
+  private context: ConversationContext = { references: [] };
+  private readonly provider: AIProvider;
+  private readonly modelId: string;
+  private readonly systemPrompt: string;
+
+  constructor(options: ConversationManagerOptions) {
+    this.provider = options.provider;
+    this.modelId = options.model;
+    this.systemPrompt = options.systemPrompt || this.getDefaultSystemPrompt();
+  }
+
+  private getDefaultSystemPrompt(): string {
+    return `You are a helpful assistant that helps developers create feature specifications.
+You ask clarifying questions to understand the user's requirements and then help generate a detailed specification.
+Be concise but thorough. Focus on understanding the user's needs before proposing solutions.`;
+  }
+
+  /**
+   * Set codebase context from scan result
+   */
+  setCodebaseContext(scanResult: ScanResult): void {
+    this.context.codebaseSummary = formatCodebaseSummary(scanResult);
+  }
+
+  /**
+   * Add a reference document to the context
+   */
+  addReference(content: string, source: string): void {
+    this.context.references.push({ source, content });
+  }
+
+  /**
+   * Clear all references
+   */
+  clearReferences(): void {
+    this.context.references = [];
+  }
+
+  /**
+   * Get the current context as a string for inclusion in prompts
+   */
+  private getContextString(): string {
+    const parts: string[] = [];
+
+    if (this.context.codebaseSummary) {
+      parts.push(`## Project Tech Stack\n${this.context.codebaseSummary}`);
+    }
+
+    if (this.context.references.length > 0) {
+      parts.push('## Reference Documents');
+      for (const ref of this.context.references) {
+        parts.push(`### ${ref.source}\n${ref.content}`);
+      }
+    }
+
+    return parts.join('\n\n');
+  }
+
+  /**
+   * Build the full message array for the AI
+   */
+  private buildMessages(): AIMessage[] {
+    const contextString = this.getContextString();
+    const fullSystemPrompt = contextString
+      ? `${this.systemPrompt}\n\n${contextString}`
+      : this.systemPrompt;
+
+    const aiMessages: AIMessage[] = [
+      { role: 'system', content: fullSystemPrompt },
+    ];
+
+    for (const msg of this.messages) {
+      if (msg.role === 'user' || msg.role === 'assistant') {
+        aiMessages.push({ role: msg.role, content: msg.content });
+      }
+    }
+
+    return aiMessages;
+  }
+
+  /**
+   * Send a message and get a response
+   */
+  async chat(userMessage: string): Promise<string> {
+    // Add user message to history
+    this.messages.push({ role: 'user', content: userMessage });
+
+    const { model } = getModel(this.provider, this.modelId);
+    const messages = this.buildMessages();
+
+    const result = await generateText({
+      model,
+      messages,
+      ...(isReasoningModel(this.modelId) ? {} : { temperature: 0.7 }),
+    });
+
+    const assistantMessage = result.text;
+
+    // Add assistant response to history
+    this.messages.push({ role: 'assistant', content: assistantMessage });
+
+    return assistantMessage;
+  }
+
+  /**
+   * Send a message and stream the response
+   */
+  async *chatStream(userMessage: string): AsyncIterable<string> {
+    // Add user message to history
+    this.messages.push({ role: 'user', content: userMessage });
+
+    const { model } = getModel(this.provider, this.modelId);
+    const messages = this.buildMessages();
+
+    const result = streamText({
+      model,
+      messages,
+      ...(isReasoningModel(this.modelId) ? {} : { temperature: 0.7 }),
+    });
+
+    let fullResponse = '';
+
+    for await (const textPart of result.textStream) {
+      fullResponse += textPart;
+      yield textPart;
+    }
+
+    // Add assistant response to history
+    this.messages.push({ role: 'assistant', content: fullResponse });
+  }
+
+  /**
+   * Get conversation history
+   */
+  getHistory(): ConversationMessage[] {
+    return [...this.messages];
+  }
+
+  /**
+   * Clear conversation history
+   */
+  clearHistory(): void {
+    this.messages = [];
+  }
+
+  /**
+   * Add a message to history without sending to AI
+   */
+  addToHistory(message: ConversationMessage): void {
+    this.messages.push(message);
+  }
+}
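For orientation, here is a minimal usage sketch of the `ConversationManager` API added above. The provider name and model id are placeholder values, not wiggum-cli defaults, and the snippet assumes it runs inside an ES module in the same package.

```ts
import { ConversationManager } from './conversation-manager.js';

// Hypothetical provider/model pair; the CLI resolves the real values
// through getModel() in ../providers.js.
const conversation = new ConversationManager({
  provider: 'openai',
  model: 'gpt-4o',
});

// References are folded into the system prompt under "## Reference Documents".
conversation.addReference('# API notes\nPOST /widgets creates a widget.', 'docs/api.md');

// One-shot turn (history is kept across calls).
const reply = await conversation.chat('Help me spec a widget-export feature.');
console.log(reply);

// Or stream the next turn token by token.
for await (const chunk of conversation.chatStream('What edge cases am I missing?')) {
  process.stdout.write(chunk);
}
```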
package/src/ai/conversation/index.ts
@@ -0,0 +1,23 @@
+/**
+ * Conversation Module
+ * AI-powered conversation and spec generation
+ */
+
+export {
+  ConversationManager,
+  type ConversationMessage,
+  type ConversationContext,
+  type ConversationManagerOptions,
+} from './conversation-manager.js';
+
+export {
+  SpecGenerator,
+  type SpecGeneratorOptions,
+} from './spec-generator.js';
+
+export {
+  fetchContent,
+  fetchMultipleSources,
+  isUrl,
+  type FetchedContent,
+} from './url-fetcher.js';
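This barrel file gives the rest of the CLI a single import path for the conversation module. A small sketch of using the fetcher through it; the relative path depends on the caller's location, and treating `isUrl` as a simple string predicate is an assumption based on how it is used elsewhere in this diff.

```ts
// Sketch only: path is relative to a hypothetical caller under src/commands/.
import { fetchContent, isUrl } from '../ai/conversation/index.js';

const source = 'https://example.com/design-doc';
if (isUrl(source)) {
  // fetchContent also accepts local file paths resolved against the project root.
  const ref = await fetchContent(source, process.cwd());
  if (!ref.error) {
    console.log(`Fetched ${ref.source} (${ref.content.length} chars)`);
  }
}
```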
package/src/ai/conversation/spec-generator.ts
@@ -0,0 +1,327 @@
+/**
+ * Spec Generator
+ * AI-powered feature specification generator with interview flow
+ */
+
+import readline from 'node:readline';
+import pc from 'picocolors';
+import { ConversationManager } from './conversation-manager.js';
+import { fetchContent, isUrl, type FetchedContent } from './url-fetcher.js';
+import type { AIProvider } from '../providers.js';
+import type { ScanResult } from '../../scanner/types.js';
+import { simpson } from '../../utils/colors.js';
+
+/** Maximum number of interview questions before auto-completing */
+const MAX_INTERVIEW_QUESTIONS = 10;
+
+/**
+ * Spec generator options
+ */
+export interface SpecGeneratorOptions {
+  featureName: string;
+  projectRoot: string;
+  provider: AIProvider;
+  model: string;
+  scanResult?: ScanResult;
+}
+
+/**
+ * Generation phases
+ */
+type GeneratorPhase = 'context' | 'goals' | 'interview' | 'generation' | 'complete';
+
+/**
+ * Prompt for user input
+ */
+async function promptUser(prompt: string): Promise<string> {
+  const rl = readline.createInterface({
+    input: process.stdin,
+    output: process.stdout,
+  });
+
+  return new Promise((resolve) => {
+    rl.question(prompt, (answer) => {
+      rl.close();
+      resolve(answer.trim());
+    });
+  });
+}
+
+/**
+ * Display streaming text
+ */
+async function displayStream(stream: AsyncIterable<string>): Promise<string> {
+  let fullText = '';
+  for await (const chunk of stream) {
+    process.stdout.write(chunk);
+    fullText += chunk;
+  }
+  console.log(''); // New line after stream
+  return fullText;
+}
+
+const SPEC_SYSTEM_PROMPT = `You are an expert product manager and technical writer helping to create detailed feature specifications.
+
+Your role is to:
+1. Understand the user's feature goals through targeted questions
+2. Identify edge cases and potential issues
+3. Generate a comprehensive, actionable specification
+
+When interviewing:
+- Ask one focused question at a time
+- Acknowledge answers before asking the next question
+- Stop asking when you have enough information (usually 3-5 questions)
+- Say "I have enough information to generate the spec" when ready
+
+When generating the spec, use this format:
+
+# [Feature Name] Feature Specification
+
+**Status:** Planned
+**Version:** 1.0
+**Last Updated:** [date]
+
+## Purpose
+[Brief description]
+
+## User Stories
+- As a [user], I want [action] so that [benefit]
+
+## Requirements
+
+### Functional Requirements
+- [ ] Requirement with clear acceptance criteria
+
+### Non-Functional Requirements
+- [ ] Performance, security, accessibility requirements
+
+## Technical Notes
+- Implementation approach
+- Key dependencies
+- Database changes if needed
+
+## Acceptance Criteria
+- [ ] Specific, testable conditions
+
+## Out of Scope
+- Items explicitly not included
+`;
+
+/**
+ * AI-powered spec generator with interview flow
+ */
+export class SpecGenerator {
+  private conversation: ConversationManager;
+  private phase: GeneratorPhase = 'context';
+  private readonly featureName: string;
+  private readonly projectRoot: string;
+  private generatedSpec: string = '';
+
+  constructor(options: SpecGeneratorOptions) {
+    this.featureName = options.featureName;
+    this.projectRoot = options.projectRoot;
+
+    this.conversation = new ConversationManager({
+      provider: options.provider,
+      model: options.model,
+      systemPrompt: SPEC_SYSTEM_PROMPT,
+    });
+
+    if (options.scanResult) {
+      this.conversation.setCodebaseContext(options.scanResult);
+    }
+  }
+
+  /**
+   * Phase 1: Gather context from URLs/files
+   */
+  private async gatherContext(): Promise<void> {
+    console.log('');
+    console.log(simpson.yellow('Context Gathering'));
+    console.log(pc.dim('Share any reference URLs or files (press Enter to skip):'));
+    console.log('');
+
+    while (true) {
+      const input = await promptUser(`${simpson.brown('ref>')} `);
+
+      if (!input) {
+        break;
+      }
+
+      process.stdout.write(pc.dim('Fetching... '));
+      const result = await fetchContent(input, this.projectRoot);
+
+      if (result.error) {
+        console.log(pc.red(`Error: ${result.error}`));
+      } else {
+        this.conversation.addReference(result.content, result.source);
+        console.log(pc.green(`Added reference from ${result.source}${result.truncated ? ' (truncated)' : ''}`));
+      }
+    }
+
+    this.phase = 'goals';
+  }
+
+  /**
+   * Phase 2: Discuss goals
+   */
+  private async discussGoals(): Promise<void> {
+    console.log('');
+    console.log(simpson.yellow('Feature Goals'));
+    console.log(pc.dim('Describe what you want to build:'));
+    console.log('');
+
+    const goals = await promptUser(`${simpson.brown('goals>')} `);
+
+    if (!goals) {
+      console.log(pc.dim('No goals provided, using feature name as description.'));
+      this.conversation.addToHistory({
+        role: 'user',
+        content: `I want to create a feature called "${this.featureName}".`,
+      });
+    } else {
+      this.conversation.addToHistory({
+        role: 'user',
+        content: `I want to create a feature called "${this.featureName}". Here's what I'm thinking:\n\n${goals}`,
+      });
+    }
+
+    console.log('');
+    const response = await this.conversation.chat(
+      `The user wants to create a feature called "${this.featureName}". Acknowledge their goals and ask your first clarifying question to better understand the requirements.`
+    );
+
+    console.log(simpson.blue('AI:'), response);
+    console.log('');
+
+    this.phase = 'interview';
+  }
+
+  /**
+   * Phase 3: Conduct interview
+   */
+  private async conductInterview(): Promise<void> {
+    console.log(simpson.yellow('Interview'));
+    console.log(pc.dim('Answer the questions (type "done" when ready to generate spec):'));
+    console.log('');
+
+    let questionCount = 0;
+
+    while (questionCount < MAX_INTERVIEW_QUESTIONS) {
+      const answer = await promptUser(`${simpson.brown('you>')} `);
+
+      if (answer.toLowerCase() === 'done' || answer.toLowerCase() === 'skip') {
+        break;
+      }
+
+      if (!answer) {
+        console.log(pc.dim('(Press Enter again to skip, or type your answer)'));
+        const confirm = await promptUser(`${simpson.brown('you>')} `);
+        if (!confirm) {
+          break;
+        }
+        // Process the confirmation as the answer
+        console.log('');
+        const response = await this.conversation.chat(confirm);
+        console.log(simpson.blue('AI:'), response);
+        console.log('');
+      } else {
+        console.log('');
+        const response = await this.conversation.chat(answer);
+        console.log(simpson.blue('AI:'), response);
+        console.log('');
+
+        // Check if AI indicates it has enough information
+        if (
+          response.toLowerCase().includes('enough information') ||
+          response.toLowerCase().includes('ready to generate') ||
+          response.toLowerCase().includes("let me generate") ||
+          response.toLowerCase().includes("i'll now generate")
+        ) {
+          break;
+        }
+      }
+
+      questionCount++;
+    }
+
+    this.phase = 'generation';
+  }
+
+  /**
+   * Phase 4: Generate spec
+   */
+  private async generateSpec(): Promise<string> {
+    console.log('');
+    console.log(simpson.yellow('Generating Specification...'));
+    console.log('');
+
+    const prompt = `Based on our conversation, generate a complete feature specification for "${this.featureName}".
+
+Use the format from your instructions. Be specific and actionable. Include:
+- Clear user stories
+- Detailed requirements with acceptance criteria
+- Technical notes based on the project's tech stack
+- Specific acceptance criteria that can be tested
+
+Today's date is ${new Date().toISOString().split('T')[0]}.`;
+
+    const stream = this.conversation.chatStream(prompt);
+    this.generatedSpec = await displayStream(stream);
+
+    this.phase = 'complete';
+    return this.generatedSpec;
+  }
+
+  /**
+   * Run the full spec generation flow
+   * Returns the generated spec or null if cancelled
+   */
+  async run(): Promise<string | null> {
+    try {
+      // Phase 1: Context gathering
+      await this.gatherContext();
+
+      // Phase 2: Goals discussion
+      await this.discussGoals();
+
+      // Phase 3: Interview
+      await this.conductInterview();
+
+      // Phase 4: Generate spec
+      const spec = await this.generateSpec();
+
+      return spec;
+    } catch (error) {
+      // Handle user cancellation (Ctrl+C, Ctrl+D, or readline closed)
+      if (error instanceof Error) {
+        const message = error.message.toLowerCase();
+        if (
+          message.includes('readline was closed') ||
+          message.includes('aborted') ||
+          message.includes('cancel')
+        ) {
+          console.log('');
+          console.log(pc.dim('Spec generation cancelled.'));
+          return null;
+        }
+      }
+      // Re-throw unexpected errors
+      throw error;
+    }
+  }
+
+  /**
+   * Get the generated spec
+   */
+  getSpec(): string {
+    return this.generatedSpec;
+  }
+
+  /**
+   * Get current phase
+   */
+  getPhase(): GeneratorPhase {
+    return this.phase;
+  }
+}
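Finally, a minimal sketch of driving the interview flow above end to end. The provider/model values are placeholders, the scan result is omitted, and writing the result to a spec file is an assumption about how the updated `new` command might use it, not code from this release.

```ts
import { SpecGenerator } from './spec-generator.js';

// Placeholder provider/model; in the CLI these come from config and flags.
const generator = new SpecGenerator({
  featureName: 'widget-export',
  projectRoot: process.cwd(),
  provider: 'openai',
  model: 'gpt-4o',
});

// Walks the phases: context -> goals -> interview -> generation.
const spec = await generator.run();

if (spec === null) {
  console.log('Spec generation was cancelled.');
} else {
  // A caller such as the `new` command could persist this, e.g. to a
  // hypothetical specs/widget-export.md.
  console.log(spec);
}
```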