task-o-matic 0.0.7 → 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +86 -23
- package/dist/commands/benchmark.d.ts +3 -0
- package/dist/commands/benchmark.d.ts.map +1 -0
- package/dist/commands/benchmark.js +227 -0
- package/dist/commands/prd.d.ts.map +1 -1
- package/dist/commands/prd.js +203 -9
- package/dist/commands/workflow.d.ts.map +1 -1
- package/dist/commands/workflow.js +452 -331
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -0
- package/dist/lib/ai-service/ai-operations.d.ts +5 -0
- package/dist/lib/ai-service/ai-operations.d.ts.map +1 -1
- package/dist/lib/ai-service/ai-operations.js +167 -0
- package/dist/lib/benchmark/registry.d.ts +11 -0
- package/dist/lib/benchmark/registry.d.ts.map +1 -0
- package/dist/lib/benchmark/registry.js +78 -0
- package/dist/lib/benchmark/runner.d.ts +6 -0
- package/dist/lib/benchmark/runner.d.ts.map +1 -0
- package/dist/lib/benchmark/runner.js +150 -0
- package/dist/lib/benchmark/storage.d.ts +13 -0
- package/dist/lib/benchmark/storage.d.ts.map +1 -0
- package/dist/lib/benchmark/storage.js +99 -0
- package/dist/lib/benchmark/types.d.ts +54 -0
- package/dist/lib/benchmark/types.d.ts.map +1 -0
- package/dist/lib/benchmark/types.js +2 -0
- package/dist/lib/index.d.ts +9 -0
- package/dist/lib/index.d.ts.map +1 -1
- package/dist/lib/index.js +7 -1
- package/dist/lib/prompt-registry.d.ts.map +1 -1
- package/dist/lib/prompt-registry.js +23 -0
- package/dist/prompts/index.d.ts +7 -6
- package/dist/prompts/index.d.ts.map +1 -1
- package/dist/prompts/index.js +1 -0
- package/dist/prompts/prd-question.d.ts +3 -0
- package/dist/prompts/prd-question.d.ts.map +1 -0
- package/dist/prompts/prd-question.js +40 -0
- package/dist/services/benchmark.d.ts +12 -0
- package/dist/services/benchmark.d.ts.map +1 -0
- package/dist/services/benchmark.js +18 -0
- package/dist/services/prd.d.ts +25 -0
- package/dist/services/prd.d.ts.map +1 -1
- package/dist/services/prd.js +188 -28
- package/dist/services/workflow.d.ts +85 -0
- package/dist/services/workflow.d.ts.map +1 -0
- package/dist/services/workflow.js +363 -0
- package/dist/types/index.d.ts +3 -0
- package/dist/types/index.d.ts.map +1 -1
- package/dist/types/options.d.ts +2 -1
- package/dist/types/options.d.ts.map +1 -1
- package/dist/types/options.js +16 -0
- package/dist/types/workflow-options.d.ts +45 -0
- package/dist/types/workflow-options.d.ts.map +1 -0
- package/dist/types/workflow-options.js +2 -0
- package/dist/types/workflow-results.d.ts +55 -0
- package/dist/types/workflow-results.d.ts.map +1 -0
- package/dist/types/workflow-results.js +2 -0
- package/package.json +1 -1
Diff of `package/dist/commands/workflow.js`:

```diff
@@ -1,5 +1,38 @@
 #!/usr/bin/env node
 "use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+        desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+    var ownKeys = function(o) {
+        ownKeys = Object.getOwnPropertyNames || function (o) {
+            var ar = [];
+            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+            return ar;
+        };
+        return ownKeys(o);
+    };
+    return function (mod) {
+        if (mod && mod.__esModule) return mod;
+        var result = {};
+        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+        __setModuleDefault(result, mod);
+        return result;
+    };
+})();
 var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
@@ -9,27 +42,84 @@ const commander_1 = require("commander");
 const chalk_1 = __importDefault(require("chalk"));
 const fs_1 = require("fs");
 const path_1 = require("path");
-const
-const better_t_stack_cli_1 = require("../lib/better-t-stack-cli");
+const workflow_1 = require("../services/workflow");
 const prd_1 = require("../services/prd");
-const
-const workflow_ai_assistant_1 = require("../services/workflow-ai-assistant");
+const inquirer_1 = __importDefault(require("inquirer"));
 const workflow_prompts_1 = require("../utils/workflow-prompts");
 const streaming_options_1 = require("../utils/streaming-options");
 const progress_1 = require("../cli/display/progress");
 exports.workflowCommand = new commander_1.Command("workflow")
     .description("Interactive workflow for complete project setup and task management")
+    // Existing AI options
     .option("--stream", "Show streaming AI output")
     .option("--ai-provider <provider>", "AI provider override")
     .option("--ai-model <model>", "AI model override")
     .option("--ai-key <key>", "AI API key override")
-    .
+    .option("--ai-provider-url <url>", "AI provider URL override")
+    // Global workflow control
+    .option("--skip-all", "Skip all optional steps (use defaults)")
+    .option("--auto-accept", "Auto-accept all AI suggestions")
+    .option("--config-file <path>", "Load workflow options from JSON file")
+    // Step 1: Initialize
+    .option("--skip-init", "Skip initialization step")
+    .option("--project-name <name>", "Project name")
+    .option("--init-method <method>", "Initialization method: quick, custom, ai")
+    .option("--project-description <desc>", "Project description for AI-assisted init")
+    .option("--use-existing-config", "Use existing configuration if found")
+    .option("--frontend <framework>", "Frontend framework")
+    .option("--backend <framework>", "Backend framework")
+    .option("--database <db>", "Database choice")
+    .option("--auth", "Include authentication")
+    .option("--no-auth", "Exclude authentication")
+    .option("--bootstrap", "Bootstrap with Better-T-Stack")
+    .option("--no-bootstrap", "Skip bootstrapping")
+    // Step 2: Define PRD
+    .option("--skip-prd", "Skip PRD definition")
+    .option("--prd-method <method>", "PRD method: upload, manual, ai, skip")
+    .option("--prd-file <path>", "Path to existing PRD file")
+    .option("--prd-description <desc>", "Product description for AI-assisted PRD")
+    .option("--prd-content <content>", "Direct PRD content")
+    // Step 2.5: PRD Question/Refine (NEW)
+    .option("--skip-prd-question-refine", "Skip PRD question/refine step")
+    .option("--prd-question-refine", "Use question-based PRD refinement")
+    .option("--prd-answer-mode <mode>", "Who answers questions: user, ai")
+    .option("--prd-answer-ai-provider <provider>", "AI provider for answering (optional override)")
+    .option("--prd-answer-ai-model <model>", "AI model for answering (optional override)")
+    .option("--prd-answer-ai-reasoning", "Enable reasoning for AI answering model (if supported)")
+    // Step 3: Refine PRD
+    .option("--skip-refine", "Skip PRD refinement")
+    .option("--refine-method <method>", "Refinement method: manual, ai, skip")
+    .option("--refine-feedback <feedback>", "Feedback for AI refinement")
+    // Step 4: Generate Tasks
+    .option("--skip-generate", "Skip task generation")
+    .option("--generate-method <method>", "Generation method: standard, ai")
+    .option("--generate-instructions <instructions>", "Custom task generation instructions")
+    // Step 5: Split Tasks
+    .option("--skip-split", "Skip task splitting")
+    .option("--split-tasks <ids>", "Comma-separated task IDs to split")
+    .option("--split-all", "Split all tasks")
+    .option("--split-method <method>", "Split method: interactive, standard, custom")
+    .option("--split-instructions <instructions>", "Custom split instructions")
+    .action(async (cliOptions) => {
     try {
+        // Load and merge options from config file if specified
+        const options = await loadWorkflowOptions(cliOptions);
         console.log(chalk_1.default.blue.bold("\n🚀 Task-O-Matic Interactive Workflow\n"));
+        // Show automation status
+        if (options.configFile) {
+            console.log(chalk_1.default.cyan(`📋 Using config: ${options.configFile}`));
+        }
+        if (options.skipAll) {
+            console.log(chalk_1.default.yellow("⚡ Fast mode: Skipping all optional steps"));
+        }
+        if (options.autoAccept) {
+            console.log(chalk_1.default.yellow("✓ Auto-accepting all AI suggestions"));
+        }
         console.log(chalk_1.default.gray("This wizard will guide you through:"));
         console.log(chalk_1.default.gray(" 1. Project initialization & bootstrap"));
         console.log(chalk_1.default.gray(" 2. PRD definition"));
-        console.log(chalk_1.default.gray("
+        console.log(chalk_1.default.gray(" 2.5. PRD question & refinement (optional)"));
+        console.log(chalk_1.default.gray(" 3. PRD manual refinement (optional)"));
         console.log(chalk_1.default.gray(" 4. Task generation"));
         console.log(chalk_1.default.gray(" 5. Task splitting\n"));
         const state = {
@@ -37,23 +127,19 @@ exports.workflowCommand = new commander_1.Command("workflow")
             currentStep: "initialize",
             projectDir: process.cwd(),
         };
-        // Store AI options for later use
-        const aiOptions = {
-            aiProvider: options.aiProvider,
-            aiModel: options.aiModel,
-            aiKey: options.aiKey,
-        };
         const streamingOptions = (0, streaming_options_1.createStreamingOptions)(options.stream, "Workflow");
         // Step 1: Initialize/Bootstrap
-        await stepInitialize(state,
+        await stepInitialize(state, options, streamingOptions);
         // Step 2: Define PRD
-        await stepDefinePRD(state,
+        await stepDefinePRD(state, options, streamingOptions);
+        // Step 2.5: PRD Question/Refine (NEW)
+        await stepPRDQuestionRefine(state, options, streamingOptions);
         // Step 3: Refine PRD
-        await stepRefinePRD(state,
+        await stepRefinePRD(state, options, streamingOptions);
         // Step 4: Generate Tasks
-        await stepGenerateTasks(state,
+        await stepGenerateTasks(state, options, streamingOptions);
         // Step 5: Split Tasks
-        await stepSplitTasks(state,
+        await stepSplitTasks(state, options, streamingOptions);
         // Complete
         state.currentStep = "complete";
         console.log(chalk_1.default.green.bold("\n✅ Workflow Complete!\n"));
@@ -68,412 +154,417 @@ exports.workflowCommand = new commander_1.Command("workflow")
         process.exit(1);
     }
 });
+/**
+ * Load and merge workflow options from config file if specified
+ */
+async function loadWorkflowOptions(cliOptions) {
+    let options = { ...cliOptions };
+    if (cliOptions.configFile) {
+        try {
+            const configPath = (0, path_1.resolve)(cliOptions.configFile);
+            const { existsSync, readFileSync } = await Promise.resolve().then(() => __importStar(require("fs")));
+            if (existsSync(configPath)) {
+                const configContent = readFileSync(configPath, "utf-8");
+                const fileOptions = JSON.parse(configContent);
+                // CLI options override file options
+                options = { ...fileOptions, ...cliOptions };
+                console.log(chalk_1.default.green(`✓ Loaded workflow config from ${configPath}`));
+            }
+            else {
+                console.log(chalk_1.default.yellow(`⚠ Config file not found: ${configPath}`));
+            }
+        }
+        catch (error) {
+            console.log(chalk_1.default.red(`✗ Failed to load config file: ${error}`));
+        }
+    }
+    return options;
+}
+/**
+ * Helper to get pre-answered value or prompt user
+ */
+async function getOrPrompt(preAnswered, promptFn, skipCondition = false) {
+    if (skipCondition) {
+        throw new Error("Step skipped");
+    }
+    if (preAnswered !== undefined) {
+        return preAnswered;
+    }
+    return promptFn();
+}
 /**
  * Step 1: Initialize/Bootstrap
+ * Uses workflowService.initializeProject()
  */
-async function stepInitialize(state,
+async function stepInitialize(state, options, streamingOptions) {
     console.log(chalk_1.default.blue.bold("\n📦 Step 1: Project Initialization\n"));
-
-
-
-
-
-    const useExisting = await (0, workflow_prompts_1.confirmPrompt)("Use existing configuration?", true);
-    if (useExisting) {
-        state.initialized = true;
-        state.currentStep = "define-prd";
-        return;
-    }
+    if (options.skipInit) {
+        console.log(chalk_1.default.yellow("⏭ Skipping initialization (--skip-init)"));
+        state.initialized = false;
+        state.currentStep = "define-prd";
+        return;
     }
-    const shouldInitialize = await (0, workflow_prompts_1.confirmPrompt)("Initialize a new task-o-matic project?", true);
+    const shouldInitialize = await getOrPrompt(options.skipInit === false ? true : undefined, () => (0, workflow_prompts_1.confirmPrompt)("Initialize a new task-o-matic project?", true));
     if (!shouldInitialize) {
         console.log(chalk_1.default.yellow("⏭ Skipping initialization"));
         state.initialized = false;
         state.currentStep = "define-prd";
         return;
     }
-    const projectName = await (0, workflow_prompts_1.textInputPrompt)("What is the name of your project?", "my-app");
-    //
-    const
-    if (!(0, fs_1.existsSync)(projectDir)) {
-        (0, fs_1.mkdirSync)(projectDir, { recursive: true });
-        console.log(chalk_1.default.green(`\n✓ Created directory: ${projectName}`));
-    }
-    else {
-        console.log(chalk_1.default.yellow(`\n⚠ Directory ${projectName} already exists`));
-    }
-    console.log(chalk_1.default.cyan(` 📂 Switching to project directory: ${projectDir}\n`));
-    process.chdir(projectDir);
-    config_1.configManager.setWorkingDirectory(projectDir);
-    state.projectDir = projectDir;
-    // Initialize task-o-matic in the NEW directory
-    console.log(chalk_1.default.cyan(" Initializing task-o-matic...\n"));
-    const newTaskOMaticDir = (0, path_1.join)(projectDir, ".task-o-matic");
-    if (!(0, fs_1.existsSync)(newTaskOMaticDir)) {
-        (0, fs_1.mkdirSync)(newTaskOMaticDir, { recursive: true });
-        ["tasks", "prd", "logs"].forEach((dir) => {
-            (0, fs_1.mkdirSync)((0, path_1.join)(newTaskOMaticDir, dir), { recursive: true });
-        });
-    }
-    // AI Configuration Step - ALWAYS ask for this first
-    console.log(chalk_1.default.blue.bold("\n🤖 Step 1.1: AI Configuration\n"));
-    const aiProvider = await (0, workflow_prompts_1.selectPrompt)("Select AI Provider:", [
-        { name: "OpenRouter", value: "openrouter" },
-        { name: "Anthropic", value: "anthropic" },
-        { name: "OpenAI", value: "openai" },
-        { name: "Custom (e.g. local LLM)", value: "custom" },
-    ]);
-    let aiProviderUrl;
-    if (aiProvider === "custom") {
-        aiProviderUrl = await (0, workflow_prompts_1.textInputPrompt)("Enter Custom Provider URL:", "http://localhost:11434/v1");
-    }
-    const defaultModel = aiProvider === "openrouter"
-        ? "anthropic/claude-3.5-sonnet"
-        : aiProvider === "anthropic"
-            ? "claude-3-5-sonnet-20240620"
-            : aiProvider === "openai"
-                ? "gpt-4o"
-                : "llama3";
-    const aiModel = await (0, workflow_prompts_1.textInputPrompt)("Enter AI Model:", defaultModel);
-    // Check/Ask for API Key
-    const providerKeyName = aiProvider === "openai"
-        ? "OPENAI_API_KEY"
-        : aiProvider === "anthropic"
-            ? "ANTHROPIC_API_KEY"
-            : aiProvider === "openrouter"
-                ? "OPENROUTER_API_KEY"
-                : "AI_API_KEY";
-    // Check if key exists in current env
-    let apiKey = process.env[providerKeyName];
-    if (!apiKey) {
-        console.log(chalk_1.default.yellow(`\n⚠️ No API key found for ${aiProvider}`));
-        apiKey = await (0, workflow_prompts_1.textInputPrompt)(`Enter your ${aiProvider} API Key:`);
-    }
-    // Save AI Config to .env immediately in the new project dir
-    const envPath = (0, path_1.join)(projectDir, ".env");
-    let envContent = "";
-    if ((0, fs_1.existsSync)(envPath)) {
-        envContent = (0, fs_1.readFileSync)(envPath, "utf-8");
-    }
-    if (!envContent.includes("AI_PROVIDER=")) {
-        envContent += `AI_PROVIDER=${aiProvider}\n`;
-    }
-    if (!envContent.includes("AI_MODEL=")) {
-        envContent += `AI_MODEL=${aiModel}\n`;
-    }
-    if (aiProviderUrl && !envContent.includes("AI_PROVIDER_URL=")) {
-        envContent += `AI_PROVIDER_URL=${aiProviderUrl}\n`;
-    }
-    if (!envContent.includes(`${providerKeyName}=`)) {
-        envContent += `${providerKeyName}=${apiKey}\n`;
-    }
-    (0, fs_1.writeFileSync)(envPath, envContent);
-    // Update process.env for immediate use
-    process.env.AI_PROVIDER = aiProvider;
-    process.env.AI_MODEL = aiModel;
-    process.env[providerKeyName] = apiKey;
-    if (aiProviderUrl) {
-        process.env.AI_PROVIDER_URL = aiProviderUrl;
-    }
-    // Update ConfigManager
-    config_1.configManager.setAIConfig({
-        provider: aiProvider,
-        model: aiModel,
-        apiKey: apiKey,
-        baseURL: aiProviderUrl,
-    });
-    console.log(chalk_1.default.green("✓ AI Configuration saved"));
-    // Stack Configuration Step
-    console.log(chalk_1.default.blue.bold("\n📦 Step 1.2: Stack Configuration\n"));
-    // Choose initialization method
-    let initMethod = await (0, workflow_prompts_1.selectPrompt)("How would you like to configure your project stack?", [
+    const projectName = await getOrPrompt(options.projectName, () => (0, workflow_prompts_1.textInputPrompt)("What is the name of your project?", "my-app"));
+    // Determine initialization method
+    const initMethod = await getOrPrompt(options.initMethod, () => (0, workflow_prompts_1.selectPrompt)("How would you like to configure your project stack?", [
         { name: "Quick start (recommended defaults)", value: "quick" },
         { name: "Custom configuration", value: "custom" },
         { name: "AI-assisted (describe your project)", value: "ai" },
-    ]);
-    let
+    ]));
+    let projectDescription;
     if (initMethod === "ai") {
-
-        const description = await (0, workflow_prompts_1.textInputPrompt)("Describe your project (e.g., 'A SaaS app for team collaboration with real-time features'):");
-        console.log(chalk_1.default.gray("\n Analyzing your requirements...\n"));
-        config = await workflow_ai_assistant_1.workflowAIAssistant.assistInitConfig({
-            userDescription: description,
-            aiOptions: {
-                aiProvider,
-                aiModel,
-                aiKey: apiKey,
-                aiProviderUrl,
-            },
-            streamingOptions,
-        });
-        // Override AI's project name with user's choice
-        config.projectName = projectName;
-        // Override AI config with what we just set
-        config.aiProvider = aiProvider;
-        config.aiModel = aiModel;
-        console.log(chalk_1.default.green("\n✓ AI Recommendations:"));
-        console.log(chalk_1.default.gray(` Project: ${config.projectName}`));
-        console.log(chalk_1.default.gray(` Frontend: ${config.frontend || "none"}`));
-        console.log(chalk_1.default.gray(` Backend: ${config.backend || "none"}`));
-        console.log(chalk_1.default.gray(` Database: ${config.database || "none"}`));
-        console.log(chalk_1.default.gray(` Auth: ${config.auth ? "yes" : "no"}`));
-        if (config.reasoning) {
-            console.log(chalk_1.default.gray(`\n ${config.reasoning}\n`));
-        }
-        const acceptRecommendation = await (0, workflow_prompts_1.confirmPrompt)("Accept these recommendations?", true);
-        if (!acceptRecommendation) {
-            console.log(chalk_1.default.yellow("⏭ Falling back to custom configuration"));
-            initMethod = "custom";
-        }
-    }
-    if (initMethod === "quick") {
-        config = {
-            projectName: projectName,
-            aiProvider: aiProvider,
-            aiModel: aiModel,
-            frontend: "next",
-            backend: "hono",
-            database: "sqlite",
-            auth: true,
-            reasoning: "Modern, well-supported stack",
-        };
+        projectDescription = await getOrPrompt(options.projectDescription, () => (0, workflow_prompts_1.textInputPrompt)("Describe your project (e.g., 'A SaaS app for team collaboration with real-time features'):"));
     }
-
-
-
-
-        aiModel: aiModel,
-    };
-    const shouldBootstrap = await (0, workflow_prompts_1.confirmPrompt)("Bootstrap with Better-T-Stack?", true);
+    // Collect stack config for custom mode
+    let stackConfig = {};
+    if (initMethod === "custom") {
+        const shouldBootstrap = await getOrPrompt(options.bootstrap, () => (0, workflow_prompts_1.confirmPrompt)("Bootstrap with Better-T-Stack?", true));
         if (shouldBootstrap) {
-
+            stackConfig.frontend = await getOrPrompt(options.frontend, () => (0, workflow_prompts_1.selectPrompt)("Frontend framework:", [
                 "next",
                 "tanstack-router",
                 "react-router",
                 "vite-react",
                 "remix",
-            ]);
-
+            ]));
+            stackConfig.backend = await getOrPrompt(options.backend, () => (0, workflow_prompts_1.selectPrompt)("Backend framework:", [
                 "hono",
                 "express",
                 "elysia",
                 "fastify",
-            ]);
-
+            ]));
+            stackConfig.database = await getOrPrompt(options.database, () => (0, workflow_prompts_1.selectPrompt)("Database:", [
                 "sqlite",
                 "postgres",
                 "mysql",
                 "mongodb",
                 "turso",
                 "neon",
-            ]);
-
+            ]));
+            stackConfig.auth = await getOrPrompt(options.auth, () => (0, workflow_prompts_1.confirmPrompt)("Include authentication?", true));
         }
     }
-    //
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            noInstall: false,
-            noGit: false,
-        }, process.cwd());
-        if (result.success) {
-            console.log(chalk_1.default.green(`\n✓ ${result.message}\n`));
-            // Fix up the configuration files
-            // Because we passed ".", the config file is named ".-bts-config.json" and contains projectName: "."
-            const dotConfigPath = (0, path_1.join)(newTaskOMaticDir, ".-bts-config.json");
-            const realConfigPath = (0, path_1.join)(newTaskOMaticDir, `${projectName}-bts-config.json`);
-            const stackConfigPath = (0, path_1.join)(newTaskOMaticDir, "stack.json");
-            if ((0, fs_1.existsSync)(dotConfigPath)) {
-                const configContent = JSON.parse((0, fs_1.readFileSync)(dotConfigPath, "utf-8"));
-                configContent.projectName = projectName; // Fix the project name
-                const newContent = JSON.stringify(configContent, null, 2);
-                (0, fs_1.writeFileSync)(realConfigPath, newContent);
-                (0, fs_1.writeFileSync)(stackConfigPath, newContent);
-                // Remove the temporary dot config
-                const { unlinkSync } = require("fs");
-                unlinkSync(dotConfigPath);
-            }
-        }
-        else {
-            console.log(chalk_1.default.red(`\n✗ Bootstrap failed: ${result.message}\n`));
-            console.log(chalk_1.default.yellow("You can try running 'task-o-matic init bootstrap' manually later.\n"));
-        }
-    }
-    catch (error) {
-        console.log(chalk_1.default.red(`\n✗ Bootstrap failed: ${error}\n`));
-    }
-    }
-    }
-    // Save configuration
-    config_1.configManager.save();
+    // Determine if we should bootstrap
+    const bootstrap = initMethod === "quick" ||
+        (initMethod === "ai" && options.bootstrap !== false) ||
+        (initMethod === "custom" && Object.keys(stackConfig).length > 0);
+    // Call service
+    const result = await workflow_1.workflowService.initializeProject({
+        projectName,
+        initMethod: initMethod,
+        projectDescription,
+        aiOptions: options,
+        stackConfig,
+        bootstrap,
+        streamingOptions,
+        callbacks: {
+            onProgress: progress_1.displayProgress,
+            onError: progress_1.displayError,
+        },
+    });
     console.log(chalk_1.default.green("✓ Project initialized"));
     state.initialized = true;
-    state.projectName =
-    state.
-
-        model: aiModel,
-        key: apiKey,
-    };
+    state.projectName = result.projectName;
+    state.projectDir = result.projectDir;
+    state.aiConfig = result.aiConfig;
     state.currentStep = "define-prd";
 }
 /**
  * Step 2: Define PRD
+ * Uses workflowService.definePRD()
  */
-async function stepDefinePRD(state,
+async function stepDefinePRD(state, options, streamingOptions) {
     console.log(chalk_1.default.blue.bold("\n📝 Step 2: Define PRD\n"));
-
+    if (options.skipPrd) {
+        console.log(chalk_1.default.yellow("⏭ Skipping PRD definition (--skip-prd)"));
+        state.currentStep = "refine-prd";
+        return;
+    }
+    const prdMethod = await getOrPrompt(options.prdMethod, () => (0, workflow_prompts_1.selectPrompt)("How would you like to define your PRD?", [
         { name: "Upload existing file", value: "upload" },
         { name: "Write manually (open editor)", value: "manual" },
         { name: "AI-assisted creation", value: "ai" },
         { name: "Skip (use existing PRD)", value: "skip" },
-    ]);
-    const taskOMaticDir = config_1.configManager.getTaskOMaticDir();
-    const prdDir = (0, path_1.join)(taskOMaticDir, "prd");
+    ]));
     if (prdMethod === "skip") {
         console.log(chalk_1.default.yellow("⏭ Skipping PRD definition"));
         state.currentStep = "refine-prd";
         return;
     }
-    let
-    let
+    let prdFile;
+    let prdDescription;
+    let prdContent;
     if (prdMethod === "upload") {
-
-        if (!(0, fs_1.existsSync)(filePath)) {
-            console.log(chalk_1.default.red(`✗ File not found: ${filePath}`));
-            return stepDefinePRD(state, aiOptions, streamingOptions);
-        }
-        prdContent = (0, fs_1.readFileSync)(filePath, "utf-8");
-        prdFilename = filePath.split("/").pop() || "prd.md";
+        prdFile = await getOrPrompt(options.prdFile, () => (0, workflow_prompts_1.textInputPrompt)("Path to PRD file:"));
     }
     else if (prdMethod === "manual") {
         console.log(chalk_1.default.gray("\n Opening editor...\n"));
         prdContent = await (0, workflow_prompts_1.editorPrompt)("Write your PRD (save and close editor when done):", "# Product Requirements Document\n\n## Overview\n\n## Objectives\n\n## Features\n\n");
     }
     else if (prdMethod === "ai") {
-
-
-
-
-
-
-
-
+        prdDescription = await getOrPrompt(options.prdDescription, () => (0, workflow_prompts_1.textInputPrompt)("Describe your product in detail:"));
+    }
+    // Call service
+    const result = await workflow_1.workflowService.definePRD({
+        method: prdMethod,
+        prdFile,
+        prdDescription,
+        prdContent,
+        projectDir: state.projectDir,
+        aiOptions: options,
+        streamingOptions,
+        callbacks: {
+            onProgress: progress_1.displayProgress,
+            onError: progress_1.displayError,
+        },
+    });
+    if (prdMethod === "ai") {
         console.log(chalk_1.default.green("\n✓ PRD generated"));
-        console.log(chalk_1.default.gray("\n" + prdContent.substring(0, 500) + "...\n"));
-        const acceptPRD = await (0, workflow_prompts_1.confirmPrompt)("Accept this PRD?", true);
+        console.log(chalk_1.default.gray("\n" + result.prdContent.substring(0, 500) + "...\n"));
+        const acceptPRD = await getOrPrompt(options.autoAccept ? true : undefined, () => (0, workflow_prompts_1.confirmPrompt)("Accept this PRD?", true));
         if (!acceptPRD) {
             console.log(chalk_1.default.yellow("⏭ Regenerating..."));
-            return stepDefinePRD(state,
+            return stepDefinePRD(state, options, streamingOptions);
+        }
+    }
+    state.prdFile = result.prdFile;
+    state.prdContent = result.prdContent;
+    state.currentStep = "question-refine-prd";
+}
+/**
+ * Step 2.5: PRD Question/Refine (NEW)
+ * Uses prdService.refinePRDWithQuestions()
+ */
+async function stepPRDQuestionRefine(state, options, streamingOptions) {
+    console.log(chalk_1.default.blue.bold("\n❓ Step 2.5: PRD Question & Refine\n"));
+    if (!state.prdFile) {
+        console.log(chalk_1.default.yellow("⏭ No PRD file found, skipping"));
+        state.currentStep = "refine-prd";
+        return;
+    }
+    if (options.skipPrdQuestionRefine || options.skipAll) {
+        console.log(chalk_1.default.gray(" Skipping question-based refinement"));
+        state.currentStep = "refine-prd";
+        return;
+    }
+    // Ask if user wants question-based refinement
+    const useQuestions = await getOrPrompt(options.prdQuestionRefine, () => (0, workflow_prompts_1.confirmPrompt)("Refine PRD with clarifying questions?", false));
+    if (!useQuestions) {
+        console.log(chalk_1.default.gray(" Skipping question-based refinement"));
+        state.currentStep = "refine-prd";
+        return;
+    }
+    // Ask who should answer: user or AI
+    const answerMode = await getOrPrompt(options.prdAnswerMode, () => (0, workflow_prompts_1.selectPrompt)("Who should answer the questions?", [
+        { name: "I will answer", value: "user" },
+        { name: "AI assistant (uses PRD + stack context)", value: "ai" },
+    ]));
+    let questionAIOptions = undefined;
+    if (answerMode === "ai") {
+        // Ask if they want to use a different AI model for answering
+        const useCustomAI = await getOrPrompt(options.prdAnswerAiProvider !== undefined, () => (0, workflow_prompts_1.confirmPrompt)("Use a different AI model for answering? (e.g., a smarter model)", false));
+        if (useCustomAI) {
+            const provider = await getOrPrompt(options.prdAnswerAiProvider, () => (0, workflow_prompts_1.selectPrompt)("AI Provider for answering:", [
+                { name: "OpenRouter", value: "openrouter" },
+                { name: "Anthropic", value: "anthropic" },
+                { name: "OpenAI", value: "openai" },
+            ]));
+            const model = await getOrPrompt(options.prdAnswerAiModel, () => (0, workflow_prompts_1.textInputPrompt)("AI Model for answering:", provider === "openrouter" ? "anthropic/claude-3.5-sonnet" : ""));
+            questionAIOptions = {
+                aiProvider: provider,
+                aiModel: model,
+            };
+        }
+        // Check if reasoning should be enabled
+        if (options.prdAnswerAiReasoning) {
+            if (!questionAIOptions) {
+                // No custom AI specified, use main AI with reasoning
+                questionAIOptions = {
+                    aiProvider: options.aiProvider,
+                    aiModel: options.aiModel,
+                    aiReasoning: "enabled",
+                };
+            }
+            else {
+                // Custom AI specified, add reasoning to it
+                questionAIOptions.aiReasoning = "enabled";
+            }
         }
     }
-    //
-
-    (
-
-
-
+    // For user mode, we need to collect answers interactively
+    let answers;
+    if (answerMode === "user") {
+        // First, generate questions
+        console.log(chalk_1.default.cyan("\n Generating questions...\n"));
+        const questions = await prd_1.prdService.generateQuestions({
+            file: state.prdFile,
+            workingDirectory: state.projectDir,
+            aiOptions: options,
+            streamingOptions,
+            callbacks: {
+                onProgress: progress_1.displayProgress,
+                onError: progress_1.displayError,
+            },
+        });
+        if (questions.length === 0) {
+            console.log(chalk_1.default.yellow("No questions generated - PRD appears complete"));
+            state.currentStep = "refine-prd";
+            return;
+        }
+        console.log(chalk_1.default.blue(`\nPlease answer the following ${questions.length} questions to refine the PRD:\n`));
+        answers = {};
+        for (let i = 0; i < questions.length; i++) {
+            const q = questions[i];
+            const answer = await inquirer_1.default.prompt([
+                {
+                    type: "input",
+                    name: "response",
+                    message: `${i + 1}/${questions.length}: ${q}`,
+                    validate: (input) => input.trim().length > 0 || "Please provide an answer",
+                },
+            ]);
+            answers[q] = answer.response;
+        }
+    }
+    // Call service - it will automatically refine after answering
+    console.log(chalk_1.default.cyan("\n Generating questions and refining PRD...\n"));
+    const result = await prd_1.prdService.refinePRDWithQuestions({
+        file: state.prdFile,
+        questionMode: answerMode,
+        answers, // Only provided for user mode
+        questionAIOptions,
+        workingDirectory: state.projectDir,
+        aiOptions: options,
+        streamingOptions,
+        callbacks: {
+            onProgress: progress_1.displayProgress,
+            onError: progress_1.displayError,
+        },
+    });
+    console.log(chalk_1.default.green(`\n✓ PRD refined with ${result.questions.length} questions answered`));
+    console.log(chalk_1.default.gray("\n Questions & Answers:"));
+    result.questions.forEach((q, i) => {
+        console.log(chalk_1.default.cyan(` Q${i + 1}: ${q}`));
+        console.log(chalk_1.default.gray(` A${i + 1}: ${result.answers[q]?.substring(0, 100)}...\n`));
+    });
+    // Update state with refined PRD
+    state.prdFile = result.refinedPRDPath;
+    state.prdContent = (0, fs_1.readFileSync)(result.refinedPRDPath, "utf-8");
     state.currentStep = "refine-prd";
 }
 /**
  * Step 3: Refine PRD
+ * Uses workflowService.refinePRD()
  */
-async function stepRefinePRD(state,
+async function stepRefinePRD(state, options, streamingOptions) {
     console.log(chalk_1.default.blue.bold("\n✨ Step 3: Refine PRD\n"));
     if (!state.prdFile) {
         console.log(chalk_1.default.yellow("⏭ No PRD file found, skipping refinement"));
         state.currentStep = "generate-tasks";
         return;
     }
-
+    if (options.skipRefine || options.skipAll) {
+        console.log(chalk_1.default.gray(" Skipping refinement"));
+        state.currentStep = "generate-tasks";
+        return;
+    }
+    const shouldRefine = await getOrPrompt(options.skipRefine === false ? true : undefined, () => (0, workflow_prompts_1.confirmPrompt)("Refine your PRD further?", false));
     if (!shouldRefine) {
         console.log(chalk_1.default.gray(" Skipping refinement"));
         state.currentStep = "generate-tasks";
         return;
     }
-    const refineMethod = await (0, workflow_prompts_1.selectPrompt)("How would you like to refine?", [
+    const refineMethod = await getOrPrompt(options.refineMethod, () => (0, workflow_prompts_1.selectPrompt)("How would you like to refine?", [
         { name: "Manual editing (open editor)", value: "manual" },
         { name: "AI-assisted refinement", value: "ai" },
         { name: "Skip", value: "skip" },
-    ]);
+    ]));
     if (refineMethod === "skip") {
         state.currentStep = "generate-tasks";
         return;
     }
     let refinedContent = state.prdContent || (0, fs_1.readFileSync)(state.prdFile, "utf-8");
+    let feedback;
     if (refineMethod === "manual") {
         console.log(chalk_1.default.gray("\n Opening editor...\n"));
         refinedContent = await (0, workflow_prompts_1.editorPrompt)("Edit your PRD (save and close when done):", refinedContent);
     }
     else if (refineMethod === "ai") {
-
-
-
-
-
-
-
-
-
+        feedback = await getOrPrompt(options.refineFeedback, () => (0, workflow_prompts_1.textInputPrompt)("What would you like to improve? (e.g., 'Add more technical details', 'Focus on MVP features'):"));
+    }
+    // Call service
+    const result = await workflow_1.workflowService.refinePRD({
+        method: refineMethod,
+        prdFile: state.prdFile,
+        prdContent: refineMethod === "manual" ? refinedContent : undefined,
+        feedback,
+        projectDir: state.projectDir,
+        aiOptions: options,
+        streamingOptions,
+        callbacks: {
+            onProgress: progress_1.displayProgress,
+            onError: progress_1.displayError,
+        },
+    });
+    if (refineMethod === "ai") {
         console.log(chalk_1.default.green("\n✓ PRD refined"));
-        console.log(chalk_1.default.gray("\n" +
-        const acceptRefinement = await (0, workflow_prompts_1.confirmPrompt)("Accept refinements?", true);
+        console.log(chalk_1.default.gray("\n" + result.prdContent.substring(0, 500) + "...\n"));
+        const acceptRefinement = await getOrPrompt(options.autoAccept ? true : undefined, () => (0, workflow_prompts_1.confirmPrompt)("Accept refinements?", true));
         if (!acceptRefinement) {
             console.log(chalk_1.default.yellow("⏭ Keeping original PRD"));
             state.currentStep = "generate-tasks";
             return;
         }
     }
-
-    (0, fs_1.writeFileSync)(state.prdFile, refinedContent);
-    state.prdContent = refinedContent;
+    state.prdContent = result.prdContent;
     console.log(chalk_1.default.green(`✓ PRD updated`));
     state.currentStep = "generate-tasks";
 }
 /**
  * Step 4: Generate Tasks
+ * Uses workflowService.generateTasks()
  */
-async function stepGenerateTasks(state,
+async function stepGenerateTasks(state, options, streamingOptions) {
     console.log(chalk_1.default.blue.bold("\n🎯 Step 4: Generate Tasks\n"));
     if (!state.prdFile) {
         console.log(chalk_1.default.yellow("⏭ No PRD file found, skipping task generation"));
         state.currentStep = "split-tasks";
         return;
     }
-
+    if (options.skipGenerate || options.skipAll) {
+        console.log(chalk_1.default.gray(" Skipping task generation"));
+        state.currentStep = "split-tasks";
+        return;
+    }
+    const shouldGenerate = await getOrPrompt(options.skipGenerate === false ? true : undefined, () => (0, workflow_prompts_1.confirmPrompt)("Generate tasks from PRD?", true));
     if (!shouldGenerate) {
         console.log(chalk_1.default.gray(" Skipping task generation"));
         state.currentStep = "split-tasks";
         return;
     }
-    const generationMethod = await (0, workflow_prompts_1.selectPrompt)("Choose generation method:", [
+    const generationMethod = await getOrPrompt(options.generateMethod, () => (0, workflow_prompts_1.selectPrompt)("Choose generation method:", [
         { name: "Standard parsing", value: "standard" },
         { name: "AI-assisted with custom instructions", value: "ai" },
-    ]);
+    ]));
     let customInstructions;
     if (generationMethod === "ai") {
-        customInstructions = await (0, workflow_prompts_1.textInputPrompt)("Custom instructions (e.g., 'Focus on MVP features', 'Break into small tasks'):", "");
+        customInstructions = await getOrPrompt(options.generateInstructions, () => (0, workflow_prompts_1.textInputPrompt)("Custom instructions (e.g., 'Focus on MVP features', 'Break into small tasks'):", ""));
     }
     console.log(chalk_1.default.cyan("\n Parsing PRD and generating tasks...\n"));
-
-
-
-
-
+    // Call service
+    const result = await workflow_1.workflowService.generateTasks({
+        prdFile: state.prdFile,
+        method: generationMethod,
+        customInstructions,
+        projectDir: state.projectDir,
+        aiOptions: options,
         streamingOptions,
         callbacks: {
             onProgress: progress_1.displayProgress,
@@ -495,23 +586,39 @@ async function stepGenerateTasks(state, aiOptions, streamingOptions) {
 }
 /**
  * Step 5: Split Complex Tasks
+ * Uses workflowService.splitTasks()
  */
-async function stepSplitTasks(state,
+async function stepSplitTasks(state, options, streamingOptions) {
     console.log(chalk_1.default.blue.bold("\n🔀 Step 5: Split Complex Tasks\n"));
     if (!state.tasks || state.tasks.length === 0) {
         console.log(chalk_1.default.yellow("⏭ No tasks found, skipping splitting"));
         return;
     }
-
-    if (!shouldSplit) {
+    if (options.skipSplit || options.skipAll) {
         console.log(chalk_1.default.gray(" Skipping task splitting"));
         return;
     }
-    //
-
-
-
-
+    // Handle --split-tasks and --split-all options
+    let tasksToSplit;
+    if (options.splitAll) {
+        tasksToSplit = state.tasks?.map((t) => t.id) || [];
+        console.log(chalk_1.default.cyan(` Splitting all ${tasksToSplit.length} tasks`));
+    }
+    else if (options.splitTasks) {
+        tasksToSplit = options.splitTasks.split(",").map((id) => id.trim());
+        console.log(chalk_1.default.cyan(` Splitting ${tasksToSplit.length} specified tasks`));
+    }
+    else {
+        const shouldSplit = await (0, workflow_prompts_1.confirmPrompt)("Split any complex tasks into subtasks?", false);
+        if (!shouldSplit) {
+            console.log(chalk_1.default.gray(" Skipping task splitting"));
+            return;
+        }
+        tasksToSplit = await (0, workflow_prompts_1.multiSelectPrompt)("Select tasks to split:", state.tasks.map((t) => ({
+            name: `${t.title}${t.description ? ` - ${t.description.substring(0, 50)}...` : ""}`,
+            value: t.id,
+        })));
+    }
     if (tasksToSplit.length === 0) {
         console.log(chalk_1.default.gray(" No tasks selected"));
         return;
@@ -519,42 +626,56 @@ async function stepSplitTasks(state, aiOptions, streamingOptions) {
     let globalSplitMethod = "interactive";
     let globalCustomInstructions;
     if (tasksToSplit.length > 1) {
-        globalSplitMethod = await (0, workflow_prompts_1.selectPrompt)("How would you like to split these tasks?", [
+        globalSplitMethod = await getOrPrompt(options.splitMethod, () => (0, workflow_prompts_1.selectPrompt)("How would you like to split these tasks?", [
            { name: "Interactive (ask for each task)", value: "interactive" },
            { name: "Standard AI split for ALL", value: "standard" },
            { name: "Same custom instructions for ALL", value: "custom" },
-        ]);
+        ]));
         if (globalSplitMethod === "custom") {
-            globalCustomInstructions = await (0, workflow_prompts_1.textInputPrompt)("Custom instructions for ALL tasks (e.g., 'Break into 2-4 hour chunks'):", "");
+            globalCustomInstructions = await getOrPrompt(options.splitInstructions, () => (0, workflow_prompts_1.textInputPrompt)("Custom instructions for ALL tasks (e.g., 'Break into 2-4 hour chunks'):", ""));
         }
     }
-
-
-
-
-
-
-
-
-            splitMethod = await (0, workflow_prompts_1.selectPrompt)("Split method:", [
+    // Collect per-task instructions for interactive mode
+    const taskInstructions = {};
+    if (globalSplitMethod === "interactive") {
+        for (const taskId of tasksToSplit) {
+            const task = state.tasks.find((t) => t.id === taskId);
+            if (!task)
+                continue;
+            console.log(chalk_1.default.cyan(`\n Task: ${task.title}\n`));
+            const splitMethod = await (0, workflow_prompts_1.selectPrompt)("Split method:", [
                 { name: "Standard AI split", value: "standard" },
                 { name: "Custom instructions", value: "custom" },
             ]);
             if (splitMethod === "custom") {
-
+                taskInstructions[taskId] = await (0, workflow_prompts_1.textInputPrompt)("Custom instructions (e.g., 'Break into 2-4 hour chunks'):", "");
             }
         }
-
-
-
-
-
+    }
+    // Call service
+    const result = await workflow_1.workflowService.splitTasks({
+        taskIds: tasksToSplit,
+        splitMethod: globalSplitMethod,
+        customInstructions: globalCustomInstructions,
+        aiOptions: options,
+        streamingOptions,
+        callbacks: {
+            onProgress: progress_1.displayProgress,
+            onError: progress_1.displayError,
+        },
+    });
+    // Display results
+    result.results.forEach((taskResult) => {
+        const task = state.tasks?.find((t) => t.id === taskResult.taskId);
+        if (taskResult.error) {
+            console.log(chalk_1.default.red(` ✗ Failed to split ${task?.title}: ${taskResult.error}`));
+        }
+        else {
+            console.log(chalk_1.default.green(` ✓ Split ${task?.title} into ${taskResult.subtasks.length} subtasks`));
+            taskResult.subtasks.forEach((subtask, index) => {
                console.log(chalk_1.default.gray(` ${index + 1}. ${subtask.title}`));
            });
        }
-
-        console.log(chalk_1.default.red(` ✗ Failed to split task: ${error}`));
-        }
-    }
+    });
     console.log(chalk_1.default.green("\n✓ Task splitting complete"));
 }
```