task-o-matic 0.0.21 → 0.0.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/install.d.ts +3 -0
- package/dist/commands/install.d.ts.map +1 -0
- package/dist/commands/install.js +150 -0
- package/dist/commands/prd.d.ts +5 -0
- package/dist/commands/prd.d.ts.map +1 -1
- package/dist/commands/prd.js +297 -189
- package/dist/commands/tasks/split.d.ts.map +1 -1
- package/dist/commands/tasks/split.js +129 -27
- package/dist/commands/utils/ai-parallel.d.ts +20 -0
- package/dist/commands/utils/ai-parallel.d.ts.map +1 -0
- package/dist/commands/utils/ai-parallel.js +115 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +3 -1
- package/dist/lib/ai-service/gemini-proxy.d.ts +15 -0
- package/dist/lib/ai-service/gemini-proxy.d.ts.map +1 -0
- package/dist/lib/ai-service/gemini-proxy.js +90 -0
- package/dist/lib/ai-service/json-parser.d.ts +16 -4
- package/dist/lib/ai-service/json-parser.d.ts.map +1 -1
- package/dist/lib/ai-service/json-parser.js +93 -19
- package/dist/lib/ai-service/model-provider.d.ts.map +1 -1
- package/dist/lib/ai-service/model-provider.js +31 -2
- package/dist/lib/ai-service/prd-operations.d.ts.map +1 -1
- package/dist/lib/ai-service/prd-operations.js +21 -5
- package/dist/lib/ai-service/task-operations.d.ts.map +1 -1
- package/dist/lib/ai-service/task-operations.js +10 -2
- package/dist/lib/better-t-stack-cli.d.ts.map +1 -1
- package/dist/lib/better-t-stack-cli.js +6 -5
- package/dist/lib/config-validation.d.ts +9 -9
- package/dist/lib/config-validation.d.ts.map +1 -1
- package/dist/lib/config-validation.js +11 -3
- package/dist/lib/config.d.ts.map +1 -1
- package/dist/lib/config.js +11 -2
- package/dist/lib/provider-defaults.json +11 -1
- package/dist/services/tasks.d.ts.map +1 -1
- package/dist/services/tasks.js +25 -15
- package/dist/test/commands.test.js +10 -10
- package/dist/test/model-parsing.test.d.ts +2 -0
- package/dist/test/model-parsing.test.d.ts.map +1 -0
- package/dist/test/model-parsing.test.js +73 -0
- package/dist/types/cli-options.d.ts +2 -0
- package/dist/types/cli-options.d.ts.map +1 -1
- package/dist/types/index.d.ts +12 -1
- package/dist/types/index.d.ts.map +1 -1
- package/dist/types/index.js +10 -0
- package/dist/utils/ai-operation-utility.d.ts.map +1 -1
- package/dist/utils/ai-operation-utility.js +26 -2
- package/dist/utils/metadata-utils.d.ts +1 -1
- package/dist/utils/streaming-utils.d.ts.map +1 -1
- package/dist/utils/streaming-utils.js +4 -0
- package/docs/agents/cli.md +19 -12
- package/package.json +1 -1
package/dist/commands/tasks/split.js
CHANGED

@@ -12,8 +12,12 @@ const task_1 = require("../../cli/display/task");
 const progress_tracking_1 = require("../../utils/progress-tracking");
 const cli_validators_1 = require("../../utils/cli-validators");
 const bulk_operations_1 = require("../../utils/bulk-operations");
-const confirmation_1 = require("../../utils/confirmation");
 const command_error_handler_1 = require("../../utils/command-error-handler");
+const ai_parallel_1 = require("../utils/ai-parallel");
+const prd_1 = require("../prd");
+const fs_1 = require("fs");
+const path_1 = require("path");
+const config_1 = require("../../lib/config");
 exports.splitCommand = new commander_1.Command("split")
     .description("Split a task into smaller subtasks using AI")
     .option("--task-id <id>", "Task ID to split")
@@ -24,9 +28,10 @@ exports.splitCommand = new commander_1.Command("split")
     .option("--force", "Skip confirmation prompt for bulk operations")
     .option("--stream", "Show streaming AI output during breakdown")
     .option("--ai-provider <provider>", "AI provider override")
-    .option("--ai-model <model>", "AI model override")
     .option("--ai-key <key>", "AI API key override")
     .option("--ai-provider-url <url>", "AI provider URL override")
+    .option("--ai <models...>", "AI model(s) to use. Format: [provider:]model[;reasoning[=budget]]")
+    .option("--combine-ai <provider:model>", "AI model to combine multiple split results")
     .option("--reasoning <tokens>", "Enable reasoning for OpenRouter models (max reasoning tokens)")
     .option("--tools", "Enable filesystem tools for project analysis")
     .action((0, command_error_handler_1.wrapCommandHandler)("Task splitting", async (options) => {
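The new `--ai` flag takes one or more model specs in the `[provider:]model[;reasoning[=budget]]` format shown in its help text, and the handler resolves each spec through `parseModelString` from `commands/prd` (referenced above but not shown in this diff). A minimal sketch of a parser for that grammar — every name and default here is an assumption, not the package's actual code:

```ts
// Hypothetical re-implementation of the "[provider:]model[;reasoning[=budget]]"
// grammar; the real parseModelString lives in commands/prd and is not shown here.
interface ParsedModel {
  provider?: string;  // e.g. "openrouter"; omitted => caller's default provider
  model: string;      // e.g. "openai/gpt-4o"
  reasoning?: string; // token budget from ";reasoning=8000", if present
}

function parseModelSpec(spec: string): ParsedModel {
  const [head, ...rest] = spec.split(";");
  // Split only on the first ":" so model IDs that contain ":" stay intact.
  const colon = head.indexOf(":");
  const provider = colon > 0 ? head.slice(0, colon) : undefined;
  const model = colon > 0 ? head.slice(colon + 1) : head;
  const reasoningPart = rest.find((p) => p.startsWith("reasoning"));
  const eq = reasoningPart ? reasoningPart.indexOf("=") : -1;
  const reasoning = reasoningPart
    ? (eq >= 0 ? reasoningPart.slice(eq + 1) : "enabled")
    : undefined;
  return { provider, model, reasoning };
}

// parseModelSpec("openrouter:openai/gpt-4o;reasoning=8000")
//   => { provider: "openrouter", model: "openai/gpt-4o", reasoning: "8000" }
```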
@@ -42,27 +47,120 @@ exports.splitCommand = new commander_1.Command("split")
     }
     const streamingOptions = (0, streaming_options_1.createStreamingOptions)(options.stream, "Task breakdown");
     try {
[… 18 removed lines (old 45–62) were not rendered in the diff view …]
+        // Check for parallel/multi-model usage
+        const cliModels = Array.isArray(options.ai)
+            ? options.ai
+            : options.ai
+                ? [options.ai]
+                : [];
+        if (cliModels.length > 0 || options.combineAi) {
+            const modelsToUse = cliModels.length > 0
+                ? cliModels
+                : [
+                    `${options.aiProvider || "openai"}:${options.aiModel || "gpt-4o"}`,
+                ];
+            const task = await tasks_1.taskService.getTask(taskId);
+            if (!task)
+                throw new Error(`Task ${taskId} not found`);
+            const taskOMaticDir = config_1.configManager.getTaskOMaticDir();
+            const tasksJsonPath = (0, path_1.join)(taskOMaticDir, "tasks.json");
+            const aiMetadataPath = (0, path_1.join)(taskOMaticDir, "ai-metadata.json");
+            const tasksBackupPath = (0, path_1.join)(taskOMaticDir, "tasks.json.bak");
+            const aiMetadataBackupPath = (0, path_1.join)(taskOMaticDir, "ai-metadata.json.bak");
+            // 1. Backup Phase
+            if ((0, fs_1.existsSync)(tasksJsonPath)) {
+                (0, fs_1.copyFileSync)(tasksJsonPath, tasksBackupPath);
+            }
+            if ((0, fs_1.existsSync)(aiMetadataPath)) {
+                (0, fs_1.copyFileSync)(aiMetadataPath, aiMetadataBackupPath);
+            }
+            let results = [];
+            try {
+                // 2. Parallel Generation (Service-Based)
+                // We let the service write to the file. It will be corrupted/racy, but we don't care.
+                results = await (0, ai_parallel_1.runAIParallel)(modelsToUse, async (modelConfig, streamingOptions) => {
+                    const result = await tasks_1.taskService.splitTask(taskId, {
+                        aiProvider: modelConfig.provider,
+                        aiModel: modelConfig.model,
+                        aiKey: options.aiKey,
+                        aiProviderUrl: options.aiProviderUrl,
+                        aiReasoning: options.reasoning || modelConfig.reasoning,
+                    }, undefined, undefined, streamingOptions, options.tools);
+                    return {
+                        data: result.subtasks, // We only care about the returned data
+                        stats: result.stats,
+                    };
+                }, {
+                    description: "Splitting Task",
+                    showSummary: true,
+                });
+            }
+            finally {
+                // 3. Restore Phase
+                // Restore the original clean state
+                if ((0, fs_1.existsSync)(tasksBackupPath)) {
+                    (0, fs_1.renameSync)(tasksBackupPath, tasksJsonPath);
+                }
+                if ((0, fs_1.existsSync)(aiMetadataBackupPath)) {
+                    (0, fs_1.renameSync)(aiMetadataBackupPath, aiMetadataPath);
+                }
+            }
+            if (options.combineAi && results.length > 0) {
+                const taskLists = results.map((r) => r.data);
+                const combineModelConfig = (0, prd_1.parseModelString)(options.combineAi);
+                console.log(chalk_1.default.blue(`\nCombining ${taskLists.length} subtask lists with ${combineModelConfig.model}...`));
+                // Construct the message with drafts
+                let draftsMessage = "Here are draft subtask lists generated by multiple models. Please combine them into the best possible single list ensuring strict schema compliance:\n\n";
+                results.forEach((r, idx) => {
+                    draftsMessage += `--- Model ${idx + 1} Draft ---\n${JSON.stringify(r.data, null, 2)}\n\n`;
+                });
+                // 4. Service-Based Combination
+                const result = await tasks_1.taskService.splitTask(taskId, {
+                    aiProvider: combineModelConfig.provider,
+                    aiModel: combineModelConfig.model,
+                    aiReasoning: options.reasoning || combineModelConfig.reasoning,
+                }, undefined, draftsMessage, // Inject drafts
+                (0, streaming_options_1.createStreamingOptions)(options.stream, "Combining"), options.tools);
+                (0, task_1.displaySubtaskCreation)(result.subtasks);
+            }
+            else if (results.length > 0) {
+                // Single select fallback (using service to save cleanly)
+                const bestResult = results[0].data;
+                console.log(chalk_1.default.yellow("\n⚠️ Multiple models used without --combine-ai. Saving result from first model."));
+                const draftsMessage = `Here is the pre-generated subtask list. Please validate and save it exactly as is:\n\n${JSON.stringify(bestResult, null, 2)}`;
+                const result = await tasks_1.taskService.splitTask(taskId, {
+                    aiProvider: options.aiProvider,
+                    aiModel: options.aiModel,
+                    aiReasoning: options.reasoning,
+                }, undefined, draftsMessage, (0, streaming_options_1.createStreamingOptions)(options.stream, "Saving"), options.tools);
+                (0, task_1.displaySubtaskCreation)(result.subtasks);
+            }
+        }
+        else {
+            // Standard single model execution
+            const result = await (0, progress_tracking_1.withProgressTracking)(async () => {
+                return await tasks_1.taskService.splitTask(taskId, {
+                    aiProvider: options.aiProvider,
+                    aiModel: options.aiModel,
+                    aiKey: options.aiKey,
+                    aiProviderUrl: options.aiProviderUrl,
+                    aiReasoning: options.reasoning,
+                }, undefined, undefined, streamingOptions, options.tools);
+            });
+            (0, task_1.displaySubtaskCreation)(result.subtasks);
+            // Display AI metadata
+            console.log(chalk_1.default.gray(`\n📊 AI Splitting Details:`));
+            console.log(chalk_1.default.gray(`   Provider: ${result.metadata.aiProvider}`));
+            console.log(chalk_1.default.gray(`   Model: ${result.metadata.aiModel}`));
+            console.log(chalk_1.default.gray(`   Subtasks created: ${result.subtasks.length}`));
+            console.log(chalk_1.default.gray(`   Confidence: ${result.metadata.confidence
+                ? (result.metadata.confidence * 100).toFixed(1)
+                : "N/A"}%`));
+        }
     }
     catch (error) {
-        if (error instanceof Error &&
+        if (error instanceof Error &&
+            error.message.includes("already has")) {
             console.log(chalk_1.default.yellow(`⚠️ ${error.message}`));
             return;
         }
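The heart of the new multi-model path is a backup → run → restore-in-`finally` dance: `tasks.json` and `ai-metadata.json` are snapshotted, the concurrent `splitTask` calls are allowed to race on them (the inline comment openly accepts the corruption), and the `finally` block puts the clean snapshots back so only the later, deliberate save wins. A one-file sketch of that pattern — `withFileSnapshot` is a hypothetical helper name, not a package API:

```ts
// Backup → run → restore-in-finally, under the assumption that only the
// returned value (not the file contents) matters while work() runs.
import { copyFileSync, existsSync, renameSync } from "fs";

async function withFileSnapshot<T>(file: string, work: () => Promise<T>): Promise<T> {
  const backup = `${file}.bak`;
  // 1. Backup phase: snapshot the current state if the file exists.
  if (existsSync(file)) copyFileSync(file, backup);
  try {
    // 2. Work phase: concurrent writers may freely corrupt `file` here.
    return await work();
  } finally {
    // 3. Restore phase: rename puts the clean snapshot back.
    if (existsSync(backup)) renameSync(backup, file);
  }
}
```

Because the restore sits in `finally`, even a failed batch leaves the working files exactly as they were, and the subsequent combine/fallback call to `splitTask` is the only write that survives.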
@@ -85,12 +183,16 @@ exports.splitCommand = new commander_1.Command("split")
         console.log(chalk_1.default.yellow("No tasks found matching the filters"));
         return;
     }
-    // Confirm bulk operation
-    const confirmed = await
[… 4 removed lines (old 90–93) were not rendered in the diff view …]
+    // // Confirm bulk operation
+    // const confirmed = await confirmBulkOperation(
+    //     "split",
+    //     tasks.length,
+    //     options.force
+    // );
+    // if (!confirmed) {
+    //     console.log(chalk.yellow("Operation cancelled"));
+    //     return;
+    // }
     await (0, bulk_operations_1.executeBulkOperation)((taskId) => splitSingleTask(taskId), {
         operationName: "Splitting",
         operationEmoji: "🚧",
package/dist/commands/utils/ai-parallel.d.ts
ADDED

@@ -0,0 +1,20 @@
+export interface AIParallelResult<T> {
+    modelId: string;
+    data: T;
+    stats: any;
+}
+/**
+ * Run AI operations in parallel with progress tracking
+ */
+export declare function runAIParallel<T>(models: string[], operation: (modelConfig: any, streamingOptions: any) => Promise<{
+    data: T;
+    stats: any;
+}>, options: {
+    description: string;
+    showSummary?: boolean;
+}): Promise<AIParallelResult<T>[]>;
+/**
+ * Combine multiple PRDs into a single master PRD
+ */
+export declare function combinePRDs(prds: string[], description: string, modelStr: string, stream: boolean, reasoningOverride?: string): Promise<string>;
+//# sourceMappingURL=ai-parallel.d.ts.map
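From the declaration alone, `runAIParallel` is generic over the per-model payload and resolves with one `AIParallelResult<T>` per model that succeeded. A hypothetical call shaped by that signature — the import path, model strings, and the operation body are illustrative, not values shipped with the package:

```ts
// Illustrative usage of the declared API; everything concrete here is assumed.
import { runAIParallel } from "task-o-matic/dist/commands/utils/ai-parallel";

async function demo() {
  const results = await runAIParallel<string[]>(
    ["openai:gpt-4o", "anthropic:claude-sonnet-4"],
    async (modelConfig, streamingOptions) => {
      // Call any AI-backed service here; return its payload plus timing/token
      // stats so the summary table has something to show.
      const data = [`draft from ${modelConfig.model}`];
      return { data, stats: { duration: 1200, tokenUsage: { total: 345 } } };
    },
    { description: "Generating drafts", showSummary: true },
  );
  // One AIParallelResult<string[]> per model that completed successfully.
  for (const r of results) console.log(r.modelId, r.data);
}
```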
package/dist/commands/utils/ai-parallel.d.ts.map
ADDED

@@ -0,0 +1 @@
+{"version":3,"file":"ai-parallel.d.ts","sourceRoot":"","sources":["../../../src/commands/utils/ai-parallel.ts"],"names":[],"mappings":"AAYA,MAAM,WAAW,gBAAgB,CAAC,CAAC;IACjC,OAAO,EAAE,MAAM,CAAC;IAChB,IAAI,EAAE,CAAC,CAAC;IACR,KAAK,EAAE,GAAG,CAAC;CACZ;AAED;;GAEG;AACH,wBAAsB,aAAa,CAAC,CAAC,EACnC,MAAM,EAAE,MAAM,EAAE,EAChB,SAAS,EAAE,CACT,WAAW,EAAE,GAAG,EAChB,gBAAgB,EAAE,GAAG,KAClB,OAAO,CAAC;IAAE,IAAI,EAAE,CAAC,CAAC;IAAC,KAAK,EAAE,GAAG,CAAA;CAAE,CAAC,EACrC,OAAO,EAAE;IACP,WAAW,EAAE,MAAM,CAAC;IACpB,WAAW,CAAC,EAAE,OAAO,CAAC;CACvB,GACA,OAAO,CAAC,gBAAgB,CAAC,CAAC,CAAC,EAAE,CAAC,CA6HhC;AAED;;GAEG;AACH,wBAAsB,WAAW,CAC/B,IAAI,EAAE,MAAM,EAAE,EACd,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,OAAO,EACf,iBAAiB,CAAC,EAAE,MAAM,GACzB,OAAO,CAAC,MAAM,CAAC,CA2BjB"}
package/dist/commands/utils/ai-parallel.js
ADDED

@@ -0,0 +1,115 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.runAIParallel = runAIParallel;
+exports.combinePRDs = combinePRDs;
+const chalk_1 = __importDefault(require("chalk"));
+const ai_service_factory_1 = require("../../utils/ai-service-factory");
+const prd_1 = require("../prd");
+const streaming_options_1 = require("../../utils/streaming-options");
+const config_1 = require("../../lib/config");
+/**
+ * Run AI operations in parallel with progress tracking
+ */
+async function runAIParallel(models, operation, options) {
+    console.log(chalk_1.default.blue(`\n${options.description} with ${models.length} models concurrently...\n`));
+    const modelMap = new Map();
+    const modelStatus = new Map();
+    const results = [];
+    // Print initial lines
+    models.forEach((m, i) => {
+        modelMap.set(m, i);
+        modelStatus.set(m, "Waiting...");
+        console.log(chalk_1.default.dim(`- ${m}: Waiting...`));
+    });
+    const totalModels = models.length;
+    // Generate concurrently
+    const promises = models.map(async (modelStr) => {
+        const modelConfig = (0, prd_1.parseModelString)(modelStr);
+        const index = modelMap.get(modelStr);
+        // Update status: Starting
+        const up = totalModels - index;
+        process.stdout.write(`\x1B[${up}A`);
+        process.stdout.write(`\x1B[2K`);
+        process.stdout.write(`- ${chalk_1.default.bold(modelStr)}: ${chalk_1.default.yellow("Starting...")}\r`);
+        process.stdout.write(`\x1B[${up}B`);
+        try {
+            const streamingOptions = {
+                onReasoning: (text) => {
+                    const up = totalModels - index;
+                    process.stdout.write(`\x1B[${up}A`);
+                    process.stdout.write(`\x1B[2K`);
+                    process.stdout.write(`- ${chalk_1.default.bold(modelStr)}: ${chalk_1.default.magenta("Reasoning...")}\r`);
+                    process.stdout.write(`\x1B[${up}B`);
+                },
+                onChunk: (chunk) => {
+                    const up = totalModels - index;
+                    process.stdout.write(`\x1B[${up}A`);
+                    process.stdout.write(`\x1B[2K`);
+                    process.stdout.write(`- ${chalk_1.default.bold(modelStr)}: ${chalk_1.default.blue("Generating...")}\r`);
+                    process.stdout.write(`\x1B[${up}B`);
+                },
+            };
+            const result = await operation(modelConfig, streamingOptions);
+            // Update status: Completed
+            const up2 = totalModels - index;
+            process.stdout.write(`\x1B[${up2}A`);
+            process.stdout.write(`\x1B[2K`);
+            process.stdout.write(`- ${chalk_1.default.bold(modelStr)}: ${chalk_1.default.green(`Completed (${result.stats.duration}ms)`)}\r`);
+            process.stdout.write(`\x1B[${up2}B`);
+            results.push({
+                modelId: modelStr,
+                data: result.data,
+                stats: result.stats,
+            });
+            return result;
+        }
+        catch (error) {
+            const up2 = totalModels - index;
+            process.stdout.write(`\x1B[${up2}A`);
+            process.stdout.write(`\x1B[2K`);
+            process.stdout.write(`- ${chalk_1.default.bold(modelStr)}: ${chalk_1.default.red(`Failed: ${error instanceof Error ? error.message : String(error)}`)}\r`);
+            process.stdout.write(`\x1B[${up2}B`);
+            throw error;
+        }
+    });
+    await Promise.allSettled(promises);
+    if (options.showSummary) {
+        // Display summary
+        console.log(chalk_1.default.green(`\n✅ ${options.description} completed (${results.length}/${models.length} success)\n`));
+        console.log(chalk_1.default.bold(`${"Model".padEnd(40)} | ${"Duration".padEnd(10)} | ${"TTFT".padEnd(10)} | ${"Tokens".padEnd(10)}`));
+        console.log("-".repeat(80));
+        results.forEach((r) => {
+            const duration = `${r.stats.duration}ms`;
+            const ttft = r.stats.timeToFirstToken
+                ? `${r.stats.timeToFirstToken}ms`
+                : "N/A";
+            const tokens = r.stats.tokenUsage
+                ? r.stats.tokenUsage.total.toString()
+                : "N/A";
+            console.log(`${r.modelId.padEnd(40)} | ${duration.padEnd(10)} | ${ttft.padEnd(10)} | ${tokens.padEnd(10)}`);
+        });
+    }
+    return results;
+}
+/**
+ * Combine multiple PRDs into a single master PRD
+ */
+async function combinePRDs(prds, description, modelStr, stream, reasoningOverride) {
+    console.log(chalk_1.default.blue("\nCombining PRDs into master PRD..."));
+    const aiOperations = (0, ai_service_factory_1.getAIOperations)();
+    const combineModelConfig = (0, prd_1.parseModelString)(modelStr);
+    const streamingOptions = (0, streaming_options_1.createStreamingOptions)(stream, "Combining PRDs");
+    return await aiOperations.combinePRDs(prds, description, {
+        provider: (combineModelConfig.provider ||
+            config_1.configManager.getAIConfig().provider),
+        model: combineModelConfig.model,
+        reasoning: reasoningOverride || combineModelConfig.reasoning
+            ? {
+                maxTokens: parseInt(reasoningOverride || combineModelConfig.reasoning || "0"),
+            }
+            : undefined,
+    }, undefined, undefined, streamingOptions);
+}
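The per-model status lines above are updated in place with raw ANSI escapes: `ESC[nA` moves the cursor up n lines, `ESC[2K` erases the current line, and `ESC[nB` moves back down. A self-contained sketch of just that terminal trick (the line texts are placeholders):

```ts
// Rewrite one of several previously printed status lines in place.
function updateStatusLine(totalLines: number, index: number, text: string): void {
  const up = totalLines - index;          // distance from the cursor's resting row
  process.stdout.write(`\x1B[${up}A`);    // jump up to the target status line
  process.stdout.write(`\x1B[2K`);        // erase whatever was printed there
  process.stdout.write(`${text}\r`);      // rewrite it, carriage-return to column 0
  process.stdout.write(`\x1B[${up}B`);    // return to the resting row
}

// Example: three status lines printed once, then the middle one rewritten.
const lines = ["model-a", "model-b", "model-c"];
lines.forEach((l) => console.log(`- ${l}: Waiting...`));
updateStatusLine(lines.length, 1, "- model-b: Generating...");
```

Because every callback recomputes `up` from its own index, concurrent writers each address their own line, and `Promise.allSettled` lets individual models fail without aborting the batch.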
package/dist/index.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,EAAE,OAAO,EAAE,MAAM,WAAW,CAAC;
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,EAAE,OAAO,EAAE,MAAM,WAAW,CAAC;AAapC,QAAA,MAAM,OAAO,SAAgB,CAAC;AAgD9B;;;GAGG;AACH,eAAO,MAAM,MAAM,qBAgBlB,CAAC;AAEF;;GAEG;AACH,OAAO,EAAE,OAAO,EAAE,CAAC"}
package/dist/index.js
CHANGED
@@ -19,6 +19,7 @@ const init_1 = require("./commands/init");
 const prompt_1 = require("./commands/prompt");
 const workflow_1 = require("./commands/workflow");
 const benchmark_1 = require("./commands/benchmark");
+const install_1 = require("./commands/install");
 const config_2 = require("./lib/config");
 const logger_1 = require("./lib/hooks/logger");
 const program = new commander_1.Command();
@@ -36,6 +37,7 @@ program.addCommand(prompt_1.promptCommand);
 program.addCommand(init_1.initCommand);
 program.addCommand(workflow_1.workflowCommand);
 program.addCommand(benchmark_1.benchmarkCommand);
+program.addCommand(install_1.installCommand);
 // Default action - show help
 program.action(() => {
     console.log(chalk_1.default.blue("🚀 AI-Powered Task Management CLI"));
@@ -52,7 +54,7 @@ program.action(() => {
 // Error handling
 program.on("command:*", (operands) => {
     console.error(chalk_1.default.red(`Unknown command: ${operands[0]}`));
-    console.log(chalk_1.default.blue("Available commands: config, tasks, prd, prompt, init, workflow"));
+    console.log(chalk_1.default.blue("Available commands: config, tasks, prd, prompt, init, workflow, benchmark, install"));
     console.log(chalk_1.default.blue("Use --help for available commands"));
     process.exit(1);
 });
package/dist/lib/ai-service/gemini-proxy.d.ts
ADDED

@@ -0,0 +1,15 @@
+import { LanguageModelV2 } from "@ai-sdk/provider";
+export declare class GeminiProviderProxy implements LanguageModelV2 {
+    readonly specificationVersion = "v2";
+    readonly provider = "gemini";
+    readonly modelId: string;
+    readonly defaultObjectGenerationMode = "json";
+    readonly supportedUrls: Record<string, RegExp[]>;
+    private readonly config;
+    private realModel;
+    constructor(modelId: string, config: any);
+    private getRealModel;
+    doGenerate(options: any): Promise<any>;
+    doStream(options: any): Promise<any>;
+}
+//# sourceMappingURL=gemini-proxy.d.ts.map
package/dist/lib/ai-service/gemini-proxy.d.ts.map
ADDED

@@ -0,0 +1 @@
+{"version":3,"file":"gemini-proxy.d.ts","sourceRoot":"","sources":["../../../src/lib/ai-service/gemini-proxy.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAInD,qBAAa,mBAAoB,YAAW,eAAe;IACzD,QAAQ,CAAC,oBAAoB,QAAQ;IACrC,QAAQ,CAAC,QAAQ,YAAY;IAC7B,QAAQ,CAAC,OAAO,EAAE,MAAM,CAAC;IACzB,QAAQ,CAAC,2BAA2B,UAAU;IAC9C,QAAQ,CAAC,aAAa,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAM;IACtD,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAM;IAC7B,OAAO,CAAC,SAAS,CAAgC;gBAErC,OAAO,EAAE,MAAM,EAAE,MAAM,EAAE,GAAG;YAK1B,YAAY;IAsDpB,UAAU,CAAC,OAAO,EAAE,GAAG,GAAG,OAAO,CAAC,GAAG,CAAC;IAKtC,QAAQ,CAAC,OAAO,EAAE,GAAG,GAAG,OAAO,CAAC,GAAG,CAAC;CAI3C"}
package/dist/lib/ai-service/gemini-proxy.js
ADDED

@@ -0,0 +1,90 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+        desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+    var ownKeys = function(o) {
+        ownKeys = Object.getOwnPropertyNames || function (o) {
+            var ar = [];
+            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+            return ar;
+        };
+        return ownKeys(o);
+    };
+    return function (mod) {
+        if (mod && mod.__esModule) return mod;
+        var result = {};
+        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+        __setModuleDefault(result, mod);
+        return result;
+    };
+})();
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GeminiProviderProxy = void 0;
+const fs = __importStar(require("fs"));
+const path = __importStar(require("path"));
+class GeminiProviderProxy {
+    specificationVersion = "v2";
+    provider = "gemini";
+    modelId;
+    defaultObjectGenerationMode = "json";
+    supportedUrls = {}; // Explicitly typed to match interface
+    config;
+    realModel = null;
+    constructor(modelId, config) {
+        this.modelId = modelId;
+        this.config = config;
+    }
+    async getRealModel() {
+        if (!this.realModel) {
+            // The dependency 'ai-sdk-provider-gemini-cli' is misconfigured:
+            // it claims to be ESM ("type": "module") but its "main" points to CJS.
+            // This crashes Node.js with "module is not defined".
+            // We must load the ESM entry point ("index.mjs") explicitly.
+            const projectRoot = process.cwd();
+            // Attempt to resolve the path explicitly to dist/index.mjs
+            let modulePath = path.join(projectRoot, "node_modules", "ai-sdk-provider-gemini-cli", "dist", "index.mjs");
+            if (!fs.existsSync(modulePath)) {
+                // Fallback for nested dep or monorepo structures (common in workspaces)
+                modulePath = path.join(projectRoot, "..", "..", "node_modules", "ai-sdk-provider-gemini-cli", "dist", "index.mjs");
+                if (!fs.existsSync(modulePath)) {
+                    console.warn("GeminiProxy: Could not locate index.mjs manually. Falling back to default resolution.");
+                    // Last ditch: try to rely on standard resolution.
+                    // This will likely crash if the package isn't fixed, but it's the only remaining option.
+                    const { createGeminiProvider } = await Promise.resolve().then(() => __importStar(require("ai-sdk-provider-gemini-cli")));
+                    const provider = createGeminiProvider(this.config);
+                    this.realModel = provider(this.modelId);
+                    return this.realModel;
+                }
+            }
+            // Dynamic import of the absolute path bypasses 'exports' restrictions
+            // and forces Node to load the correct ESM file.
+            const { createGeminiProvider } = await Promise.resolve(`${modulePath}`).then(s => __importStar(require(s)));
+            const provider = createGeminiProvider(this.config);
+            this.realModel = provider(this.modelId);
+        }
+        return this.realModel;
+    }
+    async doGenerate(options) {
+        const model = await this.getRealModel();
+        return model.doGenerate(options);
+    }
+    async doStream(options) {
+        const model = await this.getRealModel();
+        return model.doStream(options);
+    }
+}
+exports.GeminiProviderProxy = GeminiProviderProxy;
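The long comment in `getRealModel` explains the hack: `ai-sdk-provider-gemini-cli` declares `"type": "module"` while its `main` points at CJS, so normal resolution crashes, and the proxy instead imports `dist/index.mjs` by absolute path. A condensed sketch of that loading strategy — `loadEsmEntry` is an invented helper name; the package name and file layout come from the diff above:

```ts
// Probe for the package's ESM build and import it by absolute path,
// side-stepping its broken "main"/"exports" resolution.
import * as fs from "fs";
import * as path from "path";
import { pathToFileURL } from "url";

async function loadEsmEntry(pkg: string, entry = "dist/index.mjs"): Promise<any> {
  const candidates = [
    path.join(process.cwd(), "node_modules", pkg, entry),
    // Hoisted node_modules in monorepos/workspaces:
    path.join(process.cwd(), "..", "..", "node_modules", pkg, entry),
  ];
  const hit = candidates.find((p) => fs.existsSync(p));
  // Importing a file URL bypasses package.json resolution entirely; if nothing
  // was found, fall back to bare-specifier resolution and hope for the best.
  return hit ? import(pathToFileURL(hit).href) : import(pkg);
}

// const { createGeminiProvider } = await loadEsmEntry("ai-sdk-provider-gemini-cli");
```

Note that the proxy also defers this load until the first `doGenerate`/`doStream` call, so merely constructing a `GeminiProviderProxy` never touches the broken package.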
package/dist/lib/ai-service/json-parser.d.ts
CHANGED

@@ -3,7 +3,20 @@ export declare class JSONParser {
     /**
      * Extracts JSON from text that may contain markdown codeblocks or other formatting
      */
+    /**
+     * Extracts the first valid JSON object or array from text using a stack-based approach
+     * This is much more robust than regex for nested structures or text with multiple brace pairs
+     */
     private extractJSONString;
+    /**
+     * Extracts a balanced string starting from a specific index
+     */
+    private extractBalancedString;
+    /**
+     * Cleans JSON string by removing comments and trailing commas
+     * This handles "valid JS object" format that LLMs often output
+     */
+    private cleanJSON;
     /**
      * Normalizes object keys to handle case variations (e.g., "Tasks" -> "tasks")
      */
@@ -12,10 +25,9 @@ export declare class JSONParser {
      * Parses JSON from AI text response with improved error handling
      * Now supports:
      * - Extracting from markdown codeblocks (```json, ```JSON, or ```)
-     * -
-     * -
-     *
-     * @deprecated Use generateObject instead for structured output
+     * - Stack-based extraction for robustness against surrounding text
+     * - Comment and trailing comma removal
+     * - Case-insensitive property names
      */
     parseJSONFromResponse<T>(text: string): JSONParseResult<T>;
 }
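The new `extractJSONString`/`extractBalancedString` declarations describe a stack-style balanced scan rather than a regex. Their bodies are not shown in this diff, so the following is an assumed sketch of the technique: track bracket depth in a single pass and skip string literals and escapes so braces inside strings don't skew the count:

```ts
// Assumed sketch of balanced extraction; not the package's actual private method.
function extractBalanced(text: string, start: number): string | null {
  const open = text[start];
  const close = open === "{" ? "}" : open === "[" ? "]" : null;
  if (!close) return null;
  let depth = 0;
  let inString = false;
  for (let i = start; i < text.length; i++) {
    const ch = text[i];
    if (inString) {
      if (ch === "\\") i++;            // skip the escaped character
      else if (ch === '"') inString = false;
    } else if (ch === '"') {
      inString = true;
    } else if (ch === open) {
      depth++;
    } else if (ch === close) {
      depth--;
      if (depth === 0) return text.slice(start, i + 1);
    }
  }
  return null;                          // unbalanced: no complete value found
}

// extractBalanced('noise {"a": {"b": "}"}} trailing', 6) => '{"a": {"b": "}"}}'
```

Presumably `cleanJSON`'s comment and trailing-comma stripping then runs on the extracted slice before `JSON.parse`, per its doc comment.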
package/dist/lib/ai-service/json-parser.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"json-parser.d.ts","sourceRoot":"","sources":["../../../src/lib/ai-service/json-parser.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAE9C,qBAAa,UAAU;IACrB;;OAEG;IACH,OAAO,CAAC,iBAAiB;
+{"version":3,"file":"json-parser.d.ts","sourceRoot":"","sources":["../../../src/lib/ai-service/json-parser.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAE9C,qBAAa,UAAU;IACrB;;OAEG;IACH;;;OAGG;IACH,OAAO,CAAC,iBAAiB;IA8DzB;;OAEG;IACH,OAAO,CAAC,qBAAqB;IA2C7B;;;OAGG;IACH,OAAO,CAAC,SAAS;IAOjB;;OAEG;IACH,OAAO,CAAC,aAAa;IAsBrB;;;;;;;OAOG;IACH,qBAAqB,CAAC,CAAC,EAAE,IAAI,EAAE,MAAM,GAAG,eAAe,CAAC,CAAC,CAAC;CAwC3D"}