mulby-cli 1.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/PLUGIN_DEVELOP_PROMPT.md +1164 -0
- package/README.md +852 -0
- package/assets/default-icon.png +0 -0
- package/dist/commands/ai-session.js +44 -0
- package/dist/commands/build.js +111 -0
- package/dist/commands/config-ai.js +291 -0
- package/dist/commands/config.js +53 -0
- package/dist/commands/create/ai-create.js +183 -0
- package/dist/commands/create/assets.js +53 -0
- package/dist/commands/create/basic.js +72 -0
- package/dist/commands/create/index.js +73 -0
- package/dist/commands/create/react.js +136 -0
- package/dist/commands/create/templates/basic.js +383 -0
- package/dist/commands/create/templates/react/backend.js +72 -0
- package/dist/commands/create/templates/react/config.js +166 -0
- package/dist/commands/create/templates/react/docs.js +78 -0
- package/dist/commands/create/templates/react/hooks.js +469 -0
- package/dist/commands/create/templates/react/index.js +41 -0
- package/dist/commands/create/templates/react/types.js +1228 -0
- package/dist/commands/create/templates/react/ui.js +528 -0
- package/dist/commands/create/templates/react.js +1888 -0
- package/dist/commands/dev.js +141 -0
- package/dist/commands/pack.js +160 -0
- package/dist/commands/resume.js +97 -0
- package/dist/commands/test-ui.js +50 -0
- package/dist/index.js +71 -0
- package/dist/services/ai/PLUGIN_API.md +1102 -0
- package/dist/services/ai/PLUGIN_DEVELOP_PROMPT.md +1164 -0
- package/dist/services/ai/context-manager.js +639 -0
- package/dist/services/ai/index.js +88 -0
- package/dist/services/ai/knowledge.js +52 -0
- package/dist/services/ai/prompts.js +114 -0
- package/dist/services/ai/providers/base.js +38 -0
- package/dist/services/ai/providers/claude.js +284 -0
- package/dist/services/ai/providers/deepseek.js +28 -0
- package/dist/services/ai/providers/gemini.js +191 -0
- package/dist/services/ai/providers/glm.js +31 -0
- package/dist/services/ai/providers/minimax.js +27 -0
- package/dist/services/ai/providers/openai.js +177 -0
- package/dist/services/ai/tools.js +204 -0
- package/dist/services/ai-generator.js +968 -0
- package/dist/services/config-manager.js +117 -0
- package/dist/services/dependency-manager.js +236 -0
- package/dist/services/file-writer.js +66 -0
- package/dist/services/plan-adapter.js +244 -0
- package/dist/services/plan-command-handler.js +172 -0
- package/dist/services/plan-manager.js +502 -0
- package/dist/services/session-manager.js +113 -0
- package/dist/services/task-analyzer.js +136 -0
- package/dist/services/tui/index.js +57 -0
- package/dist/services/tui/store.js +123 -0
- package/dist/types/ai.js +172 -0
- package/dist/types/plan.js +2 -0
- package/dist/ui/Terminal.js +56 -0
- package/dist/ui/components/InputArea.js +176 -0
- package/dist/ui/components/LogArea.js +19 -0
- package/dist/ui/components/PlanPanel.js +69 -0
- package/dist/ui/components/SelectArea.js +13 -0
- package/package.json +45 -0
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.GeminiProvider = void 0;
|
|
4
|
+
const base_1 = require("./base");
|
|
5
|
+
/**
 * Google Gemini provider (REST `generateContent` / `streamGenerateContent` API).
 * Converts the internal OpenAI-style message/tool format to Gemini's
 * `contents` / `functionDeclarations` format and back.
 */
class GeminiProvider extends base_1.BaseAIProvider {
    constructor(config) {
        super(config);
        this.apiKey = config.apiKey;
        this.baseURL = config.apiEndpoint || 'https://generativelanguage.googleapis.com/v1beta';
        this.model = config.model || 'gemini-3-pro-preview';
    }
    /**
     * Build the request body shared by chat() and chatStream().
     * maxTokens is the max OUTPUT token count; getMaxOutputTokens() supplies the model default.
     */
    buildRequestBody(messages, options) {
        const { contents, systemInstruction } = this.convertMessages(messages);
        const tools = options?.tools ? this.convertTools(options.tools) : undefined;
        const maxOutputTokens = options?.maxTokens || this.config.maxTokens || this.getMaxOutputTokens();
        const requestBody = {
            contents,
            generationConfig: {
                maxOutputTokens,
                temperature: 0.7,
            }
        };
        if (systemInstruction) {
            requestBody.systemInstruction = { parts: [{ text: systemInstruction }] };
        }
        if (tools) {
            requestBody.tools = [tools];
        }
        return requestBody;
    }
    /**
     * Non-streaming completion.
     * @returns {Promise<{content, toolCalls?, usage?}>} parsed provider-neutral response.
     * @throws {Error} on a non-2xx HTTP status, including the response body text.
     */
    async chat(messages, options) {
        const requestBody = this.buildRequestBody(messages, options);
        const url = `${this.baseURL}/models/${this.model}:generateContent?key=${this.apiKey}`;
        const response = await fetch(url, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify(requestBody),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`Gemini API error: ${response.status} ${error}`);
        }
        const data = await response.json();
        return this.parseResponse(data);
    }
    /**
     * Parse one SSE line and return the text delta it carries, or null.
     * Non-"data:" frames and unparseable payloads (keep-alives, partial JSON)
     * are ignored by returning null.
     */
    parseStreamLine(line) {
        if (!line.startsWith('data: ')) {
            return null;
        }
        try {
            const data = JSON.parse(line.slice(6));
            return data.candidates?.[0]?.content?.parts?.[0]?.text ?? null;
        }
        catch (e) {
            // Ignore parse errors
            return null;
        }
    }
    /**
     * Streaming completion: invokes onChunk(text) for each text delta and
     * returns the accumulated content.
     * @throws {Error} on a non-2xx HTTP status or an unreadable response body.
     */
    async chatStream(messages, onChunk, options) {
        const requestBody = this.buildRequestBody(messages, options);
        const url = `${this.baseURL}/models/${this.model}:streamGenerateContent?key=${this.apiKey}&alt=sse`;
        const response = await fetch(url, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify(requestBody),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`Gemini API error: ${response.status} ${error}`);
        }
        let fullContent = '';
        const reader = response.body?.getReader();
        const decoder = new TextDecoder();
        if (!reader) {
            throw new Error('Response body is not readable');
        }
        // Bug fix: an SSE "data:" line can be split across two network reads.
        // Keep the trailing partial line in a carry-over buffer instead of
        // attempting (and silently failing) to parse a half line.
        let buffer = '';
        const emit = (line) => {
            const text = this.parseStreamLine(line);
            if (text) {
                fullContent += text;
                onChunk(text);
            }
        };
        while (true) {
            const { done, value } = await reader.read();
            if (done)
                break;
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split('\n');
            buffer = lines.pop() ?? ''; // last element may be incomplete
            for (const line of lines) {
                emit(line);
            }
        }
        // Flush the decoder and any final complete event left in the buffer.
        buffer += decoder.decode();
        if (buffer) {
            emit(buffer);
        }
        // TODO: Handle tool calls in stream if needed
        return { content: fullContent };
    }
    /**
     * Convert internal messages to Gemini `contents`, extracting the system
     * prompt as a separate systemInstruction string.
     */
    convertMessages(messages) {
        let systemInstruction;
        const contents = [];
        for (const msg of messages) {
            if (msg.role === 'system') {
                systemInstruction = typeof msg.content === 'string' ? msg.content : '';
                continue;
            }
            if (msg.role === 'tool') {
                // Tool response: attached to the preceding model turn.
                // NOTE(review): Gemini docs suggest functionResponse parts belong in a
                // user-role turn — confirm this attachment point against the API spec.
                const lastMessage = contents[contents.length - 1];
                if (lastMessage && lastMessage.role === 'model') {
                    lastMessage.parts.push({
                        functionResponse: {
                            name: msg.name,
                            response: {
                                content: msg.content
                            }
                        }
                    });
                }
                continue;
            }
            const role = msg.role === 'assistant' ? 'model' : 'user';
            const parts = [];
            if (msg.content) {
                parts.push({ text: msg.content });
            }
            if (msg.tool_calls) {
                for (const toolCall of msg.tool_calls) {
                    parts.push({
                        functionCall: {
                            name: toolCall.function.name,
                            args: JSON.parse(toolCall.function.arguments)
                        }
                    });
                }
            }
            contents.push({ role, parts });
        }
        return { contents, systemInstruction };
    }
    /** Convert OpenAI-style tool specs into a Gemini functionDeclarations group. */
    convertTools(tools) {
        return {
            functionDeclarations: tools.map(tool => ({
                name: tool.function.name,
                description: tool.function.description,
                parameters: tool.function.parameters
            }))
        };
    }
    /**
     * Map a Gemini response into the provider-neutral shape
     * { content, toolCalls?, usage? }. Synthesizes OpenAI-style tool-call ids
     * since Gemini does not supply them.
     */
    parseResponse(data) {
        const candidate = data.candidates?.[0];
        if (!candidate) {
            return { content: null };
        }
        const parts = candidate.content?.parts || [];
        let content = '';
        const toolCalls = [];
        for (const part of parts) {
            if (part.text) {
                content += part.text;
            }
            if (part.functionCall) {
                toolCalls.push({
                    id: `call_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
                    type: 'function',
                    function: {
                        name: part.functionCall.name,
                        arguments: JSON.stringify(part.functionCall.args)
                    }
                });
            }
        }
        return {
            content: content || null,
            toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
            usage: data.usageMetadata ? {
                promptTokens: data.usageMetadata.promptTokenCount || 0,
                completionTokens: data.usageMetadata.candidatesTokenCount || 0,
                totalTokens: data.usageMetadata.totalTokenCount || 0
            } : undefined
        };
    }
}
exports.GeminiProvider = GeminiProvider;
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.GLMProvider = void 0;
|
|
4
|
+
const openai_1 = require("./openai");
|
|
5
|
+
/**
 * GLM (Zhipu AI) Provider.
 * Zhipu AI exposes an OpenAI-compatible API surface, so this class simply
 * delegates to OpenAIProvider with GLM-specific defaults filled in.
 * Docs: https://docs.bigmodel.cn/cn/guide/develop/openai/introduction
 *
 * Supported models:
 * - glm-4.7: latest flagship (200K context, 128K output)
 * - glm-4.6: previous flagship (200K context, 8K output)
 * - glm-4-plus/glm-4: standard models (128K context, 4K output)
 * - glm-4-long: long-context model (1M context, 4K output)
 */
class GLMProvider extends openai_1.OpenAIProvider {
    constructor(config) {
        // NOTE: maxTokens means max OUTPUT tokens, not the context window.
        // It is deliberately left unset here so getMaxOutputTokens() can
        // infer the right default from the chosen model.
        super({
            ...config,
            provider: 'glm',
            apiEndpoint: config.apiEndpoint || 'https://open.bigmodel.cn/api/paas/v4',
            model: config.model || 'glm-4-plus',
            enableThinking: config.enableThinking ?? true,
        });
    }
}
exports.GLMProvider = GLMProvider;
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.MiniMaxProvider = void 0;
|
|
4
|
+
const claude_1 = require("./claude");
|
|
5
|
+
/**
 * MiniMax Provider — uses the Anthropic-SDK-compatible endpoint.
 *
 * Supported models:
 * - MiniMax-M2.1: strong multilingual coding ability (200K context, 8K output)
 * - MiniMax-M2.1-lightning: fast variant (200K context, 8K output)
 * - MiniMax-M2: base variant (200K context, 8K output)
 *
 * API endpoint: https://api.minimaxi.com/anthropic
 */
class MiniMaxProvider extends claude_1.ClaudeProvider {
    constructor(config) {
        // Delegate to ClaudeProvider with MiniMax defaults. maxTokens is left
        // unset on purpose so getMaxOutputTokens() infers it from the model.
        super({
            ...config,
            provider: 'minimax',
            apiEndpoint: config.apiEndpoint || 'https://api.minimaxi.com/anthropic',
            model: config.model || 'MiniMax-M2.1',
        });
    }
}
exports.MiniMaxProvider = MiniMaxProvider;
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.OpenAIProvider = void 0;
|
|
7
|
+
const openai_1 = __importDefault(require("openai"));
|
|
8
|
+
const base_1 = require("./base");
|
|
9
|
+
/**
 * OpenAI-compatible provider built on the official `openai` SDK.
 * Also serves as the base for OpenAI-compatible vendors (GLM, DeepSeek):
 * it surfaces their non-standard `reasoning_content` field and the
 * `extra_body.thinking` request flag.
 */
class OpenAIProvider extends base_1.BaseAIProvider {
    constructor(config) {
        super(config);
        this.client = new openai_1.default({
            apiKey: config.apiKey,
            baseURL: config.apiEndpoint,
            timeout: (config.timeout || 60) * 1000, // config.timeout is in seconds
        });
    }
    /**
     * Resolve the max OUTPUT token budget (not the context window):
     * options override > config override > model default, coerced to a number.
     */
    resolveMaxTokens(options) {
        let maxTokens = options?.maxTokens || this.config.maxTokens || this.getMaxOutputTokens();
        // Final safety cast to number.
        maxTokens = Number(maxTokens);
        if (Number.isNaN(maxTokens)) {
            maxTokens = 128000; // gpt-5.2 default max output
        }
        return maxTokens;
    }
    /**
     * Build the chat.completions request shared by chat() and chatStream().
     * @param {boolean} stream - whether to request a streaming response
     *   (streaming also asks for usage via stream_options).
     */
    buildRequestOptions(messages, options, stream) {
        const requestOptions = {
            model: options?.model || this.config.model || 'gpt-5.2',
            messages: messages, // Type casting for compatibility
            temperature: options?.temperature,
            max_tokens: this.resolveMaxTokens(options),
            tools: options?.tools,
            tool_choice: options?.toolChoice,
            stream,
        };
        if (stream) {
            requestOptions.stream_options = { include_usage: true };
        }
        if (options?.enableThinking || this.config.enableThinking) {
            // Vendor extension (GLM et al.): enable "thinking" mode.
            requestOptions.extra_body = {
                thinking: {
                    type: "enabled",
                },
            };
        }
        return requestOptions;
    }
    /**
     * Non-streaming completion.
     * @returns {Promise<{content, reasoning_content?, toolCalls?, usage?}>}
     *   reasoning_content is returned separately; content stays clean.
     */
    async chat(messages, options) {
        const requestOptions = this.buildRequestOptions(messages, options, false);
        const response = await this.client.chat.completions.create(requestOptions);
        const choice = response.choices[0];
        const message = choice.message;
        // DeepSeek & GLM reasoning support: keep content clean and expose
        // reasoning_content as its own field (no <think> wrapping here).
        let reasoningContent;
        if (message.reasoning_content) {
            reasoningContent = message.reasoning_content;
        }
        return {
            content: message.content,
            reasoning_content: reasoningContent,
            toolCalls: message.tool_calls,
            usage: response.usage ? {
                promptTokens: response.usage.prompt_tokens,
                completionTokens: response.usage.completion_tokens,
                totalTokens: response.usage.total_tokens
            } : undefined
        };
    }
    /**
     * Streaming completion. Reasoning deltas are wrapped in <think>…</think>
     * tags when passed to onChunk, but kept out of the returned content.
     * Tool-call fragments are accumulated by index and returned assembled.
     */
    async chatStream(messages, onChunk, options) {
        const requestOptions = this.buildRequestOptions(messages, options, true);
        const stream = await this.client.chat.completions.create(requestOptions);
        let cleanContent = '';
        let reasoningContent = '';
        const toolCallsMap = {};
        let hasStartedThinking = false;
        let hasEndedThinking = false;
        let finalUsage = undefined;
        for await (const chunk of stream) {
            if (chunk.usage) {
                // With include_usage, the usage arrives on a trailing chunk.
                finalUsage = chunk.usage;
            }
            const choices = chunk.choices || [];
            if (choices.length === 0)
                continue;
            const delta = choices[0].delta;
            // Handle reasoning content (GLM/DeepSeek).
            if (delta?.reasoning_content) {
                if (!hasStartedThinking) {
                    onChunk('<think>\n');
                    hasStartedThinking = true;
                }
                const reasoning = delta.reasoning_content;
                reasoningContent += reasoning;
                onChunk(reasoning);
                continue;
            }
            // Close the thinking tag when switching to normal content or tools.
            if (hasStartedThinking && !hasEndedThinking && (delta?.content || delta?.tool_calls)) {
                onChunk('\n</think>\n\n');
                hasEndedThinking = true;
            }
            const content = delta?.content || '';
            if (content) {
                cleanContent += content;
                onChunk(content);
            }
            if (delta?.tool_calls) {
                for (const toolCall of delta.tool_calls) {
                    const index = toolCall.index;
                    if (!toolCallsMap[index]) {
                        toolCallsMap[index] = {
                            id: toolCall.id || '',
                            type: toolCall.type || 'function',
                            function: {
                                name: toolCall.function?.name || '',
                                arguments: toolCall.function?.arguments || ''
                            }
                        };
                    }
                    else if (toolCall.function?.arguments) {
                        // Later fragments only append argument text.
                        toolCallsMap[index].function.arguments += toolCall.function.arguments;
                    }
                }
            }
        }
        // Ensure the thinking tag is closed if the stream ends right after reasoning.
        if (hasStartedThinking && !hasEndedThinking) {
            onChunk('\n</think>\n\n');
            hasEndedThinking = true;
        }
        const toolCalls = Object.values(toolCallsMap).map((tc) => ({
            id: tc.id,
            type: tc.type,
            function: {
                name: tc.function.name,
                arguments: tc.function.arguments // Keep as string, caller parses it
            }
        }));
        return {
            content: cleanContent,
            reasoning_content: reasoningContent || undefined,
            toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
            usage: finalUsage ? {
                promptTokens: finalUsage.prompt_tokens,
                completionTokens: finalUsage.completion_tokens,
                totalTokens: finalUsage.total_tokens
            } : undefined
        };
    }
}
exports.OpenAIProvider = OpenAIProvider;
|
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.PLUGIN_GENERATION_TOOLS = void 0;
|
|
4
|
+
exports.PLUGIN_GENERATION_TOOLS = [
|
|
5
|
+
{
|
|
6
|
+
type: 'function',
|
|
7
|
+
function: {
|
|
8
|
+
name: 'read_file',
|
|
9
|
+
description: 'Read the content of a file. Use this to examine existing code or check file status.',
|
|
10
|
+
parameters: {
|
|
11
|
+
type: 'object',
|
|
12
|
+
properties: {
|
|
13
|
+
path: { type: 'string', description: 'Relative path to the file (e.g., package.json, src/ui/App.tsx)' }
|
|
14
|
+
},
|
|
15
|
+
required: ['path']
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
},
|
|
19
|
+
{
|
|
20
|
+
type: 'function',
|
|
21
|
+
function: {
|
|
22
|
+
name: 'replace_in_file',
|
|
23
|
+
description: 'Replace a specific part of a file. Use this for small edits (bug fixes, tweaks) to save tokens. It fails if the target string is not found or found multiple times (unless expected).',
|
|
24
|
+
parameters: {
|
|
25
|
+
type: 'object',
|
|
26
|
+
properties: {
|
|
27
|
+
path: { type: 'string', description: 'Relative path to the file' },
|
|
28
|
+
target: { type: 'string', description: 'Exact string to be replaced (must be unique in file)' },
|
|
29
|
+
replacement: { type: 'string', description: 'New content to replace the target with' }
|
|
30
|
+
},
|
|
31
|
+
required: ['path', 'target', 'replacement']
|
|
32
|
+
}
|
|
33
|
+
}
|
|
34
|
+
},
|
|
35
|
+
{
|
|
36
|
+
type: 'function',
|
|
37
|
+
function: {
|
|
38
|
+
name: 'write_file',
|
|
39
|
+
description: 'Create or overwrite a file with new content.',
|
|
40
|
+
parameters: {
|
|
41
|
+
type: 'object',
|
|
42
|
+
properties: {
|
|
43
|
+
path: { type: 'string', description: 'Relative path to the file' },
|
|
44
|
+
content: { type: 'string', description: 'Complete content of the file' }
|
|
45
|
+
},
|
|
46
|
+
required: ['path', 'content']
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
},
|
|
50
|
+
{
|
|
51
|
+
type: 'function',
|
|
52
|
+
function: {
|
|
53
|
+
name: 'run_command',
|
|
54
|
+
description: 'Execute a shell command. Use this for installing dependencies (npm install) or other necessary shell operations. Do NOT run long-running processes like "npm run dev".',
|
|
55
|
+
parameters: {
|
|
56
|
+
type: 'object',
|
|
57
|
+
properties: {
|
|
58
|
+
command: { type: 'string', description: 'The shell command to execute' }
|
|
59
|
+
},
|
|
60
|
+
required: ['command']
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
},
|
|
64
|
+
{
|
|
65
|
+
type: 'function',
|
|
66
|
+
function: {
|
|
67
|
+
name: 'ask_user',
|
|
68
|
+
description: 'Ask the user a question to clarify requirements or request a decision. Use this frequently during the Product Consultant phase.',
|
|
69
|
+
parameters: {
|
|
70
|
+
type: 'object',
|
|
71
|
+
properties: {
|
|
72
|
+
question: { type: 'string', description: 'The question to ask the user' }
|
|
73
|
+
},
|
|
74
|
+
required: ['question']
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
},
|
|
78
|
+
{
|
|
79
|
+
type: 'function',
|
|
80
|
+
function: {
|
|
81
|
+
name: 'scaffold_project',
|
|
82
|
+
description: 'Create the project scaffold (React + Vite template). Call this ONLY after you have confirmed requirements with the user in the Product Consultant phase. This will generate: package.json, manifest.json, vite.config.ts, src/ui/App.tsx, etc.',
|
|
83
|
+
parameters: {
|
|
84
|
+
type: 'object',
|
|
85
|
+
properties: {
|
|
86
|
+
reason: { type: 'string', description: 'Reason for creating scaffold (e.g., "Requirements confirmed: image stitching tool with drag-drop UI")' }
|
|
87
|
+
},
|
|
88
|
+
required: ['reason']
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
},
|
|
92
|
+
{
|
|
93
|
+
type: 'function',
|
|
94
|
+
function: {
|
|
95
|
+
name: 'list_dir',
|
|
96
|
+
description: 'List files and directories in a specific path. Use this to explore the project structure.',
|
|
97
|
+
parameters: {
|
|
98
|
+
type: 'object',
|
|
99
|
+
properties: {
|
|
100
|
+
path: { type: 'string', description: 'Relative path to list (e.g., ".", "src/components")' }
|
|
101
|
+
},
|
|
102
|
+
required: ['path']
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
},
|
|
106
|
+
{
|
|
107
|
+
type: 'function',
|
|
108
|
+
function: {
|
|
109
|
+
name: 'search_files',
|
|
110
|
+
description: 'Search for a string or pattern in files. Useful for finding component usage or specific code snippets.',
|
|
111
|
+
parameters: {
|
|
112
|
+
type: 'object',
|
|
113
|
+
properties: {
|
|
114
|
+
query: { type: 'string', description: 'String or Regex to search for' },
|
|
115
|
+
path: { type: 'string', description: 'Directory to search in (default: ".")' }
|
|
116
|
+
},
|
|
117
|
+
required: ['query']
|
|
118
|
+
}
|
|
119
|
+
}
|
|
120
|
+
},
|
|
121
|
+
{
|
|
122
|
+
type: 'function',
|
|
123
|
+
function: {
|
|
124
|
+
name: 'read_file_outline',
|
|
125
|
+
description: 'Read the outline (symbols, functions, classes) of a file without reading the full content. Saves tokens.',
|
|
126
|
+
parameters: {
|
|
127
|
+
type: 'object',
|
|
128
|
+
properties: {
|
|
129
|
+
path: { type: 'string', description: 'Relative path to the file' }
|
|
130
|
+
},
|
|
131
|
+
required: ['path']
|
|
132
|
+
}
|
|
133
|
+
}
|
|
134
|
+
},
|
|
135
|
+
{
|
|
136
|
+
type: 'function',
|
|
137
|
+
function: {
|
|
138
|
+
name: 'delete_file',
|
|
139
|
+
description: 'Delete a file or directory.',
|
|
140
|
+
parameters: {
|
|
141
|
+
type: 'object',
|
|
142
|
+
properties: {
|
|
143
|
+
path: { type: 'string', description: 'Relative path to delete' }
|
|
144
|
+
},
|
|
145
|
+
required: ['path']
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
},
|
|
149
|
+
{
|
|
150
|
+
type: 'function',
|
|
151
|
+
function: {
|
|
152
|
+
name: 'move_file',
|
|
153
|
+
description: 'Move or rename a file.',
|
|
154
|
+
parameters: {
|
|
155
|
+
type: 'object',
|
|
156
|
+
properties: {
|
|
157
|
+
source: { type: 'string', description: 'Current path' },
|
|
158
|
+
destination: { type: 'string', description: 'New path' }
|
|
159
|
+
},
|
|
160
|
+
required: ['source', 'destination']
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
},
|
|
164
|
+
{
|
|
165
|
+
type: 'function',
|
|
166
|
+
function: {
|
|
167
|
+
name: 'fetch_url',
|
|
168
|
+
description: 'Fetch content from a URL to read documentation or external data (converts to Markdown/Text).',
|
|
169
|
+
parameters: {
|
|
170
|
+
type: 'object',
|
|
171
|
+
properties: {
|
|
172
|
+
url: { type: 'string', description: 'The URL to fetch' }
|
|
173
|
+
},
|
|
174
|
+
required: ['url']
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
},
|
|
178
|
+
{
|
|
179
|
+
type: 'function',
|
|
180
|
+
function: {
|
|
181
|
+
name: 'check_types',
|
|
182
|
+
description: 'Run TypeScript compiler to check for type errors.',
|
|
183
|
+
parameters: {
|
|
184
|
+
type: 'object',
|
|
185
|
+
properties: {},
|
|
186
|
+
required: []
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
},
|
|
190
|
+
{
|
|
191
|
+
type: 'function',
|
|
192
|
+
function: {
|
|
193
|
+
name: 'finish',
|
|
194
|
+
description: 'Mark the task as complete when all requirements are met.',
|
|
195
|
+
parameters: {
|
|
196
|
+
type: 'object',
|
|
197
|
+
properties: {
|
|
198
|
+
summary: { type: 'string', description: 'Summary of what was done and instructions for the user.' }
|
|
199
|
+
},
|
|
200
|
+
required: ['summary']
|
|
201
|
+
}
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
];
|