closer-code 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +83 -0
- package/API_GUIDE.md +1411 -0
- package/AUTO_MKDIR_IMPROVEMENT.md +354 -0
- package/CLAUDE.md +55 -0
- package/CTRL_C_EXPERIMENT.md +90 -0
- package/PROJECT_CLEANUP_SUMMARY.md +121 -0
- package/README.md +686 -0
- package/cloco.md +51 -0
- package/config.example.json +116 -0
- package/dist/bash-runner.js +128 -0
- package/dist/batch-cli.js +20736 -0
- package/dist/closer-cli.js +21190 -0
- package/dist/index.js +31228 -0
- package/docs/EXPORT_COMMAND.md +152 -0
- package/docs/FILE_NAMING_IMPROVEMENT.md +168 -0
- package/docs/GLOBAL_CONFIG.md +128 -0
- package/docs/LONG_MESSAGE_DISPLAY_FIX.md +202 -0
- package/docs/PROJECT_HISTORY_ISOLATION.md +315 -0
- package/docs/QUICK_START_HISTORY.md +207 -0
- package/docs/TASK_PROGRESS_FEATURE.md +190 -0
- package/docs/THINKING_CONTENT_RESEARCH.md +267 -0
- package/docs/THINKING_FEATURE.md +187 -0
- package/docs/THINKING_IMPROVEMENT_COMPARISON.md +193 -0
- package/docs/THINKING_OPTIMIZATION_SUMMARY.md +242 -0
- package/docs/UI_IMPROVEMENTS_2025-01-18.md +256 -0
- package/docs/WHY_THINKING_SHORT.md +201 -0
- package/package.json +49 -0
- package/scenarios/README.md +234 -0
- package/scenarios/run-all-scenarios.js +342 -0
- package/scenarios/scenario1-batch-converter.js +247 -0
- package/scenarios/scenario2-code-analyzer.js +375 -0
- package/scenarios/scenario3-doc-generator.js +371 -0
- package/scenarios/scenario4-log-analyzer.js +496 -0
- package/scenarios/scenario5-tdd-helper.js +681 -0
- package/src/ai-client-legacy.js +171 -0
- package/src/ai-client.js +221 -0
- package/src/bash-runner.js +148 -0
- package/src/batch-cli.js +327 -0
- package/src/cli.jsx +166 -0
- package/src/closer-cli.jsx +1103 -0
- package/src/closer-cli.jsx.backup +948 -0
- package/src/commands/batch.js +62 -0
- package/src/commands/chat.js +10 -0
- package/src/commands/config.js +154 -0
- package/src/commands/help.js +76 -0
- package/src/commands/history.js +192 -0
- package/src/commands/setup.js +17 -0
- package/src/commands/upgrade.js +101 -0
- package/src/commands/workflow-tests.js +125 -0
- package/src/config.js +343 -0
- package/src/conversation.js +962 -0
- package/src/git-helper.js +349 -0
- package/src/index.js +88 -0
- package/src/logger.js +347 -0
- package/src/plan.js +193 -0
- package/src/planner.js +397 -0
- package/src/search.js +195 -0
- package/src/setup.js +147 -0
- package/src/shortcuts.js +269 -0
- package/src/snippets.js +430 -0
- package/src/test-modules.js +118 -0
- package/src/tools.js +398 -0
- package/src/utils/cli.js +124 -0
- package/src/utils/validator.js +184 -0
- package/src/utils/version.js +33 -0
- package/src/utils/workflow-test.js +271 -0
- package/src/utils/workflow.js +268 -0
- package/test/demo-file-naming.js +92 -0
- package/test/demo-thinking.js +124 -0
- package/test/final-verification-report.md +303 -0
- package/test/research-thinking.js +130 -0
- package/test/test-auto-mkdir.js +123 -0
- package/test/test-e2e-empty-dir.md +108 -0
- package/test/test-export-logic.js +119 -0
- package/test/test-global-cloco.js +126 -0
- package/test/test-history-isolation.js +291 -0
- package/test/test-improved-thinking.js +43 -0
- package/test/test-long-message.js +65 -0
- package/test/test-plan-functionality.js +95 -0
- package/test/test-real-scenario.js +216 -0
- package/test/test-thinking-display.js +65 -0
- package/test/ui-verification-test.js +203 -0
- package/test/verify-history-isolation.sh +71 -0
- package/test/verify-thinking.js +339 -0
- package/test/workflows/empty-dir-creation.md +51 -0
- package/test/workflows/inventor/ascii-teacup.js +199 -0
- package/test/workflows/inventor/ascii-teacup.mjs +199 -0
- package/test/workflows/inventor/ascii_apple.hs +84 -0
- package/test/workflows/inventor/ascii_apple.py +91 -0
- package/test/workflows/inventor/cloco.md +3 -0
- package/test/workflows/longtalk/cloco.md +19 -0
- package/test/workflows/longtalk/emoji_500.txt +63 -0
- package/test/workflows/longtalk/emoji_list.txt +20 -0
- package/test/workflows/programmer/adder.md +33 -0
- package/test/workflows/programmer/expect.md +2 -0
- package/test/workflows/programmer/prompt.md +3 -0
- package/test/workflows/test-empty-dir-creation.js +113 -0
- package/test-ctrl-c.jsx +126 -0
- package/test-manual-file-creation.js +151 -0
- package/winfix.md +3 -0
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
// Client for the OpenAI Chat Completions API.
export class OpenAIClient {
  /**
   * @param {{apiKey: string, baseURL?: string, model?: string, maxTokens?: number}} config
   */
  constructor(config) {
    this.apiKey = config.apiKey;
    this.baseURL = config.baseURL || 'https://api.openai.com/v1';
    this.model = config.model || 'gpt-4o';
    this.maxTokens = config.maxTokens || 4096;
  }

  /**
   * Send a chat request (non-streaming) and return a normalized message.
   *
   * @param {Array<{role: string, content: any}>} messages - conversation history
   * @param {{system?: string, tools?: Array, temperature?: number}} [options]
   * @returns {Promise<Object>} normalized assistant message (see parseResponse)
   * @throws {Error} when the HTTP response status is not ok
   */
  async chat(messages, options = {}) {
    const systemPrompt = options.system || 'You are a helpful AI programming assistant.';
    const toolDefs = options.tools || [];
    const temperature = options.temperature ?? 0.7;

    // Prepend the system prompt; keep only role/content from each history entry.
    const payload = {
      model: this.model,
      messages: [
        { role: 'system', content: systemPrompt },
        ...messages.map(({ role, content }) => ({ role, content })),
      ],
      temperature,
      max_tokens: this.maxTokens,
    };

    if (toolDefs.length > 0) {
      // Translate the internal tool shape into OpenAI's function-tool format.
      payload.tools = toolDefs.map(({ name, description, inputSchema }) => ({
        type: 'function',
        function: { name, description, parameters: inputSchema },
      }));
    }

    const response = await fetch(`${this.baseURL}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify(payload),
    });

    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }

    return this.parseResponse(await response.json());
  }

  /**
   * Normalize an OpenAI response into the internal content-block shape:
   * { role, content: [{type: 'text'|'tool_use', ...}], model, finishReason }.
   *
   * @param {Object} data - raw Chat Completions response body
   * @returns {Object} normalized assistant message
   */
  parseResponse(data) {
    const [choice] = data.choices;
    const result = {
      role: 'assistant',
      content: [],
      model: data.model,
      finishReason: choice.finish_reason,
    };

    if (choice.message.content) {
      result.content.push({ type: 'text', text: choice.message.content });
    }

    if (choice.message.tool_calls) {
      for (const call of choice.message.tool_calls) {
        result.content.push({
          type: 'tool_use',
          id: call.id,
          name: call.function.name,
          // Tool arguments arrive as a JSON string; decode into an object.
          input: JSON.parse(call.function.arguments),
        });
      }
    }

    return result;
  }
}
|
|
90
|
+
|
|
91
|
+
// Client for a locally running Ollama server.
export class OllamaClient {
  /**
   * @param {{baseURL?: string, model?: string, maxTokens?: number}} config
   */
  constructor(config) {
    this.baseURL = config.baseURL || 'http://localhost:11434';
    this.model = config.model || 'llama3.1';
    // Kept for interface parity with the other clients; not sent to Ollama here.
    this.maxTokens = config.maxTokens || 4096;
  }

  /**
   * Send a chat request and return a normalized assistant message.
   *
   * @param {Array<{role: string, content: any}>} messages - conversation history
   * @param {{system?: string, temperature?: number}} [options]
   * @returns {Promise<Object>} { role, content: [{type:'text', text}], model }
   * @throws {Error} when the HTTP response status is not ok
   */
  async chat(messages, options = {}) {
    const systemPrompt = options.system || 'You are a helpful AI programming assistant.';
    const temperature = options.temperature ?? 0.7;

    const response = await fetch(`${this.baseURL}/api/chat`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: this.model,
        messages: [
          { role: 'system', content: systemPrompt },
          ...messages.map(({ role, content }) => ({ role, content })),
        ],
        stream: false, // request the complete reply in a single response body
        options: { temperature },
      }),
    });

    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama error: ${response.status} - ${error}`);
    }

    const data = await response.json();
    return {
      role: 'assistant',
      content: [{ type: 'text', text: data.message.content }],
      model: this.model,
    };
  }
}
|
|
135
|
+
|
|
136
|
+
/**
 * Client factory: build the AI client matching config.ai.provider.
 *
 * @param {Object} config - full app config; reads config.ai
 * @returns {OpenAIClient|OllamaClient} client for the selected provider
 * @throws {Error} for the 'anthropic' provider or an unknown provider
 */
export function createAIClient(config) {
  const { provider, openai, ollama } = config.ai;

  switch (provider) {
    case 'anthropic':
      // BUG FIX: this legacy module never defines or imports AnthropicClient,
      // so `new AnthropicClient(...)` threw a ReferenceError at runtime.
      // Fail with a descriptive Error instead; the Anthropic implementation
      // lives in ai-client.js.
      throw new Error('Anthropic provider is not available in ai-client-legacy.js; use createAIClient from ai-client.js instead.');
    case 'openai':
      return new OpenAIClient(openai);
    case 'ollama':
      return new OllamaClient(ollama);
    default:
      throw new Error(`Unknown AI provider: ${provider}`);
  }
}
|
|
151
|
+
|
|
152
|
+
/**
 * Validate that the active provider has the credentials it needs.
 *
 * @param {Object} config - full app config; reads config.ai
 * @throws {Error} when a required API key is missing or the provider is unknown
 */
export function checkConfig(config) {
  const { provider, anthropic, openai } = config.ai;

  switch (provider) {
    case 'anthropic':
      // `?.` guards against a missing `anthropic` section (previously a TypeError).
      if (!anthropic?.apiKey) {
        throw new Error('Anthropic API key not configured. Set CLOSER_ANTHROPIC_API_KEY environment variable.');
      }
      break;
    case 'openai':
      if (!openai?.apiKey) {
        throw new Error('OpenAI API key not configured. Set CLOSER_OPENAI_API_KEY environment variable.');
      }
      break;
    case 'ollama':
      // Ollama needs no API key, only a locally running server.
      break;
    default:
      // Previously unknown providers passed silently; fail fast here to match
      // the error createAIClient would raise later anyway.
      throw new Error(`Unknown AI provider: ${provider}`);
  }
}
|
package/src/ai-client.js
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI 客户端模块 - 使用 @anthropic-ai/sdk
|
|
3
|
+
*
|
|
4
|
+
* 相比之前实现的优势:
|
|
5
|
+
* - 代码量减少 70%
|
|
6
|
+
* - 自动类型检查
|
|
7
|
+
* - 内置错误处理和重试
|
|
8
|
+
* - 无需手工解析 SSE
|
|
9
|
+
* - 原生支持工具调用循环(toolRunner)
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
13
|
+
|
|
14
|
+
/**
 * Anthropic client built on the official `@anthropic-ai/sdk` package.
 */
export class AnthropicClient {
  /**
   * @param {{apiKey: string, baseURL?: string, model?: string, maxTokens?: number}} config
   */
  constructor(config) {
    this.client = new Anthropic({
      apiKey: config.apiKey,
      baseURL: config.baseURL || 'https://api.anthropic.com',
      timeout: 60000, // SDK default is 10 minutes; cap requests at 60 seconds
      maxRetries: 2, // let the SDK retry transient failures twice
    });
    this.model = config.model || 'claude-sonnet-4-5-20250929';
    this.maxTokens = config.maxTokens || 8192;
  }

  /**
   * Send a conversation and wait for the complete (non-streaming) reply.
   *
   * @param {Array} messages - message history (SDK handles the wire format)
   * @param {Object} [options] - { system, tools, temperature, thinking }
   * @returns {Promise<Object>} raw SDK message response
   */
  async chat(messages, options = {}) {
    // Extended thinking is enabled by default with a 20k-token budget.
    // NOTE(review): presumably the API requires temperature to be unset/1 while
    // thinking is enabled — confirm callers never pass a custom temperature here.
    return await this.client.messages.create({
      model: this.model,
      max_tokens: this.maxTokens,
      system: options.system,
      messages,
      tools: options.tools,
      temperature: options.temperature,
      thinking: options.thinking || { type: 'enabled', budget_tokens: 20000 },
    });
  }

  /**
   * Send a conversation and stream the reply through `onChunk`.
   * Forwards the SDK's high-level listeners (thinking/text/signature) and the
   * raw content-block events.
   *
   * @param {Array} messages - message history
   * @param {Object} [options] - { system, tools, temperature, thinking }
   * @param {Function} [onChunk] - receives { type, ... } event objects
   * @returns {Promise<Object>} the assembled final message
   */
  async chatStream(messages, options = {}, onChunk) {
    // Forward an event to the caller only when a callback was supplied.
    const emit = (event) => {
      if (typeof onChunk === 'function') {
        onChunk(event);
      }
    };

    const stream = this.client.messages.stream({
      model: this.model,
      max_tokens: this.maxTokens,
      system: options.system,
      messages,
      tools: options.tools,
      temperature: options.temperature,
      thinking: options.thinking || { type: 'enabled', budget_tokens: 20000 },
    });

    // High-level listeners: `delta` is the increment, `snapshot` the full text so far.
    stream.on('thinking', (delta, snapshot) => emit({ type: 'thinking', delta, snapshot }));
    stream.on('text', (delta, snapshot) => emit({ type: 'text', delta, snapshot }));
    // Signature of the completed thinking block.
    stream.on('signature', (signature) => emit({ type: 'signature', signature }));

    // Raw protocol events, forwarded for callers that track block boundaries.
    stream.on('content_block_start', (contentBlock) =>
      emit({ type: 'content_block_start', blockType: contentBlock.type, block: contentBlock })
    );
    stream.on('content_block_delta', (event) =>
      emit({ type: 'content_block_delta', delta: event.delta })
    );

    // Wait for the stream to finish and return the reconstructed message.
    return await stream.finalMessage();
  }

  /**
   * Run a conversation where the SDK's beta toolRunner drives the tool-use
   * loop automatically (tools defined e.g. via betaZodTool).
   *
   * @param {Array} messages - message history
   * @param {Array} tools - tool definitions
   * @param {Object} [options] - { system, temperature }
   * @returns {Promise<Object>} final message once all tool calls complete
   */
  async chatWithTools(messages, tools, options = {}) {
    return await this.client.beta.messages.toolRunner({
      model: this.model,
      max_tokens: this.maxTokens,
      system: options.system,
      messages,
      tools,
      temperature: options.temperature,
    });
  }

  /**
   * Count the tokens a message list would consume (for cost estimation).
   *
   * @param {Array} messages - message history
   * @returns {Promise<Object>} token-count response from the API
   */
  async countTokens(messages) {
    return await this.client.messages.countTokens({
      model: this.model,
      messages,
    });
  }
}
|
|
159
|
+
|
|
160
|
+
/**
 * Factory: create the AI client for the configured provider.
 *
 * OpenAI and Ollama still use the legacy implementation, loaded lazily.
 *
 * NOTE(review): the 'anthropic' branch returns a client instance synchronously,
 * while 'openai'/'ollama' return a Promise (their helpers are async). Callers
 * should `await` the result regardless of provider — confirm against call sites.
 *
 * @param {Object} config - full app config; reads config.ai
 * @throws {Error} for an unknown provider
 */
export function createAIClient(config) {
  const { provider, anthropic, openai, ollama } = config.ai;

  if (provider === 'anthropic') {
    return new AnthropicClient(anthropic);
  }
  if (provider === 'openai') {
    return createOpenAIClient(openai);
  }
  if (provider === 'ollama') {
    return createOllamaClient(ollama);
  }
  throw new Error(`Unknown AI provider: ${provider}`);
}
|
|
181
|
+
|
|
182
|
+
/**
 * Build an OpenAI client from the legacy module.
 * Uses a dynamic import to avoid a circular dependency.
 *
 * @param {Object} config - OpenAI section of the app config
 * @returns {Promise<Object>} legacy OpenAIClient instance
 */
async function createOpenAIClient(config) {
  const legacy = await import('./ai-client-legacy.js');
  return new legacy.OpenAIClient(config);
}
|
|
190
|
+
|
|
191
|
+
/**
 * Build an Ollama client from the legacy module.
 * Uses a dynamic import to avoid a circular dependency.
 *
 * @param {Object} config - Ollama section of the app config
 * @returns {Promise<Object>} legacy OllamaClient instance
 */
async function createOllamaClient(config) {
  const legacy = await import('./ai-client-legacy.js');
  return new legacy.OllamaClient(config);
}
|
|
199
|
+
|
|
200
|
+
/**
 * Validate that the active provider has the credentials it needs.
 *
 * @param {Object} config - full app config; reads config.ai
 * @throws {Error} when a required API key is missing or the provider is unknown
 */
export function checkConfig(config) {
  const { provider, anthropic, openai } = config.ai;

  switch (provider) {
    case 'anthropic':
      // `?.` guards against a missing `anthropic` section (previously a TypeError).
      if (!anthropic?.apiKey) {
        throw new Error('Anthropic API key not configured. Set CLOSER_ANTHROPIC_API_KEY environment variable.');
      }
      break;
    case 'openai':
      if (!openai?.apiKey) {
        throw new Error('OpenAI API key not configured. Set CLOSER_OPENAI_API_KEY environment variable.');
      }
      break;
    case 'ollama':
      // Ollama needs no API key, only a locally running server.
      break;
    default:
      // Previously unknown providers passed silently; fail fast here to match
      // the error createAIClient would raise later anyway.
      throw new Error(`Unknown AI provider: ${provider}`);
  }
}
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
import { spawn } from 'child_process';
|
|
2
|
+
|
|
3
|
+
/**
 * Run a shell command in a child bash process and capture its output.
 *
 * Resolves with a BashResult on normal exit (any exit code). Rejects on spawn
 * failure or timeout — NOTE(review): the rejection value is a BashResult, not
 * an Error instance; callers' catch blocks depend on that shape.
 *
 * @param {string} command - shell command line to execute
 * @param {Object} [options]
 * @param {string} [options.shell='bash'] - shell binary to invoke
 * @param {string} [options.cwd=process.cwd()] - working directory
 * @param {Object} [options.env=process.env] - environment variables
 * @param {number} [options.timeout=30000] - timeout in ms; <= 0 disables it
 * @returns {Promise<BashResult>} execution result
 */
export function executeBashCommand(command, options = {}) {
  return new Promise((resolve, reject) => {
    const {
      shell = 'bash',
      cwd = process.cwd(),
      env = process.env,
      timeout = 30000,
    } = options;

    // `-c` runs the command directly; interactive mode (`-i`) is avoided so
    // bash does not emit job-control warnings when no TTY is attached.
    const child = spawn(shell, ['-c', command], {
      cwd,
      env,
      stdio: ['pipe', 'pipe', 'pipe'],
    });

    let capturedOut = '';
    let capturedErr = '';
    let watchdog = null;

    // Arm the timeout watchdog (skipped when timeout <= 0).
    if (timeout > 0) {
      watchdog = setTimeout(() => {
        child.kill('SIGTERM');
        reject(new BashResult({
          command,
          stdout: capturedOut,
          stderr: capturedErr,
          exitCode: null,
          signal: 'SIGTERM',
          timedOut: true,
          error: `Command timed out after ${timeout}ms`,
        }));
      }, timeout);
    }

    // Accumulate output as it streams in.
    child.stdout.on('data', (chunk) => { capturedOut += chunk.toString(); });
    child.stderr.on('data', (chunk) => { capturedErr += chunk.toString(); });

    // Normal termination path (non-zero exit codes still resolve).
    child.on('close', (code, signal) => {
      if (watchdog) clearTimeout(watchdog);
      // If the watchdog already rejected, this resolve is a harmless no-op.
      resolve(new BashResult({
        command,
        stdout: capturedOut,
        stderr: capturedErr,
        exitCode: code,
        signal,
        timedOut: false,
      }));
    });

    // Spawn-level failure (e.g. shell binary not found).
    child.on('error', (err) => {
      if (watchdog) clearTimeout(watchdog);
      reject(new BashResult({
        command,
        stdout: capturedOut,
        stderr: capturedErr,
        exitCode: null,
        error: err.message,
      }));
    });
  });
}
|
|
89
|
+
|
|
90
|
+
/**
 * Value object describing one bash command execution.
 */
export class BashResult {
  /**
   * @param {Object} fields
   * @param {string} fields.command - the command that was run
   * @param {string} [fields.stdout=''] - captured standard output
   * @param {string} [fields.stderr=''] - captured standard error
   * @param {?number} [fields.exitCode=null] - process exit code (null if none)
   * @param {?string} [fields.signal=null] - terminating signal, if any
   * @param {boolean} [fields.timedOut=false] - whether the watchdog fired
   * @param {?string} [fields.error=null] - spawn/timeout error message
   */
  constructor({
    command,
    stdout = '',
    stderr = '',
    exitCode = null,
    signal = null,
    timedOut = false,
    error = null,
  }) {
    Object.assign(this, { command, stdout, stderr, exitCode, signal, timedOut, error });
    // Record creation time so results can be ordered in logs.
    this.timestamp = new Date().toISOString();
  }

  /** True only for a clean run: exit code 0, no spawn error, no timeout. */
  get success() {
    return this.exitCode === 0 && !this.error && !this.timedOut;
  }

  /** Combined output: stdout followed by stderr (no separator between them). */
  get output() {
    return this.stdout + this.stderr;
  }

  /** Plain-object snapshot, including the derived `success` flag. */
  toJSON() {
    return {
      command: this.command,
      stdout: this.stdout,
      stderr: this.stderr,
      exitCode: this.exitCode,
      signal: this.signal,
      success: this.success,
      timedOut: this.timedOut,
      error: this.error,
      timestamp: this.timestamp,
    };
  }

  /** Human-readable multi-line summary; empty sections are omitted. */
  toString() {
    const lines = [`Command: ${this.command}`, `Exit Code: ${this.exitCode}`];
    if (this.stdout) lines.push(`\nStdout:\n${this.stdout}`);
    if (this.stderr) lines.push(`\nStderr:\n${this.stderr}`);
    if (this.error) lines.push(`\nError: ${this.error}`);
    return lines.join('\n');
  }
}
|