tabby-ai-assistant 1.0.5 → 1.0.6
This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/dist/components/chat/ai-sidebar.component.d.ts +147 -0
- package/dist/components/chat/chat-interface.component.d.ts +38 -6
- package/dist/components/settings/general-settings.component.d.ts +6 -3
- package/dist/components/settings/provider-config.component.d.ts +25 -12
- package/dist/components/terminal/command-preview.component.d.ts +38 -0
- package/dist/index-full.d.ts +8 -0
- package/dist/index-minimal.d.ts +3 -0
- package/dist/index.d.ts +7 -3
- package/dist/index.js +1 -2
- package/dist/providers/tabby/ai-config.provider.d.ts +57 -5
- package/dist/providers/tabby/ai-hotkey.provider.d.ts +8 -14
- package/dist/providers/tabby/ai-toolbar-button.provider.d.ts +8 -9
- package/dist/services/chat/ai-sidebar.service.d.ts +89 -0
- package/dist/services/chat/chat-history.service.d.ts +78 -0
- package/dist/services/chat/chat-session.service.d.ts +57 -2
- package/dist/services/context/compaction.d.ts +90 -0
- package/dist/services/context/manager.d.ts +69 -0
- package/dist/services/context/memory.d.ts +116 -0
- package/dist/services/context/token-budget.d.ts +105 -0
- package/dist/services/core/ai-assistant.service.d.ts +40 -1
- package/dist/services/core/checkpoint.service.d.ts +130 -0
- package/dist/services/platform/escape-sequence.service.d.ts +132 -0
- package/dist/services/platform/platform-detection.service.d.ts +146 -0
- package/dist/services/providers/anthropic-provider.service.d.ts +5 -0
- package/dist/services/providers/base-provider.service.d.ts +6 -1
- package/dist/services/providers/glm-provider.service.d.ts +5 -0
- package/dist/services/providers/minimax-provider.service.d.ts +10 -1
- package/dist/services/providers/ollama-provider.service.d.ts +76 -0
- package/dist/services/providers/openai-compatible.service.d.ts +5 -0
- package/dist/services/providers/openai-provider.service.d.ts +5 -0
- package/dist/services/providers/vllm-provider.service.d.ts +82 -0
- package/dist/services/terminal/buffer-analyzer.service.d.ts +128 -0
- package/dist/services/terminal/terminal-manager.service.d.ts +185 -0
- package/dist/services/terminal/terminal-tools.service.d.ts +79 -0
- package/dist/types/ai.types.d.ts +92 -0
- package/dist/types/provider.types.d.ts +1 -1
- package/package.json +7 -10
- package/src/components/chat/ai-sidebar.component.ts +945 -0
- package/src/components/chat/chat-input.component.html +9 -24
- package/src/components/chat/chat-input.component.scss +3 -2
- package/src/components/chat/chat-interface.component.html +77 -69
- package/src/components/chat/chat-interface.component.scss +54 -4
- package/src/components/chat/chat-interface.component.ts +250 -34
- package/src/components/chat/chat-settings.component.scss +4 -4
- package/src/components/chat/chat-settings.component.ts +22 -11
- package/src/components/common/error-message.component.html +15 -0
- package/src/components/common/error-message.component.scss +77 -0
- package/src/components/common/error-message.component.ts +2 -96
- package/src/components/common/loading-spinner.component.html +4 -0
- package/src/components/common/loading-spinner.component.scss +57 -0
- package/src/components/common/loading-spinner.component.ts +2 -63
- package/src/components/security/consent-dialog.component.html +22 -0
- package/src/components/security/consent-dialog.component.scss +34 -0
- package/src/components/security/consent-dialog.component.ts +2 -55
- package/src/components/security/password-prompt.component.html +19 -0
- package/src/components/security/password-prompt.component.scss +30 -0
- package/src/components/security/password-prompt.component.ts +2 -54
- package/src/components/security/risk-confirm-dialog.component.html +8 -12
- package/src/components/security/risk-confirm-dialog.component.scss +8 -5
- package/src/components/security/risk-confirm-dialog.component.ts +6 -6
- package/src/components/settings/ai-settings-tab.component.html +16 -20
- package/src/components/settings/ai-settings-tab.component.scss +8 -5
- package/src/components/settings/ai-settings-tab.component.ts +12 -12
- package/src/components/settings/general-settings.component.html +8 -17
- package/src/components/settings/general-settings.component.scss +6 -3
- package/src/components/settings/general-settings.component.ts +62 -22
- package/src/components/settings/provider-config.component.html +19 -39
- package/src/components/settings/provider-config.component.scss +182 -39
- package/src/components/settings/provider-config.component.ts +119 -7
- package/src/components/settings/security-settings.component.scss +1 -1
- package/src/components/terminal/ai-toolbar-button.component.html +8 -0
- package/src/components/terminal/ai-toolbar-button.component.scss +20 -0
- package/src/components/terminal/ai-toolbar-button.component.ts +2 -30
- package/src/components/terminal/command-preview.component.html +61 -0
- package/src/components/terminal/command-preview.component.scss +72 -0
- package/src/components/terminal/command-preview.component.ts +127 -140
- package/src/components/terminal/command-suggestion.component.html +23 -0
- package/src/components/terminal/command-suggestion.component.scss +55 -0
- package/src/components/terminal/command-suggestion.component.ts +2 -77
- package/src/index-minimal.ts +32 -0
- package/src/index.ts +94 -11
- package/src/index.ts.backup +165 -0
- package/src/providers/tabby/ai-config.provider.ts +60 -51
- package/src/providers/tabby/ai-hotkey.provider.ts +23 -39
- package/src/providers/tabby/ai-settings-tab.provider.ts +2 -2
- package/src/providers/tabby/ai-toolbar-button.provider.ts +29 -24
- package/src/services/chat/ai-sidebar.service.ts +258 -0
- package/src/services/chat/chat-history.service.ts +308 -0
- package/src/services/chat/chat-history.service.ts.backup +239 -0
- package/src/services/chat/chat-session.service.ts +276 -3
- package/src/services/context/compaction.ts +483 -0
- package/src/services/context/manager.ts +442 -0
- package/src/services/context/memory.ts +519 -0
- package/src/services/context/token-budget.ts +422 -0
- package/src/services/core/ai-assistant.service.ts +280 -5
- package/src/services/core/ai-provider-manager.service.ts +2 -2
- package/src/services/core/checkpoint.service.ts +619 -0
- package/src/services/platform/escape-sequence.service.ts +499 -0
- package/src/services/platform/platform-detection.service.ts +494 -0
- package/src/services/providers/anthropic-provider.service.ts +28 -1
- package/src/services/providers/base-provider.service.ts +7 -1
- package/src/services/providers/glm-provider.service.ts +28 -1
- package/src/services/providers/minimax-provider.service.ts +209 -11
- package/src/services/providers/ollama-provider.service.ts +445 -0
- package/src/services/providers/openai-compatible.service.ts +9 -0
- package/src/services/providers/openai-provider.service.ts +9 -0
- package/src/services/providers/vllm-provider.service.ts +463 -0
- package/src/services/security/risk-assessment.service.ts +6 -2
- package/src/services/terminal/buffer-analyzer.service.ts +594 -0
- package/src/services/terminal/terminal-manager.service.ts +748 -0
- package/src/services/terminal/terminal-tools.service.ts +441 -0
- package/src/styles/ai-assistant.scss +78 -6
- package/src/types/ai.types.ts +144 -0
- package/src/types/provider.types.ts +1 -1
- package/tsconfig.json +9 -9
- package/webpack.config.js +28 -6
package/src/services/providers/vllm-provider.service.ts (new file):

@@ -0,0 +1,463 @@
+import { Injectable } from '@angular/core';
+import { Observable, Observer } from 'rxjs';
+import { BaseAiProvider } from './base-provider.service';
+import { ProviderCapability, HealthStatus, ValidationResult } from '../../types/provider.types';
+import { ChatRequest, ChatResponse, StreamEvent, MessageRole, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse } from '../../types/ai.types';
+import { LoggerService } from '../core/logger.service';
+
+/**
+ * vLLM local AI provider
+ * Compatible with the OpenAI API format; default port 8000
+ */
+@Injectable()
+export class VllmProviderService extends BaseAiProvider {
+  readonly name = 'vllm';
+  readonly displayName = 'vLLM (本地)';
+  readonly capabilities = [
+    ProviderCapability.CHAT,
+    ProviderCapability.STREAMING,
+    ProviderCapability.COMMAND_GENERATION,
+    ProviderCapability.COMMAND_EXPLANATION
+  ];
+  readonly authConfig = {
+    type: 'bearer' as const,
+    credentials: { apiKey: '' }
+  };
+
+  constructor(logger: LoggerService) {
+    super(logger);
+  }
+
+  protected getDefaultBaseURL(): string {
+    return 'http://localhost:8000/v1';
+  }
+
+  /**
+   * Build the auth headers
+   */
+  protected getAuthHeaders(): Record<string, string> {
+    const headers: Record<string, string> = {
+      'Content-Type': 'application/json'
+    };
+
+    if (this.config?.apiKey) {
+      headers['Authorization'] = `Bearer ${this.config.apiKey}`;
+    }
+
+    return headers;
+  }
+
+  /**
+   * Non-streaming chat
+   */
+  async chat(request: ChatRequest): Promise<ChatResponse> {
+    this.logRequest(request);
+
+    try {
+      const response = await fetch(`${this.getBaseURL()}/chat/completions`, {
+        method: 'POST',
+        headers: this.getAuthHeaders(),
+        body: JSON.stringify({
+          model: this.config?.model || 'meta-llama/Llama-3.1-8B',
+          messages: this.transformMessages(request.messages),
+          max_tokens: request.maxTokens || 1000,
+          temperature: request.temperature || 0.7,
+          stream: false
+        })
+      });
+
+      if (!response.ok) {
+        throw new Error(`vLLM API error: ${response.status}`);
+      }
+
+      const data = await response.json();
+      this.logResponse(data);
+
+      return {
+        message: {
+          id: this.generateId(),
+          role: MessageRole.ASSISTANT,
+          content: data.choices[0]?.message?.content || '',
+          timestamp: new Date()
+        },
+        usage: data.usage ? {
+          promptTokens: data.usage.prompt_tokens,
+          completionTokens: data.usage.completion_tokens,
+          totalTokens: data.usage.total_tokens
+        } : undefined
+      };
+    } catch (error) {
+      this.logError(error, { request });
+      throw new Error(`vLLM chat failed: ${error instanceof Error ? error.message : String(error)}`);
+    }
+  }
+
+  /**
+   * Streaming chat
+   */
+  chatStream(request: ChatRequest): Observable<StreamEvent> {
+    return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
+      const abortController = new AbortController();
+
+      this.logRequest(request);
+
+      const runStream = async () => {
+        try {
+          const response = await fetch(`${this.getBaseURL()}/chat/completions`, {
+            method: 'POST',
+            headers: this.getAuthHeaders(),
+            body: JSON.stringify({
+              model: this.config?.model || 'meta-llama/Llama-3.1-8B',
+              messages: this.transformMessages(request.messages),
+              max_tokens: request.maxTokens || 1000,
+              temperature: request.temperature || 0.7,
+              stream: true
+            }),
+            signal: abortController.signal
+          });
+
+          if (!response.ok) {
+            throw new Error(`vLLM API error: ${response.status}`);
+          }
+
+          const reader = response.body?.getReader();
+          const decoder = new TextDecoder();
+
+          if (!reader) {
+            throw new Error('No response body');
+          }
+
+          let fullContent = '';
+
+          while (true) {
+            const { done, value } = await reader.read();
+            if (done) break;
+
+            const chunk = decoder.decode(value, { stream: true });
+            const lines = chunk.split('\n').filter(line => line.startsWith('data: '));
+
+            for (const line of lines) {
+              const data = line.slice(6);
+              if (data === '[DONE]') continue;
+
+              try {
+                const parsed = JSON.parse(data);
+                const delta = parsed.choices[0]?.delta?.content;
+                if (delta) {
+                  fullContent += delta;
+                  subscriber.next({
+                    type: 'text_delta',
+                    textDelta: delta
+                  });
+                }
+              } catch (e) {
+                // Ignore parse errors
+              }
+            }
+          }
+
+          subscriber.next({
+            type: 'message_end',
+            message: {
+              id: this.generateId(),
+              role: MessageRole.ASSISTANT,
+              content: fullContent,
+              timestamp: new Date()
+            }
+          });
+          subscriber.complete();
+        } catch (error) {
+          if ((error as any).name !== 'AbortError') {
+            this.logError(error, { request });
+            subscriber.error(new Error(`vLLM stream failed: ${error instanceof Error ? error.message : String(error)}`));
+          }
+        }
+      };
+
+      runStream();
+
+      // Return the teardown function that cancels the stream
+      return () => abortController.abort();
+    });
+  }
+
+  /**
+   * Health check - detects whether the vLLM service is running
+   */
+  async healthCheck(): Promise<HealthStatus> {
+    try {
+      const controller = new AbortController();
+      const timeoutId = setTimeout(() => controller.abort(), 5000);
+
+      const response = await fetch(`${this.getBaseURL()}/models`, {
+        method: 'GET',
+        headers: this.getAuthHeaders(),
+        signal: controller.signal
+      });
+
+      clearTimeout(timeoutId);
+
+      if (response.ok) {
+        this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
+        return HealthStatus.HEALTHY;
+      }
+      return HealthStatus.DEGRADED;
+    } catch (error) {
+      this.logger.warn('vLLM health check failed', error);
+      return HealthStatus.UNHEALTHY;
+    }
+  }
+
+  /**
+   * Validate the configuration
+   */
+  validateConfig(): ValidationResult {
+    const errors: string[] = [];
+    const warnings: string[] = [];
+
+    if (!this.config?.model) {
+      warnings.push('未指定模型,将使用默认模型 meta-llama/Llama-3.1-8B');
+    }
+
+    return {
+      valid: errors.length === 0,
+      errors: errors.length > 0 ? errors : undefined,
+      warnings: warnings.length > 0 ? warnings : undefined
+    };
+  }
+
+  /**
+   * Generate a command
+   */
+  async generateCommand(request: CommandRequest): Promise<CommandResponse> {
+    const prompt = this.buildCommandPrompt(request);
+
+    const chatRequest: ChatRequest = {
+      messages: [
+        {
+          id: this.generateId(),
+          role: MessageRole.USER,
+          content: prompt,
+          timestamp: new Date()
+        }
+      ],
+      maxTokens: 500,
+      temperature: 0.3
+    };
+
+    const response = await this.chat(chatRequest);
+    return this.parseCommandResponse(response.message.content);
+  }
+
+  /**
+   * Explain a command
+   */
+  async explainCommand(request: ExplainRequest): Promise<ExplainResponse> {
+    const prompt = this.buildExplainPrompt(request);
+
+    const chatRequest: ChatRequest = {
+      messages: [
+        {
+          id: this.generateId(),
+          role: MessageRole.USER,
+          content: prompt,
+          timestamp: new Date()
+        }
+      ],
+      maxTokens: 1000,
+      temperature: 0.5
+    };
+
+    const response = await this.chat(chatRequest);
+    return this.parseExplainResponse(response.message.content);
+  }
+
+  /**
+   * Analyze a command result
+   */
+  async analyzeResult(request: AnalysisRequest): Promise<AnalysisResponse> {
+    const prompt = this.buildAnalysisPrompt(request);
+
+    const chatRequest: ChatRequest = {
+      messages: [
+        {
+          id: this.generateId(),
+          role: MessageRole.USER,
+          content: prompt,
+          timestamp: new Date()
+        }
+      ],
+      maxTokens: 1000,
+      temperature: 0.7
+    };
+
+    const response = await this.chat(chatRequest);
+    return this.parseAnalysisResponse(response.message.content);
+  }
+
+  /**
+   * Transform the message format
+   */
+  protected transformMessages(messages: any[]): any[] {
+    return messages.map(msg => ({
+      role: msg.role === 'user' ? 'user' : 'assistant',
+      content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
+    }));
+  }
+
+  /**
+   * Build the command-generation prompt
+   */
+  private buildCommandPrompt(request: CommandRequest): string {
+    let prompt = `请将以下自然语言描述转换为准确的终端命令:\n\n"${request.naturalLanguage}"\n\n`;
+
+    if (request.context) {
+      prompt += `当前环境:\n`;
+      if (request.context.currentDirectory) {
+        prompt += `- 当前目录:${request.context.currentDirectory}\n`;
+      }
+      if (request.context.operatingSystem) {
+        prompt += `- 操作系统:${request.context.operatingSystem}\n`;
+      }
+      if (request.context.shell) {
+        prompt += `- Shell:${request.context.shell}\n`;
+      }
+    }
+
+    prompt += `\n请直接返回JSON格式:\n`;
+    prompt += `{\n`;
+    prompt += `  "command": "具体命令",\n`;
+    prompt += `  "explanation": "命令解释",\n`;
+    prompt += `  "confidence": 0.95\n`;
+    prompt += `}\n`;
+
+    return prompt;
+  }
+
+  /**
+   * Build the command-explanation prompt
+   */
+  private buildExplainPrompt(request: ExplainRequest): string {
+    let prompt = `请详细解释以下终端命令:\n\n\`${request.command}\`\n\n`;
+
+    if (request.context?.currentDirectory) {
+      prompt += `当前目录:${request.context.currentDirectory}\n`;
+    }
+    if (request.context?.operatingSystem) {
+      prompt += `操作系统:${request.context.operatingSystem}\n`;
+    }
+
+    prompt += `\n请按以下JSON格式返回:\n`;
+    prompt += `{\n`;
+    prompt += `  "explanation": "整体解释",\n`;
+    prompt += `  "breakdown": [\n`;
+    prompt += `    {"part": "命令部分", "description": "说明"}\n`;
+    prompt += `  ],\n`;
+    prompt += `  "examples": ["使用示例"]\n`;
+    prompt += `}\n`;
+
+    return prompt;
+  }
+
+  /**
+   * Build the result-analysis prompt
+   */
+  private buildAnalysisPrompt(request: AnalysisRequest): string {
+    let prompt = `请分析以下命令执行结果:\n\n`;
+    prompt += `命令:${request.command}\n`;
+    prompt += `退出码:${request.exitCode}\n`;
+    prompt += `输出:\n${request.output}\n\n`;
+
+    if (request.context?.workingDirectory) {
+      prompt += `工作目录:${request.context.workingDirectory}\n`;
+    }
+
+    prompt += `\n请按以下JSON格式返回:\n`;
+    prompt += `{\n`;
+    prompt += `  "summary": "结果总结",\n`;
+    prompt += `  "insights": ["洞察1", "洞察2"],\n`;
+    prompt += `  "success": true/false,\n`;
+    prompt += `  "issues": [\n`;
+    prompt += `    {"severity": "warning|error|info", "message": "问题描述", "suggestion": "建议"}\n`;
+    prompt += `  ]\n`;
+    prompt += `}\n`;
+
+    return prompt;
+  }
+
+  /**
+   * Parse the command-generation response
+   */
+  private parseCommandResponse(content: string): CommandResponse {
+    try {
+      const match = content.match(/\{[\s\S]*\}/);
+      if (match) {
+        const parsed = JSON.parse(match[0]);
+        return {
+          command: parsed.command || '',
+          explanation: parsed.explanation || '',
+          confidence: parsed.confidence || 0.5
+        };
+      }
+    } catch (error) {
+      this.logger.warn('Failed to parse vLLM command response as JSON', error);
+    }
+
+    const lines = content.split('\n').map(l => l.trim()).filter(l => l);
+    return {
+      command: lines[0] || '',
+      explanation: lines.slice(1).join(' ') || 'AI生成的命令',
+      confidence: 0.5
+    };
+  }
+
+  /**
+   * Parse the explanation response
+   */
+  private parseExplainResponse(content: string): ExplainResponse {
+    try {
+      const match = content.match(/\{[\s\S]*\}/);
+      if (match) {
+        const parsed = JSON.parse(match[0]);
+        return {
+          explanation: parsed.explanation || '',
+          breakdown: parsed.breakdown || [],
+          examples: parsed.examples || []
+        };
+      }
+    } catch (error) {
+      this.logger.warn('Failed to parse vLLM explain response as JSON', error);
+    }
+
+    return {
+      explanation: content,
+      breakdown: []
+    };
+  }
+
+  /**
+   * Parse the analysis response
+   */
+  private parseAnalysisResponse(content: string): AnalysisResponse {
+    try {
+      const match = content.match(/\{[\s\S]*\}/);
+      if (match) {
+        const parsed = JSON.parse(match[0]);
+        return {
+          summary: parsed.summary || '',
+          insights: parsed.insights || [],
+          success: parsed.success !== false,
+          issues: parsed.issues || []
+        };
+      }
+    } catch (error) {
+      this.logger.warn('Failed to parse vLLM analysis response as JSON', error);
+    }
+
+    return {
+      summary: content,
+      insights: [],
+      success: true
+    };
+  }
+}
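Reviewer note on the streaming path above: `chatStream` splits each decoded chunk on `'\n'` and keeps only lines beginning with `data: `, so an SSE event whose `data:` line straddles two network reads is silently dropped. A minimal carry-buffer sketch of the usual fix, assuming the same newline-delimited framing; the helper name `splitSseLines` is hypothetical, not part of the package:

// Hypothetical helper: keep the (possibly incomplete) tail of each chunk
// so a "data: " line split across reads is completed by the next chunk.
function splitSseLines(carry: string, chunk: string): { lines: string[]; carry: string } {
  const text = carry + chunk;
  const parts = text.split('\n');
  const rest = parts.pop() ?? '';  // last piece may be an unterminated line
  return { lines: parts.filter(l => l.startsWith('data: ')), carry: rest };
}

// Sketch of how the read loop could use it:
//   let carry = '';
//   while (true) {
//     const { done, value } = await reader.read();
//     if (done) break;
//     const res = splitSseLines(carry, decoder.decode(value, { stream: true }));
//     carry = res.carry;
//     for (const line of res.lines) { /* parse line.slice(6) as before */ }
//   }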
package/src/services/security/risk-assessment.service.ts:

@@ -96,7 +96,11 @@ export class RiskAssessmentService {
   async performAssessment(command: string): Promise<RiskAssessment> {
     this.logger.debug('Assessing risk for command', { command });
 
-    const matchedPatterns
+    const matchedPatterns: {
+      pattern: string;
+      match: string;
+      severity: RiskLevel;
+    }[] = [];
     let maxSeverity = RiskLevel.LOW;
     const reasons: string[] = [];
 
@@ -330,7 +334,7 @@ export class RiskAssessmentService {
    * Batch-assess multiple commands
    */
   async assessMultiple(commands: string[]): Promise<{ command: string; level: RiskLevel }[]> {
-    const results = [];
+    const results: { command: string; level: RiskLevel }[] = [];
     for (const command of commands) {
       const level = await this.assessRisk(command);
       results.push({ command, level });
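Both hunks in this file only add explicit element types to array literals that were previously left for the compiler to infer. With an explicit type, TypeScript checks the object shape at each `push` site instead of widening an untyped `[]`. A standalone illustration of the effect (a local `RiskLevel` enum stands in for the package's own type):

enum RiskLevel { LOW = 'low', MEDIUM = 'medium', HIGH = 'high' }

// Explicitly typed, as in the new code: malformed pushes are compile errors.
const results: { command: string; level: RiskLevel }[] = [];
results.push({ command: 'ls -la', level: RiskLevel.LOW });        // ok: shape matches
// results.push({ comand: 'rm -rf /', level: RiskLevel.HIGH });   // error: unknown property 'comand'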