tabby-ai-assistant 1.0.5 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/components/chat/ai-sidebar.component.d.ts +147 -0
- package/dist/components/chat/chat-interface.component.d.ts +38 -6
- package/dist/components/settings/general-settings.component.d.ts +6 -3
- package/dist/components/settings/provider-config.component.d.ts +25 -12
- package/dist/components/terminal/command-preview.component.d.ts +38 -0
- package/dist/index-full.d.ts +8 -0
- package/dist/index-minimal.d.ts +3 -0
- package/dist/index.d.ts +7 -3
- package/dist/index.js +1 -2
- package/dist/providers/tabby/ai-config.provider.d.ts +57 -5
- package/dist/providers/tabby/ai-hotkey.provider.d.ts +8 -14
- package/dist/providers/tabby/ai-toolbar-button.provider.d.ts +8 -9
- package/dist/services/chat/ai-sidebar.service.d.ts +89 -0
- package/dist/services/chat/chat-history.service.d.ts +78 -0
- package/dist/services/chat/chat-session.service.d.ts +57 -2
- package/dist/services/context/compaction.d.ts +90 -0
- package/dist/services/context/manager.d.ts +69 -0
- package/dist/services/context/memory.d.ts +116 -0
- package/dist/services/context/token-budget.d.ts +105 -0
- package/dist/services/core/ai-assistant.service.d.ts +40 -1
- package/dist/services/core/checkpoint.service.d.ts +130 -0
- package/dist/services/platform/escape-sequence.service.d.ts +132 -0
- package/dist/services/platform/platform-detection.service.d.ts +146 -0
- package/dist/services/providers/anthropic-provider.service.d.ts +5 -0
- package/dist/services/providers/base-provider.service.d.ts +6 -1
- package/dist/services/providers/glm-provider.service.d.ts +5 -0
- package/dist/services/providers/minimax-provider.service.d.ts +10 -1
- package/dist/services/providers/ollama-provider.service.d.ts +76 -0
- package/dist/services/providers/openai-compatible.service.d.ts +5 -0
- package/dist/services/providers/openai-provider.service.d.ts +5 -0
- package/dist/services/providers/vllm-provider.service.d.ts +82 -0
- package/dist/services/terminal/buffer-analyzer.service.d.ts +128 -0
- package/dist/services/terminal/terminal-manager.service.d.ts +185 -0
- package/dist/services/terminal/terminal-tools.service.d.ts +79 -0
- package/dist/types/ai.types.d.ts +92 -0
- package/dist/types/provider.types.d.ts +1 -1
- package/package.json +7 -10
- package/src/components/chat/ai-sidebar.component.ts +945 -0
- package/src/components/chat/chat-input.component.html +9 -24
- package/src/components/chat/chat-input.component.scss +3 -2
- package/src/components/chat/chat-interface.component.html +77 -69
- package/src/components/chat/chat-interface.component.scss +54 -4
- package/src/components/chat/chat-interface.component.ts +250 -34
- package/src/components/chat/chat-settings.component.scss +4 -4
- package/src/components/chat/chat-settings.component.ts +22 -11
- package/src/components/common/error-message.component.html +15 -0
- package/src/components/common/error-message.component.scss +77 -0
- package/src/components/common/error-message.component.ts +2 -96
- package/src/components/common/loading-spinner.component.html +4 -0
- package/src/components/common/loading-spinner.component.scss +57 -0
- package/src/components/common/loading-spinner.component.ts +2 -63
- package/src/components/security/consent-dialog.component.html +22 -0
- package/src/components/security/consent-dialog.component.scss +34 -0
- package/src/components/security/consent-dialog.component.ts +2 -55
- package/src/components/security/password-prompt.component.html +19 -0
- package/src/components/security/password-prompt.component.scss +30 -0
- package/src/components/security/password-prompt.component.ts +2 -54
- package/src/components/security/risk-confirm-dialog.component.html +8 -12
- package/src/components/security/risk-confirm-dialog.component.scss +8 -5
- package/src/components/security/risk-confirm-dialog.component.ts +6 -6
- package/src/components/settings/ai-settings-tab.component.html +16 -20
- package/src/components/settings/ai-settings-tab.component.scss +8 -5
- package/src/components/settings/ai-settings-tab.component.ts +12 -12
- package/src/components/settings/general-settings.component.html +8 -17
- package/src/components/settings/general-settings.component.scss +6 -3
- package/src/components/settings/general-settings.component.ts +62 -22
- package/src/components/settings/provider-config.component.html +19 -39
- package/src/components/settings/provider-config.component.scss +182 -39
- package/src/components/settings/provider-config.component.ts +119 -7
- package/src/components/settings/security-settings.component.scss +1 -1
- package/src/components/terminal/ai-toolbar-button.component.html +8 -0
- package/src/components/terminal/ai-toolbar-button.component.scss +20 -0
- package/src/components/terminal/ai-toolbar-button.component.ts +2 -30
- package/src/components/terminal/command-preview.component.html +61 -0
- package/src/components/terminal/command-preview.component.scss +72 -0
- package/src/components/terminal/command-preview.component.ts +127 -140
- package/src/components/terminal/command-suggestion.component.html +23 -0
- package/src/components/terminal/command-suggestion.component.scss +55 -0
- package/src/components/terminal/command-suggestion.component.ts +2 -77
- package/src/index-minimal.ts +32 -0
- package/src/index.ts +94 -11
- package/src/index.ts.backup +165 -0
- package/src/providers/tabby/ai-config.provider.ts +60 -51
- package/src/providers/tabby/ai-hotkey.provider.ts +23 -39
- package/src/providers/tabby/ai-settings-tab.provider.ts +2 -2
- package/src/providers/tabby/ai-toolbar-button.provider.ts +29 -24
- package/src/services/chat/ai-sidebar.service.ts +258 -0
- package/src/services/chat/chat-history.service.ts +308 -0
- package/src/services/chat/chat-history.service.ts.backup +239 -0
- package/src/services/chat/chat-session.service.ts +276 -3
- package/src/services/context/compaction.ts +483 -0
- package/src/services/context/manager.ts +442 -0
- package/src/services/context/memory.ts +519 -0
- package/src/services/context/token-budget.ts +422 -0
- package/src/services/core/ai-assistant.service.ts +280 -5
- package/src/services/core/ai-provider-manager.service.ts +2 -2
- package/src/services/core/checkpoint.service.ts +619 -0
- package/src/services/platform/escape-sequence.service.ts +499 -0
- package/src/services/platform/platform-detection.service.ts +494 -0
- package/src/services/providers/anthropic-provider.service.ts +28 -1
- package/src/services/providers/base-provider.service.ts +7 -1
- package/src/services/providers/glm-provider.service.ts +28 -1
- package/src/services/providers/minimax-provider.service.ts +209 -11
- package/src/services/providers/ollama-provider.service.ts +445 -0
- package/src/services/providers/openai-compatible.service.ts +9 -0
- package/src/services/providers/openai-provider.service.ts +9 -0
- package/src/services/providers/vllm-provider.service.ts +463 -0
- package/src/services/security/risk-assessment.service.ts +6 -2
- package/src/services/terminal/buffer-analyzer.service.ts +594 -0
- package/src/services/terminal/terminal-manager.service.ts +748 -0
- package/src/services/terminal/terminal-tools.service.ts +441 -0
- package/src/styles/ai-assistant.scss +78 -6
- package/src/types/ai.types.ts +144 -0
- package/src/types/provider.types.ts +1 -1
- package/tsconfig.json +9 -9
- package/webpack.config.js +28 -6
|
@@ -0,0 +1,445 @@
|
|
|
1
|
+
import { Injectable } from '@angular/core';
|
|
2
|
+
import { Observable, Observer, from } from 'rxjs';
|
|
3
|
+
import { BaseAiProvider } from './base-provider.service';
|
|
4
|
+
import { ProviderCapability, HealthStatus, ValidationResult } from '../../types/provider.types';
|
|
5
|
+
import { ChatRequest, ChatResponse, StreamEvent, MessageRole, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse } from '../../types/ai.types';
|
|
6
|
+
import { LoggerService } from '../core/logger.service';
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Ollama 本地 AI 提供商
|
|
10
|
+
* 兼容 OpenAI API 格式,默认端口 11434
|
|
11
|
+
*/
|
|
12
|
+
@Injectable()
|
|
13
|
+
export class OllamaProviderService extends BaseAiProvider {
|
|
14
|
+
readonly name = 'ollama';
|
|
15
|
+
readonly displayName = 'Ollama (本地)';
|
|
16
|
+
readonly capabilities = [
|
|
17
|
+
ProviderCapability.CHAT,
|
|
18
|
+
ProviderCapability.STREAMING,
|
|
19
|
+
ProviderCapability.COMMAND_GENERATION,
|
|
20
|
+
ProviderCapability.COMMAND_EXPLANATION
|
|
21
|
+
];
|
|
22
|
+
readonly authConfig = {
|
|
23
|
+
type: 'none' as const,
|
|
24
|
+
credentials: {}
|
|
25
|
+
};
|
|
26
|
+
|
|
27
|
+
constructor(logger: LoggerService) {
|
|
28
|
+
super(logger);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
protected getDefaultBaseURL(): string {
|
|
32
|
+
return 'http://localhost:11434/v1';
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
/**
|
|
36
|
+
* 非流式聊天
|
|
37
|
+
*/
|
|
38
|
+
async chat(request: ChatRequest): Promise<ChatResponse> {
|
|
39
|
+
this.logRequest(request);
|
|
40
|
+
|
|
41
|
+
try {
|
|
42
|
+
const response = await fetch(`${this.getBaseURL()}/chat/completions`, {
|
|
43
|
+
method: 'POST',
|
|
44
|
+
headers: { 'Content-Type': 'application/json' },
|
|
45
|
+
body: JSON.stringify({
|
|
46
|
+
model: this.config?.model || 'llama3.1',
|
|
47
|
+
messages: this.transformMessages(request.messages),
|
|
48
|
+
max_tokens: request.maxTokens || 1000,
|
|
49
|
+
temperature: request.temperature || 0.7,
|
|
50
|
+
stream: false
|
|
51
|
+
})
|
|
52
|
+
});
|
|
53
|
+
|
|
54
|
+
if (!response.ok) {
|
|
55
|
+
throw new Error(`Ollama API error: ${response.status}`);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
const data = await response.json();
|
|
59
|
+
this.logResponse(data);
|
|
60
|
+
|
|
61
|
+
return {
|
|
62
|
+
message: {
|
|
63
|
+
id: this.generateId(),
|
|
64
|
+
role: MessageRole.ASSISTANT,
|
|
65
|
+
content: data.choices[0]?.message?.content || '',
|
|
66
|
+
timestamp: new Date()
|
|
67
|
+
},
|
|
68
|
+
usage: data.usage ? {
|
|
69
|
+
promptTokens: data.usage.prompt_tokens,
|
|
70
|
+
completionTokens: data.usage.completion_tokens,
|
|
71
|
+
totalTokens: data.usage.total_tokens
|
|
72
|
+
} : undefined
|
|
73
|
+
};
|
|
74
|
+
} catch (error) {
|
|
75
|
+
this.logError(error, { request });
|
|
76
|
+
throw new Error(`Ollama chat failed: ${error instanceof Error ? error.message : String(error)}`);
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
/**
|
|
81
|
+
* 流式聊天
|
|
82
|
+
*/
|
|
83
|
+
chatStream(request: ChatRequest): Observable<StreamEvent> {
|
|
84
|
+
return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
|
|
85
|
+
const abortController = new AbortController();
|
|
86
|
+
|
|
87
|
+
this.logRequest(request);
|
|
88
|
+
|
|
89
|
+
const runStream = async () => {
|
|
90
|
+
try {
|
|
91
|
+
const response = await fetch(`${this.getBaseURL()}/chat/completions`, {
|
|
92
|
+
method: 'POST',
|
|
93
|
+
headers: { 'Content-Type': 'application/json' },
|
|
94
|
+
body: JSON.stringify({
|
|
95
|
+
model: this.config?.model || 'llama3.1',
|
|
96
|
+
messages: this.transformMessages(request.messages),
|
|
97
|
+
max_tokens: request.maxTokens || 1000,
|
|
98
|
+
temperature: request.temperature || 0.7,
|
|
99
|
+
stream: true
|
|
100
|
+
}),
|
|
101
|
+
signal: abortController.signal
|
|
102
|
+
});
|
|
103
|
+
|
|
104
|
+
if (!response.ok) {
|
|
105
|
+
throw new Error(`Ollama API error: ${response.status}`);
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
const reader = response.body?.getReader();
|
|
109
|
+
const decoder = new TextDecoder();
|
|
110
|
+
|
|
111
|
+
if (!reader) {
|
|
112
|
+
throw new Error('No response body');
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
let fullContent = '';
|
|
116
|
+
|
|
117
|
+
while (true) {
|
|
118
|
+
const { done, value } = await reader.read();
|
|
119
|
+
if (done) break;
|
|
120
|
+
|
|
121
|
+
const chunk = decoder.decode(value, { stream: true });
|
|
122
|
+
const lines = chunk.split('\n').filter(line => line.startsWith('data: '));
|
|
123
|
+
|
|
124
|
+
for (const line of lines) {
|
|
125
|
+
const data = line.slice(6);
|
|
126
|
+
if (data === '[DONE]') continue;
|
|
127
|
+
|
|
128
|
+
try {
|
|
129
|
+
const parsed = JSON.parse(data);
|
|
130
|
+
const delta = parsed.choices[0]?.delta?.content;
|
|
131
|
+
if (delta) {
|
|
132
|
+
fullContent += delta;
|
|
133
|
+
subscriber.next({
|
|
134
|
+
type: 'text_delta',
|
|
135
|
+
textDelta: delta
|
|
136
|
+
});
|
|
137
|
+
}
|
|
138
|
+
} catch (e) {
|
|
139
|
+
// 忽略解析错误
|
|
140
|
+
}
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
subscriber.next({
|
|
145
|
+
type: 'message_end',
|
|
146
|
+
message: {
|
|
147
|
+
id: this.generateId(),
|
|
148
|
+
role: MessageRole.ASSISTANT,
|
|
149
|
+
content: fullContent,
|
|
150
|
+
timestamp: new Date()
|
|
151
|
+
}
|
|
152
|
+
});
|
|
153
|
+
subscriber.complete();
|
|
154
|
+
} catch (error) {
|
|
155
|
+
if ((error as any).name !== 'AbortError') {
|
|
156
|
+
this.logError(error, { request });
|
|
157
|
+
subscriber.error(new Error(`Ollama stream failed: ${error instanceof Error ? error.message : String(error)}`));
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
};
|
|
161
|
+
|
|
162
|
+
runStream();
|
|
163
|
+
|
|
164
|
+
// 返回取消函数
|
|
165
|
+
return () => abortController.abort();
|
|
166
|
+
});
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
/**
|
|
170
|
+
* 健康检查 - 检测 Ollama 服务是否运行
|
|
171
|
+
*/
|
|
172
|
+
async healthCheck(): Promise<HealthStatus> {
|
|
173
|
+
try {
|
|
174
|
+
const controller = new AbortController();
|
|
175
|
+
const timeoutId = setTimeout(() => controller.abort(), 5000);
|
|
176
|
+
|
|
177
|
+
const response = await fetch(`${this.getBaseURL()}/models`, {
|
|
178
|
+
method: 'GET',
|
|
179
|
+
signal: controller.signal
|
|
180
|
+
});
|
|
181
|
+
|
|
182
|
+
clearTimeout(timeoutId);
|
|
183
|
+
|
|
184
|
+
if (response.ok) {
|
|
185
|
+
this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
|
|
186
|
+
return HealthStatus.HEALTHY;
|
|
187
|
+
}
|
|
188
|
+
return HealthStatus.UNHEALTHY;
|
|
189
|
+
} catch (error) {
|
|
190
|
+
this.logger.warn('Ollama health check failed', error);
|
|
191
|
+
return HealthStatus.UNHEALTHY;
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
/**
|
|
196
|
+
* 验证配置 - 本地服务无需 API Key
|
|
197
|
+
*/
|
|
198
|
+
validateConfig(): ValidationResult {
|
|
199
|
+
const warnings: string[] = [];
|
|
200
|
+
|
|
201
|
+
if (!this.config?.model) {
|
|
202
|
+
warnings.push('未指定模型,将使用默认模型 llama3.1');
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
return {
|
|
206
|
+
valid: true,
|
|
207
|
+
warnings: warnings.length > 0 ? warnings : undefined
|
|
208
|
+
};
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
/**
|
|
212
|
+
* 生成命令
|
|
213
|
+
*/
|
|
214
|
+
async generateCommand(request: CommandRequest): Promise<CommandResponse> {
|
|
215
|
+
const prompt = this.buildCommandPrompt(request);
|
|
216
|
+
|
|
217
|
+
const chatRequest: ChatRequest = {
|
|
218
|
+
messages: [
|
|
219
|
+
{
|
|
220
|
+
id: this.generateId(),
|
|
221
|
+
role: MessageRole.USER,
|
|
222
|
+
content: prompt,
|
|
223
|
+
timestamp: new Date()
|
|
224
|
+
}
|
|
225
|
+
],
|
|
226
|
+
maxTokens: 500,
|
|
227
|
+
temperature: 0.3
|
|
228
|
+
};
|
|
229
|
+
|
|
230
|
+
const response = await this.chat(chatRequest);
|
|
231
|
+
return this.parseCommandResponse(response.message.content);
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
/**
|
|
235
|
+
* 解释命令
|
|
236
|
+
*/
|
|
237
|
+
async explainCommand(request: ExplainRequest): Promise<ExplainResponse> {
|
|
238
|
+
const prompt = this.buildExplainPrompt(request);
|
|
239
|
+
|
|
240
|
+
const chatRequest: ChatRequest = {
|
|
241
|
+
messages: [
|
|
242
|
+
{
|
|
243
|
+
id: this.generateId(),
|
|
244
|
+
role: MessageRole.USER,
|
|
245
|
+
content: prompt,
|
|
246
|
+
timestamp: new Date()
|
|
247
|
+
}
|
|
248
|
+
],
|
|
249
|
+
maxTokens: 1000,
|
|
250
|
+
temperature: 0.5
|
|
251
|
+
};
|
|
252
|
+
|
|
253
|
+
const response = await this.chat(chatRequest);
|
|
254
|
+
return this.parseExplainResponse(response.message.content);
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
/**
|
|
258
|
+
* 分析结果
|
|
259
|
+
*/
|
|
260
|
+
async analyzeResult(request: AnalysisRequest): Promise<AnalysisResponse> {
|
|
261
|
+
const prompt = this.buildAnalysisPrompt(request);
|
|
262
|
+
|
|
263
|
+
const chatRequest: ChatRequest = {
|
|
264
|
+
messages: [
|
|
265
|
+
{
|
|
266
|
+
id: this.generateId(),
|
|
267
|
+
role: MessageRole.USER,
|
|
268
|
+
content: prompt,
|
|
269
|
+
timestamp: new Date()
|
|
270
|
+
}
|
|
271
|
+
],
|
|
272
|
+
maxTokens: 1000,
|
|
273
|
+
temperature: 0.7
|
|
274
|
+
};
|
|
275
|
+
|
|
276
|
+
const response = await this.chat(chatRequest);
|
|
277
|
+
return this.parseAnalysisResponse(response.message.content);
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
/**
|
|
281
|
+
* 转换消息格式
|
|
282
|
+
*/
|
|
283
|
+
protected transformMessages(messages: any[]): any[] {
|
|
284
|
+
return messages.map(msg => ({
|
|
285
|
+
role: msg.role === 'user' ? 'user' : 'assistant',
|
|
286
|
+
content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
|
|
287
|
+
}));
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
/**
|
|
291
|
+
* 构建命令生成提示
|
|
292
|
+
*/
|
|
293
|
+
private buildCommandPrompt(request: CommandRequest): string {
|
|
294
|
+
let prompt = `请将以下自然语言描述转换为准确的终端命令:\n\n"${request.naturalLanguage}"\n\n`;
|
|
295
|
+
|
|
296
|
+
if (request.context) {
|
|
297
|
+
prompt += `当前环境:\n`;
|
|
298
|
+
if (request.context.currentDirectory) {
|
|
299
|
+
prompt += `- 当前目录:${request.context.currentDirectory}\n`;
|
|
300
|
+
}
|
|
301
|
+
if (request.context.operatingSystem) {
|
|
302
|
+
prompt += `- 操作系统:${request.context.operatingSystem}\n`;
|
|
303
|
+
}
|
|
304
|
+
if (request.context.shell) {
|
|
305
|
+
prompt += `- Shell:${request.context.shell}\n`;
|
|
306
|
+
}
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
prompt += `\n请直接返回JSON格式:\n`;
|
|
310
|
+
prompt += `{\n`;
|
|
311
|
+
prompt += ` "command": "具体命令",\n`;
|
|
312
|
+
prompt += ` "explanation": "命令解释",\n`;
|
|
313
|
+
prompt += ` "confidence": 0.95\n`;
|
|
314
|
+
prompt += `}\n`;
|
|
315
|
+
|
|
316
|
+
return prompt;
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
/**
|
|
320
|
+
* 构建命令解释提示
|
|
321
|
+
*/
|
|
322
|
+
private buildExplainPrompt(request: ExplainRequest): string {
|
|
323
|
+
let prompt = `请详细解释以下终端命令:\n\n\`${request.command}\`\n\n`;
|
|
324
|
+
|
|
325
|
+
if (request.context?.currentDirectory) {
|
|
326
|
+
prompt += `当前目录:${request.context.currentDirectory}\n`;
|
|
327
|
+
}
|
|
328
|
+
if (request.context?.operatingSystem) {
|
|
329
|
+
prompt += `操作系统:${request.context.operatingSystem}\n`;
|
|
330
|
+
}
|
|
331
|
+
|
|
332
|
+
prompt += `\n请按以下JSON格式返回:\n`;
|
|
333
|
+
prompt += `{\n`;
|
|
334
|
+
prompt += ` "explanation": "整体解释",\n`;
|
|
335
|
+
prompt += ` "breakdown": [\n`;
|
|
336
|
+
prompt += ` {"part": "命令部分", "description": "说明"}\n`;
|
|
337
|
+
prompt += ` ],\n`;
|
|
338
|
+
prompt += ` "examples": ["使用示例"]\n`;
|
|
339
|
+
prompt += `}\n`;
|
|
340
|
+
|
|
341
|
+
return prompt;
|
|
342
|
+
}
|
|
343
|
+
|
|
344
|
+
/**
|
|
345
|
+
* 构建结果分析提示
|
|
346
|
+
*/
|
|
347
|
+
private buildAnalysisPrompt(request: AnalysisRequest): string {
|
|
348
|
+
let prompt = `请分析以下命令执行结果:\n\n`;
|
|
349
|
+
prompt += `命令:${request.command}\n`;
|
|
350
|
+
prompt += `退出码:${request.exitCode}\n`;
|
|
351
|
+
prompt += `输出:\n${request.output}\n\n`;
|
|
352
|
+
|
|
353
|
+
if (request.context?.workingDirectory) {
|
|
354
|
+
prompt += `工作目录:${request.context.workingDirectory}\n`;
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
prompt += `\n请按以下JSON格式返回:\n`;
|
|
358
|
+
prompt += `{\n`;
|
|
359
|
+
prompt += ` "summary": "结果总结",\n`;
|
|
360
|
+
prompt += ` "insights": ["洞察1", "洞察2"],\n`;
|
|
361
|
+
prompt += ` "success": true/false,\n`;
|
|
362
|
+
prompt += ` "issues": [\n`;
|
|
363
|
+
prompt += ` {"severity": "warning|error|info", "message": "问题描述", "suggestion": "建议"}\n`;
|
|
364
|
+
prompt += ` ]\n`;
|
|
365
|
+
prompt += `}\n`;
|
|
366
|
+
|
|
367
|
+
return prompt;
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
/**
|
|
371
|
+
* 解析命令响应
|
|
372
|
+
*/
|
|
373
|
+
private parseCommandResponse(content: string): CommandResponse {
|
|
374
|
+
try {
|
|
375
|
+
const match = content.match(/\{[\s\S]*\}/);
|
|
376
|
+
if (match) {
|
|
377
|
+
const parsed = JSON.parse(match[0]);
|
|
378
|
+
return {
|
|
379
|
+
command: parsed.command || '',
|
|
380
|
+
explanation: parsed.explanation || '',
|
|
381
|
+
confidence: parsed.confidence || 0.5
|
|
382
|
+
};
|
|
383
|
+
}
|
|
384
|
+
} catch (error) {
|
|
385
|
+
this.logger.warn('Failed to parse Ollama command response as JSON', error);
|
|
386
|
+
}
|
|
387
|
+
|
|
388
|
+
const lines = content.split('\n').map(l => l.trim()).filter(l => l);
|
|
389
|
+
return {
|
|
390
|
+
command: lines[0] || '',
|
|
391
|
+
explanation: lines.slice(1).join(' ') || 'AI生成的命令',
|
|
392
|
+
confidence: 0.5
|
|
393
|
+
};
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
/**
|
|
397
|
+
* 解析解释响应
|
|
398
|
+
*/
|
|
399
|
+
private parseExplainResponse(content: string): ExplainResponse {
|
|
400
|
+
try {
|
|
401
|
+
const match = content.match(/\{[\s\S]*\}/);
|
|
402
|
+
if (match) {
|
|
403
|
+
const parsed = JSON.parse(match[0]);
|
|
404
|
+
return {
|
|
405
|
+
explanation: parsed.explanation || '',
|
|
406
|
+
breakdown: parsed.breakdown || [],
|
|
407
|
+
examples: parsed.examples || []
|
|
408
|
+
};
|
|
409
|
+
}
|
|
410
|
+
} catch (error) {
|
|
411
|
+
this.logger.warn('Failed to parse Ollama explain response as JSON', error);
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
return {
|
|
415
|
+
explanation: content,
|
|
416
|
+
breakdown: []
|
|
417
|
+
};
|
|
418
|
+
}
|
|
419
|
+
|
|
420
|
+
/**
|
|
421
|
+
* 解析分析响应
|
|
422
|
+
*/
|
|
423
|
+
private parseAnalysisResponse(content: string): AnalysisResponse {
|
|
424
|
+
try {
|
|
425
|
+
const match = content.match(/\{[\s\S]*\}/);
|
|
426
|
+
if (match) {
|
|
427
|
+
const parsed = JSON.parse(match[0]);
|
|
428
|
+
return {
|
|
429
|
+
summary: parsed.summary || '',
|
|
430
|
+
insights: parsed.insights || [],
|
|
431
|
+
success: parsed.success !== false,
|
|
432
|
+
issues: parsed.issues || []
|
|
433
|
+
};
|
|
434
|
+
}
|
|
435
|
+
} catch (error) {
|
|
436
|
+
this.logger.warn('Failed to parse Ollama analysis response as JSON', error);
|
|
437
|
+
}
|
|
438
|
+
|
|
439
|
+
return {
|
|
440
|
+
summary: content,
|
|
441
|
+
insights: [],
|
|
442
|
+
success: true
|
|
443
|
+
};
|
|
444
|
+
}
|
|
445
|
+
}
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { Injectable } from '@angular/core';
|
|
2
|
+
import { Observable, from } from 'rxjs';
|
|
2
3
|
import axios, { AxiosInstance } from 'axios';
|
|
3
4
|
import { BaseAiProvider } from './base-provider.service';
|
|
4
5
|
import { ProviderCapability, HealthStatus, ValidationResult } from '../../types/provider.types';
|
|
@@ -106,6 +107,14 @@ export class OpenAiCompatibleProviderService extends BaseAiProvider {
|
|
|
106
107
|
}
|
|
107
108
|
}
|
|
108
109
|
|
|
110
|
+
  /**
   * Streaming chat — not yet implemented; falls back to a single
   * non-streaming chat() call whose full ChatResponse is emitted as
   * one Observable value.
   *
   * NOTE(review): the emitted value is a ChatResponse, not a stream
   * event, which is presumably why the return type is Observable<any>
   * — confirm against the base class's chatStream signature.
   */
  chatStream(request: ChatRequest): Observable<any> {
    // Fall back to the non-streaming path.
    return from(this.chat(request));
  }
|
|
117
|
+
|
|
109
118
|
async generateCommand(request: CommandRequest): Promise<CommandResponse> {
|
|
110
119
|
const prompt = this.buildCommandPrompt(request);
|
|
111
120
|
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { Injectable } from '@angular/core';
|
|
2
|
+
import { Observable, from } from 'rxjs';
|
|
2
3
|
import axios, { AxiosInstance } from 'axios';
|
|
3
4
|
import { BaseAiProvider } from './base-provider.service';
|
|
4
5
|
import { ProviderCapability, HealthStatus, ValidationResult } from '../../types/provider.types';
|
|
@@ -94,6 +95,14 @@ export class OpenAiProviderService extends BaseAiProvider {
|
|
|
94
95
|
}
|
|
95
96
|
}
|
|
96
97
|
|
|
98
|
+
  /**
   * Streaming chat — not yet implemented; falls back to a single
   * non-streaming chat() call whose full ChatResponse is emitted as
   * one Observable value.
   *
   * NOTE(review): the emitted value is a ChatResponse, not a stream
   * event, which is presumably why the return type is Observable<any>
   * — confirm against the base class's chatStream signature.
   */
  chatStream(request: ChatRequest): Observable<any> {
    // Fall back to the non-streaming path.
    return from(this.chat(request));
  }
|
|
105
|
+
|
|
97
106
|
async generateCommand(request: CommandRequest): Promise<CommandResponse> {
|
|
98
107
|
const prompt = this.buildCommandPrompt(request);
|
|
99
108
|
|