tabby-ai-assistant 1.0.12 → 1.0.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.editorconfig +18 -0
- package/README.md +113 -55
- package/dist/index.js +1 -1
- package/package.json +6 -4
- package/src/components/chat/ai-sidebar.component.scss +220 -9
- package/src/components/chat/ai-sidebar.component.ts +364 -29
- package/src/components/chat/chat-input.component.ts +36 -4
- package/src/components/chat/chat-interface.component.ts +225 -5
- package/src/components/chat/chat-message.component.ts +6 -1
- package/src/components/settings/context-settings.component.ts +91 -91
- package/src/components/terminal/ai-toolbar-button.component.ts +4 -2
- package/src/components/terminal/command-suggestion.component.ts +148 -6
- package/src/index.ts +0 -6
- package/src/providers/tabby/ai-toolbar-button.provider.ts +7 -3
- package/src/services/chat/ai-sidebar.service.ts +414 -410
- package/src/services/chat/chat-session.service.ts +36 -12
- package/src/services/context/compaction.ts +110 -134
- package/src/services/context/manager.ts +27 -7
- package/src/services/context/memory.ts +17 -33
- package/src/services/context/summary.service.ts +136 -0
- package/src/services/core/ai-assistant.service.ts +1060 -37
- package/src/services/core/ai-provider-manager.service.ts +154 -25
- package/src/services/core/checkpoint.service.ts +218 -18
- package/src/services/core/config-provider.service.ts +4 -12
- package/src/services/core/toast.service.ts +106 -106
- package/src/services/providers/anthropic-provider.service.ts +126 -202
- package/src/services/providers/base-provider.service.ts +315 -21
- package/src/services/providers/glm-provider.service.ts +151 -233
- package/src/services/providers/minimax-provider.service.ts +55 -238
- package/src/services/providers/ollama-provider.service.ts +117 -188
- package/src/services/providers/openai-compatible.service.ts +165 -177
- package/src/services/providers/openai-provider.service.ts +170 -177
- package/src/services/providers/vllm-provider.service.ts +116 -188
- package/src/services/terminal/terminal-context.service.ts +265 -5
- package/src/services/terminal/terminal-manager.service.ts +748 -748
- package/src/services/terminal/terminal-tools.service.ts +612 -441
- package/src/types/ai.types.ts +156 -3
- package/src/types/provider.types.ts +206 -75
- package/src/utils/cost.utils.ts +249 -0
- package/src/utils/validation.utils.ts +306 -2
- package/dist/index.js.LICENSE.txt +0 -18
- package/src/index.ts.backup +0 -165
- package/src/services/chat/chat-history.service.ts.backup +0 -239
- package/src/services/terminal/command-analyzer.service.ts +0 -43
- package/src/services/terminal/context-menu.service.ts +0 -45
- package/src/services/terminal/hotkey.service.ts +0 -53
- package/webpack.config.js.backup +0 -57
--- package/src/services/providers/openai-provider.service.ts (1.0.12)
+++ package/src/services/providers/openai-provider.service.ts (1.0.15)
@@ -1,9 +1,9 @@
 import { Injectable } from '@angular/core';
-import { Observable,
+import { Observable, Observer } from 'rxjs';
 import axios, { AxiosInstance } from 'axios';
 import { BaseAiProvider } from './base-provider.service';
-import { ProviderCapability,
-import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole } from '../../types/ai.types';
+import { ProviderCapability, ValidationResult } from '../../types/provider.types';
+import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole, StreamEvent } from '../../types/ai.types';
 import { LoggerService } from '../core/logger.service';

 /**
@@ -96,11 +96,162 @@ export class OpenAiProviderService extends BaseAiProvider {
   }

   /**
-   * Streaming chat - 
+   * Streaming chat - supports tool-call events
    */
-  chatStream(request: ChatRequest): Observable<
-
-
+  chatStream(request: ChatRequest): Observable<StreamEvent> {
+    return new Observable<StreamEvent>((subscriber) => {
+      if (!this.client) {
+        const error = new Error('OpenAI client not initialized');
+        subscriber.next({ type: 'error', error: error.message });
+        subscriber.error(error);
+        return;
+      }
+
+      const abortController = new AbortController();
+
+      const runStream = async () => {
+        try {
+          const response = await this.client!.post('/chat/completions', {
+            model: this.config?.model || 'gpt-4',
+            messages: this.transformMessages(request.messages),
+            max_tokens: request.maxTokens || 1000,
+            temperature: request.temperature || 0.7,
+            stream: true
+          }, {
+            responseType: 'stream'
+          });
+
+          const stream = response.data;
+          let currentToolCallId = '';
+          let currentToolCallName = '';
+          let currentToolInput = '';
+          let currentToolIndex = -1;
+          let fullContent = '';
+
+          for await (const chunk of stream) {
+            if (abortController.signal.aborted) break;
+
+            const lines = chunk.toString().split('\n').filter(Boolean);
+
+            for (const line of lines) {
+              if (line.startsWith('data: ')) {
+                const data = line.slice(6);
+                if (data === '[DONE]') continue;
+
+                try {
+                  const parsed = JSON.parse(data);
+                  const choice = parsed.choices?.[0];
+
+                  this.logger.debug('Stream event', { type: 'delta', hasToolCalls: !!choice?.delta?.tool_calls });
+
+                  // Handle tool-call chunks
+                  if (choice?.delta?.tool_calls?.length > 0) {
+                    for (const toolCall of choice.delta.tool_calls) {
+                      const index = toolCall.index || 0;
+
+                      // A new tool call is starting
+                      if (currentToolIndex !== index) {
+                        if (currentToolIndex >= 0) {
+                          // Emit the end event for the previous tool call
+                          let parsedInput = {};
+                          try {
+                            parsedInput = JSON.parse(currentToolInput || '{}');
+                          } catch (e) {
+                            // Fall back to the raw input
+                          }
+                          subscriber.next({
+                            type: 'tool_use_end',
+                            toolCall: {
+                              id: currentToolCallId,
+                              name: currentToolCallName,
+                              input: parsedInput
+                            }
+                          });
+                          this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
+                        }
+
+                        currentToolIndex = index;
+                        currentToolCallId = toolCall.id || `tool_${Date.now()}_${index}`;
+                        currentToolCallName = toolCall.function?.name || '';
+                        currentToolInput = toolCall.function?.arguments || '';
+
+                        // Emit the tool-call start event
+                        subscriber.next({
+                          type: 'tool_use_start',
+                          toolCall: {
+                            id: currentToolCallId,
+                            name: currentToolCallName,
+                            input: {}
+                          }
+                        });
+                        this.logger.debug('Stream event', { type: 'tool_use_start', name: currentToolCallName });
+                      } else {
+                        // Keep accumulating arguments
+                        if (toolCall.function?.arguments) {
+                          currentToolInput += toolCall.function.arguments;
+                        }
+                      }
+                    }
+                  }
+                  // Handle text deltas
+                  else if (choice?.delta?.content) {
+                    const textDelta = choice.delta.content;
+                    fullContent += textDelta;
+                    subscriber.next({
+                      type: 'text_delta',
+                      textDelta
+                    });
+                  }
+                } catch (e) {
+                  // Ignore parse errors
+                }
+              }
+            }
+          }
+
+          // Emit the end event for the last tool call
+          if (currentToolIndex >= 0) {
+            let parsedInput = {};
+            try {
+              parsedInput = JSON.parse(currentToolInput || '{}');
+            } catch (e) {
+              // Fall back to the raw input
+            }
+            subscriber.next({
+              type: 'tool_use_end',
+              toolCall: {
+                id: currentToolCallId,
+                name: currentToolCallName,
+                input: parsedInput
+              }
+            });
+            this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
+          }
+
+          subscriber.next({
+            type: 'message_end',
+            message: {
+              id: this.generateId(),
+              role: MessageRole.ASSISTANT,
+              content: fullContent,
+              timestamp: new Date()
+            }
+          });
+          this.logger.debug('Stream event', { type: 'message_end', contentLength: fullContent.length });
+          subscriber.complete();
+
+        } catch (error) {
+          const errorMessage = `OpenAI stream failed: ${error instanceof Error ? error.message : String(error)}`;
+          this.logger.error('Stream error', error);
+          subscriber.next({ type: 'error', error: errorMessage });
+          subscriber.error(new Error(errorMessage));
+        }
+      };
+
+      runStream();
+
+      return () => abortController.abort();
+    });
   }

   async generateCommand(request: CommandRequest): Promise<CommandResponse> {
@@ -163,35 +314,19 @@ export class OpenAiProviderService extends BaseAiProvider {
     return this.parseAnalysisResponse(response.message.content);
   }

-  async
-
-
-
-    }
-
-    const response = await this.client.post('/chat/completions', {
-      model: this.config?.model || 'gpt-4',
-      max_tokens: 1,
-      messages: [
-        {
-          role: 'user',
-          content: 'Hi'
-        }
-      ]
-    });
-
-    if (response.status === 200) {
-      this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
-      return HealthStatus.HEALTHY;
-    }
+  protected async sendTestRequest(request: ChatRequest): Promise<ChatResponse> {
+    if (!this.client) {
+      throw new Error('OpenAI client not initialized');
+    }

-
+    const response = await this.client.post('/chat/completions', {
+      model: this.config?.model || 'gpt-4',
+      messages: this.transformMessages(request.messages),
+      max_tokens: request.maxTokens || 1,
+      temperature: request.temperature || 0
+    });

-
-      this.logger.error('OpenAI health check failed', error);
-      this.lastHealthCheck = { status: HealthStatus.UNHEALTHY, timestamp: new Date() };
-      return HealthStatus.UNHEALTHY;
-    }
+    return this.transformChatResponse(response.data);
   }

   validateConfig(): ValidationResult {
@@ -215,10 +350,6 @@ export class OpenAiProviderService extends BaseAiProvider {
     return result;
   }

-  protected getDefaultBaseURL(): string {
-    return 'https://api.openai.com/v1';
-  }
-
   protected transformMessages(messages: any[]): any[] {
     return messages.map(msg => ({
       role: msg.role,
@@ -244,142 +375,4 @@ export class OpenAiProviderService extends BaseAiProvider {
       } : undefined
     };
   }
-
-  private buildCommandPrompt(request: CommandRequest): string {
-    let prompt = `Please convert the following natural-language description into an accurate terminal command:\n\n"${request.naturalLanguage}"\n\n`;
-
-    if (request.context) {
-      prompt += `Current environment:\n`;
-      if (request.context.currentDirectory) {
-        prompt += `- Current directory: ${request.context.currentDirectory}\n`;
-      }
-      if (request.context.operatingSystem) {
-        prompt += `- Operating system: ${request.context.operatingSystem}\n`;
-      }
-      if (request.context.shell) {
-        prompt += `- Shell: ${request.context.shell}\n`;
-      }
-    }
-
-    prompt += `\nPlease return JSON directly in this format:\n`;
-    prompt += `{\n`;
-    prompt += `  "command": "the exact command",\n`;
-    prompt += `  "explanation": "explanation of the command",\n`;
-    prompt += `  "confidence": 0.95\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  private buildExplainPrompt(request: ExplainRequest): string {
-    let prompt = `Please explain the following terminal command in detail:\n\n\`${request.command}\`\n\n`;
-
-    if (request.context?.currentDirectory) {
-      prompt += `Current directory: ${request.context.currentDirectory}\n`;
-    }
-    if (request.context?.operatingSystem) {
-      prompt += `Operating system: ${request.context.operatingSystem}\n`;
-    }
-
-    prompt += `\nPlease respond in the following JSON format:\n`;
-    prompt += `{\n`;
-    prompt += `  "explanation": "overall explanation",\n`;
-    prompt += `  "breakdown": [\n`;
-    prompt += `    {"part": "command part", "description": "description"}\n`;
-    prompt += `  ],\n`;
-    prompt += `  "examples": ["usage example"]\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  private buildAnalysisPrompt(request: AnalysisRequest): string {
-    let prompt = `Please analyze the following command execution result:\n\n`;
-    prompt += `Command: ${request.command}\n`;
-    prompt += `Exit code: ${request.exitCode}\n`;
-    prompt += `Output:\n${request.output}\n\n`;
-
-    if (request.context?.workingDirectory) {
-      prompt += `Working directory: ${request.context.workingDirectory}\n`;
-    }
-
-    prompt += `\nPlease respond in the following JSON format:\n`;
-    prompt += `{\n`;
-    prompt += `  "summary": "summary of the result",\n`;
-    prompt += `  "insights": ["insight 1", "insight 2"],\n`;
-    prompt += `  "success": true/false,\n`;
-    prompt += `  "issues": [\n`;
-    prompt += `    {"severity": "warning|error|info", "message": "issue description", "suggestion": "suggestion"}\n`;
-    prompt += `  ]\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  private parseCommandResponse(content: string): CommandResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          command: parsed.command || '',
-          explanation: parsed.explanation || '',
-          confidence: parsed.confidence || 0.5
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse command response as JSON', error);
-    }
-
-    const lines = content.split('\n').map(l => l.trim()).filter(l => l);
-    return {
-      command: lines[0] || '',
-      explanation: lines.slice(1).join(' ') || 'AI-generated command',
-      confidence: 0.5
-    };
-  }
-
-  private parseExplainResponse(content: string): ExplainResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          explanation: parsed.explanation || '',
-          breakdown: parsed.breakdown || [],
-          examples: parsed.examples || []
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse explain response as JSON', error);
-    }
-
-    return {
-      explanation: content,
-      breakdown: []
-    };
-  }
-
-  private parseAnalysisResponse(content: string): AnalysisResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          summary: parsed.summary || '',
-          insights: parsed.insights || [],
-          success: parsed.success !== false,
-          issues: parsed.issues || []
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse analysis response as JSON', error);
-    }
-
-    return {
-      summary: content,
-      insights: [],
-      success: true
-    };
-  }
-}
+}
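For orientation only (this block is not part of the package diff): a minimal sketch of how a caller might consume the new `chatStream(...)` `Observable<StreamEvent>` added above. The event names and payload fields mirror the handlers in the diff; the `StreamEvent` type written here and the `provider` variable are assumptions, since `ai.types.ts` itself is not shown.

```ts
import { Observable } from 'rxjs';

// Assumed StreamEvent shape, reconstructed from the events emitted in the diff above.
type StreamEvent =
  | { type: 'text_delta'; textDelta: string }
  | { type: 'tool_use_start' | 'tool_use_end'; toolCall: { id: string; name: string; input: unknown } }
  | { type: 'message_end'; message: { id: string; role: string; content: string; timestamp: Date } }
  | { type: 'error'; error: string };

// Stand-in for an initialized OpenAiProviderService instance (hypothetical wiring).
declare const provider: { chatStream(request: { messages: any[] }): Observable<StreamEvent> };

let transcript = '';
const sub = provider.chatStream({ messages: [{ role: 'user', content: 'list the files in /tmp' }] }).subscribe({
  next: (event) => {
    if (event.type === 'text_delta') transcript += event.textDelta;                 // incremental assistant text
    else if (event.type === 'tool_use_end') console.log(event.toolCall.name, event.toolCall.input); // parsed tool arguments
    else if (event.type === 'message_end') console.log('final message:', event.message.content);
    else if (event.type === 'error') console.error('stream error:', event.error);
  },
  error: (err) => console.error('stream failed', err),
});

// Unsubscribing runs the teardown added in the diff (abortController.abort()).
// sub.unsubscribe();
```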