tabby-ai-assistant 1.0.12 → 1.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.editorconfig +18 -0
  2. package/README.md +113 -55
  3. package/dist/index.js +1 -1
  4. package/package.json +6 -4
  5. package/src/components/chat/ai-sidebar.component.scss +220 -9
  6. package/src/components/chat/ai-sidebar.component.ts +364 -29
  7. package/src/components/chat/chat-input.component.ts +36 -4
  8. package/src/components/chat/chat-interface.component.ts +225 -5
  9. package/src/components/chat/chat-message.component.ts +6 -1
  10. package/src/components/settings/context-settings.component.ts +91 -91
  11. package/src/components/terminal/ai-toolbar-button.component.ts +4 -2
  12. package/src/components/terminal/command-suggestion.component.ts +148 -6
  13. package/src/index.ts +0 -6
  14. package/src/providers/tabby/ai-toolbar-button.provider.ts +7 -3
  15. package/src/services/chat/ai-sidebar.service.ts +414 -410
  16. package/src/services/chat/chat-session.service.ts +36 -12
  17. package/src/services/context/compaction.ts +110 -134
  18. package/src/services/context/manager.ts +27 -7
  19. package/src/services/context/memory.ts +17 -33
  20. package/src/services/context/summary.service.ts +136 -0
  21. package/src/services/core/ai-assistant.service.ts +1060 -37
  22. package/src/services/core/ai-provider-manager.service.ts +154 -25
  23. package/src/services/core/checkpoint.service.ts +218 -18
  24. package/src/services/core/config-provider.service.ts +4 -12
  25. package/src/services/core/toast.service.ts +106 -106
  26. package/src/services/providers/anthropic-provider.service.ts +126 -202
  27. package/src/services/providers/base-provider.service.ts +315 -21
  28. package/src/services/providers/glm-provider.service.ts +151 -233
  29. package/src/services/providers/minimax-provider.service.ts +55 -238
  30. package/src/services/providers/ollama-provider.service.ts +117 -188
  31. package/src/services/providers/openai-compatible.service.ts +165 -177
  32. package/src/services/providers/openai-provider.service.ts +170 -177
  33. package/src/services/providers/vllm-provider.service.ts +116 -188
  34. package/src/services/terminal/terminal-context.service.ts +265 -5
  35. package/src/services/terminal/terminal-manager.service.ts +748 -748
  36. package/src/services/terminal/terminal-tools.service.ts +612 -441
  37. package/src/types/ai.types.ts +156 -3
  38. package/src/types/provider.types.ts +206 -75
  39. package/src/utils/cost.utils.ts +249 -0
  40. package/src/utils/validation.utils.ts +306 -2
  41. package/dist/index.js.LICENSE.txt +0 -18
  42. package/src/index.ts.backup +0 -165
  43. package/src/services/chat/chat-history.service.ts.backup +0 -239
  44. package/src/services/terminal/command-analyzer.service.ts +0 -43
  45. package/src/services/terminal/context-menu.service.ts +0 -45
  46. package/src/services/terminal/hotkey.service.ts +0 -53
  47. package/webpack.config.js.backup +0 -57
@@ -1,9 +1,9 @@
 import { Injectable } from '@angular/core';
-import { Observable, from } from 'rxjs';
+import { Observable, Observer } from 'rxjs';
 import axios, { AxiosInstance } from 'axios';
 import { BaseAiProvider } from './base-provider.service';
-import { ProviderCapability, HealthStatus, ValidationResult } from '../../types/provider.types';
-import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole } from '../../types/ai.types';
+import { ProviderCapability, ValidationResult } from '../../types/provider.types';
+import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole, StreamEvent } from '../../types/ai.types';
 import { LoggerService } from '../core/logger.service';
 
 /**
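
The StreamEvent type imported above is defined in src/types/ai.types.ts (+156 −3 in this release) and is not shown in this diff. Judging only from the events the new chatStream implementation emits below, its shape is roughly the following sketch; the exact definition in the package may differ.

```typescript
// Rough sketch of the StreamEvent union, inferred from how chatStream uses it below.
// The real definition lives in src/types/ai.types.ts and may name things differently.
interface StreamToolCall {
  id: string;
  name: string;
  input: Record<string, unknown>;
}

type StreamEvent =
  | { type: 'text_delta'; textDelta: string }
  | { type: 'tool_use_start'; toolCall: StreamToolCall }
  | { type: 'tool_use_end'; toolCall: StreamToolCall }
  | { type: 'message_end'; message: { id: string; role: string; content: string; timestamp: Date } }
  | { type: 'error'; error: string };
```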
@@ -108,11 +108,157 @@ export class OpenAiCompatibleProviderService extends BaseAiProvider {
   }
 
   /**
-   * Streaming chat - not yet implemented; falls back to non-streaming
+   * Streaming chat - supports tool call events
   */
-  chatStream(request: ChatRequest): Observable<any> {
-    // Fall back to non-streaming
-    return from(this.chat(request));
+  chatStream(request: ChatRequest): Observable<StreamEvent> {
+    return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
+      if (!this.client) {
+        const error = new Error('OpenAI compatible client not initialized');
+        subscriber.next({ type: 'error', error: error.message });
+        subscriber.error(error);
+        return;
+      }
+
+      const abortController = new AbortController();
+
+      const runStream = async () => {
+        try {
+          const response = await this.client!.post('/chat/completions', {
+            model: this.config?.model || 'gpt-3.5-turbo',
+            messages: this.transformMessages(request.messages),
+            max_tokens: request.maxTokens || 1000,
+            temperature: request.temperature || 0.7,
+            stream: true
+          }, {
+            responseType: 'stream'
+          });
+
+          const stream = response.data;
+          let currentToolCallId = '';
+          let currentToolCallName = '';
+          let currentToolInput = '';
+          let currentToolIndex = -1;
+          let fullContent = '';
+
+          for await (const chunk of stream) {
+            if (abortController.signal.aborted) break;
+
+            const lines = chunk.toString().split('\n').filter(Boolean);
+
+            for (const line of lines) {
+              if (line.startsWith('data: ')) {
+                const data = line.slice(6);
+                if (data === '[DONE]') continue;
+
+                try {
+                  const parsed = JSON.parse(data);
+                  const choice = parsed.choices?.[0];
+
+                  this.logger.debug('Stream event', { type: 'delta', hasToolCalls: !!choice?.delta?.tool_calls });
+
+                  // Handle tool call chunks
+                  if (choice?.delta?.tool_calls?.length > 0) {
+                    for (const toolCall of choice.delta.tool_calls) {
+                      const index = toolCall.index || 0;
+
+                      if (currentToolIndex !== index) {
+                        if (currentToolIndex >= 0) {
+                          let parsedInput = {};
+                          try {
+                            parsedInput = JSON.parse(currentToolInput || '{}');
+                          } catch (e) {
+                            // Keep the raw input
+                          }
+                          subscriber.next({
+                            type: 'tool_use_end',
+                            toolCall: {
+                              id: currentToolCallId,
+                              name: currentToolCallName,
+                              input: parsedInput
+                            }
+                          });
+                          this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
+                        }
+
+                        currentToolIndex = index;
+                        currentToolCallId = toolCall.id || `tool_${Date.now()}_${index}`;
+                        currentToolCallName = toolCall.function?.name || '';
+                        currentToolInput = toolCall.function?.arguments || '';
+
+                        subscriber.next({
+                          type: 'tool_use_start',
+                          toolCall: {
+                            id: currentToolCallId,
+                            name: currentToolCallName,
+                            input: {}
+                          }
+                        });
+                        this.logger.debug('Stream event', { type: 'tool_use_start', name: currentToolCallName });
+                      } else {
+                        if (toolCall.function?.arguments) {
+                          currentToolInput += toolCall.function.arguments;
+                        }
+                      }
+                    }
+                  }
+                  // Handle text deltas
+                  else if (choice?.delta?.content) {
+                    const textDelta = choice.delta.content;
+                    fullContent += textDelta;
+                    subscriber.next({
+                      type: 'text_delta',
+                      textDelta
+                    });
+                  }
+                } catch (e) {
+                  // Ignore parse errors
+                }
+              }
+            }
+          }
+
+          if (currentToolIndex >= 0) {
+            let parsedInput = {};
+            try {
+              parsedInput = JSON.parse(currentToolInput || '{}');
+            } catch (e) {
+              // Keep the raw input
+            }
+            subscriber.next({
+              type: 'tool_use_end',
+              toolCall: {
+                id: currentToolCallId,
+                name: currentToolCallName,
+                input: parsedInput
+              }
+            });
+            this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
+          }
+
+          subscriber.next({
+            type: 'message_end',
+            message: {
+              id: this.generateId(),
+              role: MessageRole.ASSISTANT,
+              content: fullContent,
+              timestamp: new Date()
+            }
+          });
+          this.logger.debug('Stream event', { type: 'message_end', contentLength: fullContent.length });
+          subscriber.complete();
+
+        } catch (error) {
+          const errorMessage = `OpenAI compatible stream failed: ${error instanceof Error ? error.message : String(error)}`;
+          this.logger.error('Stream error', error);
+          subscriber.next({ type: 'error', error: errorMessage });
+          subscriber.error(new Error(errorMessage));
+        }
+      };
+
+      runStream();
+
+      return () => abortController.abort();
+    });
   }
 
   async generateCommand(request: CommandRequest): Promise<CommandResponse> {
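
A consumer of the new streaming API subscribes to the returned Observable and branches on the event type. A minimal usage sketch, assuming an initialized provider instance and the StreamEvent shape inferred above:

```typescript
// Minimal consumption sketch; `provider` and `request` are assumed to be
// an initialized OpenAiCompatibleProviderService and a valid ChatRequest.
const subscription = provider.chatStream(request).subscribe({
  next: (event) => {
    switch (event.type) {
      case 'text_delta':
        process.stdout.write(event.textDelta);                 // incremental assistant text
        break;
      case 'tool_use_start':
        console.log(`tool requested: ${event.toolCall.name}`);
        break;
      case 'tool_use_end':
        console.log('tool arguments:', event.toolCall.input);  // arguments parsed from streamed JSON
        break;
      case 'message_end':
        console.log('\nfull message:', event.message.content);
        break;
      case 'error':
        console.error('stream error:', event.error);
        break;
    }
  },
  error: (err) => console.error('stream failed:', err),
});

// Unsubscribing runs the teardown returned inside the Observable, which sets the
// AbortController flag checked at the top of the read loop and stops further processing.
subscription.unsubscribe();
```

Note that the implementation splits each chunk on newlines without buffering partial lines, so an SSE event split across two chunks would be silently dropped by the JSON.parse catch block; that is worth keeping in mind when debugging missing deltas.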
@@ -175,35 +321,19 @@ export class OpenAiCompatibleProviderService extends BaseAiProvider {
     return this.parseAnalysisResponse(response.message.content);
   }
 
-  async healthCheck(): Promise<HealthStatus> {
-    try {
-      if (!this.client) {
-        return HealthStatus.UNHEALTHY;
-      }
-
-      const response = await this.client.post('/chat/completions', {
-        model: this.config?.model || 'gpt-3.5-turbo',
-        max_tokens: 1,
-        messages: [
-          {
-            role: 'user',
-            content: 'Hi'
-          }
-        ]
-      });
-
-      if (response.status === 200) {
-        this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
-        return HealthStatus.HEALTHY;
-      }
+  protected async sendTestRequest(request: ChatRequest): Promise<ChatResponse> {
+    if (!this.client) {
+      throw new Error('OpenAI compatible client not initialized');
+    }
 
-      return HealthStatus.DEGRADED;
+    const response = await this.client.post('/chat/completions', {
+      model: this.config?.model || 'gpt-3.5-turbo',
+      messages: this.transformMessages(request.messages),
+      max_tokens: request.maxTokens || 1,
+      temperature: request.temperature || 0
+    });
 
-    } catch (error) {
-      this.logger.error('OpenAI compatible health check failed', error);
-      this.lastHealthCheck = { status: HealthStatus.UNHEALTHY, timestamp: new Date() };
-      return HealthStatus.UNHEALTHY;
-    }
+    return this.transformChatResponse(response.data);
   }
 
   validateConfig(): ValidationResult {
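
healthCheck() is no longer implemented in this provider; it now only supplies a protected sendTestRequest(), which suggests the shared health-check flow moved into BaseAiProvider (base-provider.service.ts grows by +315 −21 in this release but is not shown here). Purely as an assumption about that base class, the shared logic plausibly looks something like:

```typescript
// Hypothetical sketch of the shared health check in BaseAiProvider.
// This is an assumption; the real code is in base-provider.service.ts, not shown in this diff.
async healthCheck(): Promise<HealthStatus> {
  try {
    const probe: ChatRequest = {
      messages: [{ role: MessageRole.USER, content: 'Hi' }],  // MessageRole.USER is assumed to exist
      maxTokens: 1,
    };
    await this.sendTestRequest(probe);
    this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
    return HealthStatus.HEALTHY;
  } catch (error) {
    this.logger.error('Provider health check failed', error);
    this.lastHealthCheck = { status: HealthStatus.UNHEALTHY, timestamp: new Date() };
    return HealthStatus.UNHEALTHY;
  }
}
```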
@@ -233,10 +363,6 @@ export class OpenAiCompatibleProviderService extends BaseAiProvider {
     return result;
   }
 
-  protected getDefaultBaseURL(): string {
-    return 'http://localhost:11434/v1';
-  }
-
   protected transformMessages(messages: any[]): any[] {
     return messages.map(msg => ({
       role: msg.role,
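
With getDefaultBaseURL() removed, this provider no longer silently falls back to the Ollama-style http://localhost:11434/v1 endpoint; the endpoint now has to come from the provider configuration. A hypothetical configuration sketch follows; only the model field is confirmed by this diff, the other field names are assumptions about the schema in src/types/provider.types.ts.

```typescript
// Hypothetical config object; `model` appears in this diff, the other
// field names are assumptions about src/types/provider.types.ts.
const config = {
  baseURL: 'https://my-gateway.example.com/v1',  // explicit OpenAI-compatible endpoint (assumed field)
  model: 'gpt-3.5-turbo',
  apiKey: process.env.OPENAI_API_KEY,            // assumed field
};
```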
@@ -262,142 +388,4 @@ export class OpenAiCompatibleProviderService extends BaseAiProvider {
       } : undefined
     };
   }
-
-  private buildCommandPrompt(request: CommandRequest): string {
-    let prompt = `Convert the following natural-language description into an accurate terminal command:\n\n"${request.naturalLanguage}"\n\n`;
-
-    if (request.context) {
-      prompt += `Current environment:\n`;
-      if (request.context.currentDirectory) {
-        prompt += `- Current directory: ${request.context.currentDirectory}\n`;
-      }
-      if (request.context.operatingSystem) {
-        prompt += `- Operating system: ${request.context.operatingSystem}\n`;
-      }
-      if (request.context.shell) {
-        prompt += `- Shell: ${request.context.shell}\n`;
-      }
-    }
-
-    prompt += `\nReturn JSON directly in this format:\n`;
-    prompt += `{\n`;
-    prompt += `  "command": "the exact command",\n`;
-    prompt += `  "explanation": "explanation of the command",\n`;
-    prompt += `  "confidence": 0.95\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  private buildExplainPrompt(request: ExplainRequest): string {
-    let prompt = `Explain the following terminal command in detail:\n\n\`${request.command}\`\n\n`;
-
-    if (request.context?.currentDirectory) {
-      prompt += `Current directory: ${request.context.currentDirectory}\n`;
-    }
-    if (request.context?.operatingSystem) {
-      prompt += `Operating system: ${request.context.operatingSystem}\n`;
-    }
-
-    prompt += `\nReturn a response in the following JSON format:\n`;
-    prompt += `{\n`;
-    prompt += `  "explanation": "overall explanation",\n`;
-    prompt += `  "breakdown": [\n`;
-    prompt += `    {"part": "command part", "description": "what it does"}\n`;
-    prompt += `  ],\n`;
-    prompt += `  "examples": ["usage example"]\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  private buildAnalysisPrompt(request: AnalysisRequest): string {
-    let prompt = `Analyze the result of the following command execution:\n\n`;
-    prompt += `Command: ${request.command}\n`;
-    prompt += `Exit code: ${request.exitCode}\n`;
-    prompt += `Output:\n${request.output}\n\n`;
-
-    if (request.context?.workingDirectory) {
-      prompt += `Working directory: ${request.context.workingDirectory}\n`;
-    }
-
-    prompt += `\nReturn a response in the following JSON format:\n`;
-    prompt += `{\n`;
-    prompt += `  "summary": "summary of the result",\n`;
-    prompt += `  "insights": ["insight 1", "insight 2"],\n`;
-    prompt += `  "success": true/false,\n`;
-    prompt += `  "issues": [\n`;
-    prompt += `    {"severity": "warning|error|info", "message": "issue description", "suggestion": "suggested fix"}\n`;
-    prompt += `  ]\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  private parseCommandResponse(content: string): CommandResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          command: parsed.command || '',
-          explanation: parsed.explanation || '',
-          confidence: parsed.confidence || 0.5
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse command response as JSON', error);
-    }
-
-    const lines = content.split('\n').map(l => l.trim()).filter(l => l);
-    return {
-      command: lines[0] || '',
-      explanation: lines.slice(1).join(' ') || 'AI-generated command',
-      confidence: 0.5
-    };
-  }
-
-  private parseExplainResponse(content: string): ExplainResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          explanation: parsed.explanation || '',
-          breakdown: parsed.breakdown || [],
-          examples: parsed.examples || []
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse explain response as JSON', error);
-    }
-
-    return {
-      explanation: content,
-      breakdown: []
-    };
-  }
-
-  private parseAnalysisResponse(content: string): AnalysisResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          summary: parsed.summary || '',
-          insights: parsed.insights || [],
-          success: parsed.success !== false,
-          issues: parsed.issues || []
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse analysis response as JSON', error);
-    }
-
-    return {
-      summary: content,
-      insights: [],
-      success: true
-    };
-  }
-}
+}