tabby-ai-assistant 1.0.12 → 1.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.editorconfig +18 -0
  2. package/README.md +113 -55
  3. package/dist/index.js +1 -1
  4. package/package.json +6 -4
  5. package/src/components/chat/ai-sidebar.component.scss +220 -9
  6. package/src/components/chat/ai-sidebar.component.ts +364 -29
  7. package/src/components/chat/chat-input.component.ts +36 -4
  8. package/src/components/chat/chat-interface.component.ts +225 -5
  9. package/src/components/chat/chat-message.component.ts +6 -1
  10. package/src/components/settings/context-settings.component.ts +91 -91
  11. package/src/components/terminal/ai-toolbar-button.component.ts +4 -2
  12. package/src/components/terminal/command-suggestion.component.ts +148 -6
  13. package/src/index.ts +0 -6
  14. package/src/providers/tabby/ai-toolbar-button.provider.ts +7 -3
  15. package/src/services/chat/ai-sidebar.service.ts +414 -410
  16. package/src/services/chat/chat-session.service.ts +36 -12
  17. package/src/services/context/compaction.ts +110 -134
  18. package/src/services/context/manager.ts +27 -7
  19. package/src/services/context/memory.ts +17 -33
  20. package/src/services/context/summary.service.ts +136 -0
  21. package/src/services/core/ai-assistant.service.ts +1060 -37
  22. package/src/services/core/ai-provider-manager.service.ts +154 -25
  23. package/src/services/core/checkpoint.service.ts +218 -18
  24. package/src/services/core/config-provider.service.ts +4 -12
  25. package/src/services/core/toast.service.ts +106 -106
  26. package/src/services/providers/anthropic-provider.service.ts +126 -202
  27. package/src/services/providers/base-provider.service.ts +315 -21
  28. package/src/services/providers/glm-provider.service.ts +151 -233
  29. package/src/services/providers/minimax-provider.service.ts +55 -238
  30. package/src/services/providers/ollama-provider.service.ts +117 -188
  31. package/src/services/providers/openai-compatible.service.ts +165 -177
  32. package/src/services/providers/openai-provider.service.ts +170 -177
  33. package/src/services/providers/vllm-provider.service.ts +116 -188
  34. package/src/services/terminal/terminal-context.service.ts +265 -5
  35. package/src/services/terminal/terminal-manager.service.ts +748 -748
  36. package/src/services/terminal/terminal-tools.service.ts +612 -441
  37. package/src/types/ai.types.ts +156 -3
  38. package/src/types/provider.types.ts +206 -75
  39. package/src/utils/cost.utils.ts +249 -0
  40. package/src/utils/validation.utils.ts +306 -2
  41. package/dist/index.js.LICENSE.txt +0 -18
  42. package/src/index.ts.backup +0 -165
  43. package/src/services/chat/chat-history.service.ts.backup +0 -239
  44. package/src/services/terminal/command-analyzer.service.ts +0 -43
  45. package/src/services/terminal/context-menu.service.ts +0 -45
  46. package/src/services/terminal/hotkey.service.ts +0 -53
  47. package/webpack.config.js.backup +0 -57
@@ -1,7 +1,7 @@
1
1
  import { Injectable } from '@angular/core';
2
- import { Observable, Observer, from } from 'rxjs';
2
+ import { Observable, Observer } from 'rxjs';
3
3
  import { BaseAiProvider } from './base-provider.service';
4
- import { ProviderCapability, HealthStatus, ValidationResult } from '../../types/provider.types';
4
+ import { ProviderCapability, ValidationResult } from '../../types/provider.types';
5
5
  import { ChatRequest, ChatResponse, StreamEvent, MessageRole, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse } from '../../types/ai.types';
6
6
  import { LoggerService } from '../core/logger.service';
7
7
 
@@ -28,10 +28,6 @@ export class OllamaProviderService extends BaseAiProvider {
28
28
  super(logger);
29
29
  }
30
30
 
31
- protected getDefaultBaseURL(): string {
32
- return 'http://localhost:11434/v1';
33
- }
34
-
35
31
  /**
36
32
  * 非流式聊天
37
33
  */
@@ -78,7 +74,7 @@ export class OllamaProviderService extends BaseAiProvider {
78
74
  }
79
75
 
80
76
  /**
81
- * 流式聊天
77
+ * 流式聊天功能 - 支持工具调用事件
82
78
  */
83
79
  chatStream(request: ChatRequest): Observable<StreamEvent> {
84
80
  return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
@@ -112,9 +108,16 @@ export class OllamaProviderService extends BaseAiProvider {
112
108
  throw new Error('No response body');
113
109
  }
114
110
 
111
+ // 工具调用状态跟踪
112
+ let currentToolCallId = '';
113
+ let currentToolCallName = '';
114
+ let currentToolInput = '';
115
+ let currentToolIndex = -1;
115
116
  let fullContent = '';
116
117
 
117
118
  while (true) {
119
+ if (abortController.signal.aborted) break;
120
+
118
121
  const { done, value } = await reader.read();
119
122
  if (done) break;
120
123
 
@@ -127,8 +130,62 @@ export class OllamaProviderService extends BaseAiProvider {
127
130
 
128
131
  try {
129
132
  const parsed = JSON.parse(data);
130
- const delta = parsed.choices[0]?.delta?.content;
131
- if (delta) {
133
+ const choice = parsed.choices?.[0];
134
+
135
+ this.logger.debug('Stream event', { type: 'delta', hasToolCalls: !!choice?.delta?.tool_calls });
136
+
137
+ // 处理工具调用块
138
+ if (choice?.delta?.tool_calls?.length > 0) {
139
+ for (const toolCall of choice.delta.tool_calls) {
140
+ const index = toolCall.index || 0;
141
+
142
+ // 新工具调用开始
143
+ if (currentToolIndex !== index) {
144
+ if (currentToolIndex >= 0) {
145
+ // 发送前一个工具调用的结束事件
146
+ let parsedInput = {};
147
+ try {
148
+ parsedInput = JSON.parse(currentToolInput || '{}');
149
+ } catch (e) {
150
+ // 使用原始输入
151
+ }
152
+ subscriber.next({
153
+ type: 'tool_use_end',
154
+ toolCall: {
155
+ id: currentToolCallId,
156
+ name: currentToolCallName,
157
+ input: parsedInput
158
+ }
159
+ });
160
+ this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
161
+ }
162
+
163
+ currentToolIndex = index;
164
+ currentToolCallId = toolCall.id || `tool_${Date.now()}_${index}`;
165
+ currentToolCallName = toolCall.function?.name || '';
166
+ currentToolInput = toolCall.function?.arguments || '';
167
+
168
+ // 发送工具调用开始事件
169
+ subscriber.next({
170
+ type: 'tool_use_start',
171
+ toolCall: {
172
+ id: currentToolCallId,
173
+ name: currentToolCallName,
174
+ input: {}
175
+ }
176
+ });
177
+ this.logger.debug('Stream event', { type: 'tool_use_start', name: currentToolCallName });
178
+ } else {
179
+ // 继续累积参数
180
+ if (toolCall.function?.arguments) {
181
+ currentToolInput += toolCall.function.arguments;
182
+ }
183
+ }
184
+ }
185
+ }
186
+ // 处理文本增量
187
+ else if (choice?.delta?.content) {
188
+ const delta = choice.delta.content;
132
189
  fullContent += delta;
133
190
  subscriber.next({
134
191
  type: 'text_delta',
@@ -141,6 +198,25 @@ export class OllamaProviderService extends BaseAiProvider {
141
198
  }
142
199
  }
143
200
 
201
+ // 发送最后一个工具调用的结束事件
202
+ if (currentToolIndex >= 0) {
203
+ let parsedInput = {};
204
+ try {
205
+ parsedInput = JSON.parse(currentToolInput || '{}');
206
+ } catch (e) {
207
+ // 使用原始输入
208
+ }
209
+ subscriber.next({
210
+ type: 'tool_use_end',
211
+ toolCall: {
212
+ id: currentToolCallId,
213
+ name: currentToolCallName,
214
+ input: parsedInput
215
+ }
216
+ });
217
+ this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
218
+ }
219
+
144
220
  subscriber.next({
145
221
  type: 'message_end',
146
222
  message: {
@@ -150,11 +226,14 @@ export class OllamaProviderService extends BaseAiProvider {
150
226
  timestamp: new Date()
151
227
  }
152
228
  });
229
+ this.logger.debug('Stream event', { type: 'message_end', contentLength: fullContent.length });
153
230
  subscriber.complete();
154
231
  } catch (error) {
155
232
  if ((error as any).name !== 'AbortError') {
233
+ const errorMessage = `Ollama stream failed: ${error instanceof Error ? error.message : String(error)}`;
156
234
  this.logError(error, { request });
157
- subscriber.error(new Error(`Ollama stream failed: ${error instanceof Error ? error.message : String(error)}`));
235
+ subscriber.next({ type: 'error', error: errorMessage });
236
+ subscriber.error(new Error(errorMessage));
158
237
  }
159
238
  }
160
239
  };
@@ -166,30 +245,36 @@ export class OllamaProviderService extends BaseAiProvider {
166
245
  });
167
246
  }
168
247
 
169
- /**
170
- * 健康检查 - 检测 Ollama 服务是否运行
171
- */
172
- async healthCheck(): Promise<HealthStatus> {
173
- try {
174
- const controller = new AbortController();
175
- const timeoutId = setTimeout(() => controller.abort(), 5000);
176
-
177
- const response = await fetch(`${this.getBaseURL()}/models`, {
178
- method: 'GET',
179
- signal: controller.signal
180
- });
181
-
182
- clearTimeout(timeoutId);
248
+ protected async sendTestRequest(request: ChatRequest): Promise<ChatResponse> {
249
+ const response = await fetch(`${this.getBaseURL()}/chat/completions`, {
250
+ method: 'POST',
251
+ headers: { 'Content-Type': 'application/json' },
252
+ body: JSON.stringify({
253
+ model: this.config?.model || 'llama3.1',
254
+ messages: this.transformMessages(request.messages),
255
+ max_tokens: request.maxTokens || 1,
256
+ temperature: request.temperature || 0
257
+ })
258
+ });
183
259
 
184
- if (response.ok) {
185
- this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
186
- return HealthStatus.HEALTHY;
187
- }
188
- return HealthStatus.UNHEALTHY;
189
- } catch (error) {
190
- this.logger.warn('Ollama health check failed', error);
191
- return HealthStatus.UNHEALTHY;
260
+ if (!response.ok) {
261
+ throw new Error(`Ollama API error: ${response.status}`);
192
262
  }
263
+
264
+ const data = await response.json();
265
+ return {
266
+ message: {
267
+ id: this.generateId(),
268
+ role: MessageRole.ASSISTANT,
269
+ content: data.choices[0]?.message?.content || '',
270
+ timestamp: new Date()
271
+ },
272
+ usage: data.usage ? {
273
+ promptTokens: data.usage.prompt_tokens,
274
+ completionTokens: data.usage.completion_tokens,
275
+ totalTokens: data.usage.total_tokens
276
+ } : undefined
277
+ };
193
278
  }
194
279
 
195
280
  /**
@@ -286,160 +371,4 @@ export class OllamaProviderService extends BaseAiProvider {
286
371
  content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
287
372
  }));
288
373
  }
289
-
290
- /**
291
- * 构建命令生成提示
292
- */
293
- private buildCommandPrompt(request: CommandRequest): string {
294
- let prompt = `请将以下自然语言描述转换为准确的终端命令:\n\n"${request.naturalLanguage}"\n\n`;
295
-
296
- if (request.context) {
297
- prompt += `当前环境:\n`;
298
- if (request.context.currentDirectory) {
299
- prompt += `- 当前目录:${request.context.currentDirectory}\n`;
300
- }
301
- if (request.context.operatingSystem) {
302
- prompt += `- 操作系统:${request.context.operatingSystem}\n`;
303
- }
304
- if (request.context.shell) {
305
- prompt += `- Shell:${request.context.shell}\n`;
306
- }
307
- }
308
-
309
- prompt += `\n请直接返回JSON格式:\n`;
310
- prompt += `{\n`;
311
- prompt += ` "command": "具体命令",\n`;
312
- prompt += ` "explanation": "命令解释",\n`;
313
- prompt += ` "confidence": 0.95\n`;
314
- prompt += `}\n`;
315
-
316
- return prompt;
317
- }
318
-
319
- /**
320
- * 构建命令解释提示
321
- */
322
- private buildExplainPrompt(request: ExplainRequest): string {
323
- let prompt = `请详细解释以下终端命令:\n\n\`${request.command}\`\n\n`;
324
-
325
- if (request.context?.currentDirectory) {
326
- prompt += `当前目录:${request.context.currentDirectory}\n`;
327
- }
328
- if (request.context?.operatingSystem) {
329
- prompt += `操作系统:${request.context.operatingSystem}\n`;
330
- }
331
-
332
- prompt += `\n请按以下JSON格式返回:\n`;
333
- prompt += `{\n`;
334
- prompt += ` "explanation": "整体解释",\n`;
335
- prompt += ` "breakdown": [\n`;
336
- prompt += ` {"part": "命令部分", "description": "说明"}\n`;
337
- prompt += ` ],\n`;
338
- prompt += ` "examples": ["使用示例"]\n`;
339
- prompt += `}\n`;
340
-
341
- return prompt;
342
- }
343
-
344
- /**
345
- * 构建结果分析提示
346
- */
347
- private buildAnalysisPrompt(request: AnalysisRequest): string {
348
- let prompt = `请分析以下命令执行结果:\n\n`;
349
- prompt += `命令:${request.command}\n`;
350
- prompt += `退出码:${request.exitCode}\n`;
351
- prompt += `输出:\n${request.output}\n\n`;
352
-
353
- if (request.context?.workingDirectory) {
354
- prompt += `工作目录:${request.context.workingDirectory}\n`;
355
- }
356
-
357
- prompt += `\n请按以下JSON格式返回:\n`;
358
- prompt += `{\n`;
359
- prompt += ` "summary": "结果总结",\n`;
360
- prompt += ` "insights": ["洞察1", "洞察2"],\n`;
361
- prompt += ` "success": true/false,\n`;
362
- prompt += ` "issues": [\n`;
363
- prompt += ` {"severity": "warning|error|info", "message": "问题描述", "suggestion": "建议"}\n`;
364
- prompt += ` ]\n`;
365
- prompt += `}\n`;
366
-
367
- return prompt;
368
- }
369
-
370
- /**
371
- * 解析命令响应
372
- */
373
- private parseCommandResponse(content: string): CommandResponse {
374
- try {
375
- const match = content.match(/\{[\s\S]*\}/);
376
- if (match) {
377
- const parsed = JSON.parse(match[0]);
378
- return {
379
- command: parsed.command || '',
380
- explanation: parsed.explanation || '',
381
- confidence: parsed.confidence || 0.5
382
- };
383
- }
384
- } catch (error) {
385
- this.logger.warn('Failed to parse Ollama command response as JSON', error);
386
- }
387
-
388
- const lines = content.split('\n').map(l => l.trim()).filter(l => l);
389
- return {
390
- command: lines[0] || '',
391
- explanation: lines.slice(1).join(' ') || 'AI生成的命令',
392
- confidence: 0.5
393
- };
394
- }
395
-
396
- /**
397
- * 解析解释响应
398
- */
399
- private parseExplainResponse(content: string): ExplainResponse {
400
- try {
401
- const match = content.match(/\{[\s\S]*\}/);
402
- if (match) {
403
- const parsed = JSON.parse(match[0]);
404
- return {
405
- explanation: parsed.explanation || '',
406
- breakdown: parsed.breakdown || [],
407
- examples: parsed.examples || []
408
- };
409
- }
410
- } catch (error) {
411
- this.logger.warn('Failed to parse Ollama explain response as JSON', error);
412
- }
413
-
414
- return {
415
- explanation: content,
416
- breakdown: []
417
- };
418
- }
419
-
420
- /**
421
- * 解析分析响应
422
- */
423
- private parseAnalysisResponse(content: string): AnalysisResponse {
424
- try {
425
- const match = content.match(/\{[\s\S]*\}/);
426
- if (match) {
427
- const parsed = JSON.parse(match[0]);
428
- return {
429
- summary: parsed.summary || '',
430
- insights: parsed.insights || [],
431
- success: parsed.success !== false,
432
- issues: parsed.issues || []
433
- };
434
- }
435
- } catch (error) {
436
- this.logger.warn('Failed to parse Ollama analysis response as JSON', error);
437
- }
438
-
439
- return {
440
- summary: content,
441
- insights: [],
442
- success: true
443
- };
444
- }
445
374
  }