tabby-ai-assistant 1.0.13 → 1.0.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.editorconfig +18 -0
- package/dist/index.js +1 -1
- package/package.json +6 -4
- package/src/components/chat/ai-sidebar.component.scss +220 -9
- package/src/components/chat/ai-sidebar.component.ts +364 -29
- package/src/components/chat/chat-input.component.ts +36 -4
- package/src/components/chat/chat-interface.component.ts +225 -5
- package/src/components/chat/chat-message.component.ts +6 -1
- package/src/components/settings/context-settings.component.ts +91 -91
- package/src/components/terminal/ai-toolbar-button.component.ts +4 -2
- package/src/components/terminal/command-suggestion.component.ts +148 -6
- package/src/index.ts +0 -6
- package/src/providers/tabby/ai-toolbar-button.provider.ts +7 -3
- package/src/services/chat/ai-sidebar.service.ts +414 -410
- package/src/services/chat/chat-session.service.ts +36 -12
- package/src/services/context/compaction.ts +110 -134
- package/src/services/context/manager.ts +27 -7
- package/src/services/context/memory.ts +17 -33
- package/src/services/context/summary.service.ts +136 -0
- package/src/services/core/ai-assistant.service.ts +1060 -37
- package/src/services/core/ai-provider-manager.service.ts +154 -25
- package/src/services/core/checkpoint.service.ts +218 -18
- package/src/services/core/toast.service.ts +106 -106
- package/src/services/providers/anthropic-provider.service.ts +126 -30
- package/src/services/providers/base-provider.service.ts +90 -7
- package/src/services/providers/glm-provider.service.ts +151 -38
- package/src/services/providers/minimax-provider.service.ts +55 -40
- package/src/services/providers/ollama-provider.service.ts +117 -28
- package/src/services/providers/openai-compatible.service.ts +164 -34
- package/src/services/providers/openai-provider.service.ts +169 -34
- package/src/services/providers/vllm-provider.service.ts +116 -28
- package/src/services/terminal/terminal-context.service.ts +265 -5
- package/src/services/terminal/terminal-manager.service.ts +748 -748
- package/src/services/terminal/terminal-tools.service.ts +612 -441
- package/src/types/ai.types.ts +156 -3
- package/src/utils/cost.utils.ts +249 -0
- package/src/utils/validation.utils.ts +306 -2
- package/dist/index.js.LICENSE.txt +0 -18
- package/src/services/terminal/command-analyzer.service.ts +0 -43
- package/src/services/terminal/context-menu.service.ts +0 -45
- package/src/services/terminal/hotkey.service.ts +0 -53
package/src/services/providers/glm-provider.service.ts

@@ -1,9 +1,9 @@
 import { Injectable } from '@angular/core';
-import { Observable, from } from 'rxjs';
+import { Observable, Observer, from } from 'rxjs';
 import axios, { AxiosInstance } from 'axios';
 import { BaseAiProvider } from './base-provider.service';
-import { ProviderCapability,
-import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole } from '../../types/ai.types';
+import { ProviderCapability, ValidationResult } from '../../types/provider.types';
+import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole, StreamEvent } from '../../types/ai.types';
 import { LoggerService } from '../core/logger.service';
 
 /**
@@ -137,11 +137,144 @@ export class GlmProviderService extends BaseAiProvider {
   }
 
   /**
-   * Streaming chat -
+   * Streaming chat - supports tool-call events
    */
-  chatStream(request: ChatRequest): Observable<
-
-
+  chatStream(request: ChatRequest): Observable<StreamEvent> {
+    return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
+      if (!this.client) {
+        const error = new Error('GLM client not initialized');
+        subscriber.next({ type: 'error', error: error.message });
+        subscriber.error(error);
+        return;
+      }
+
+      let currentToolId = '';
+      let currentToolName = '';
+      let currentToolInput = '';
+      let fullContent = '';
+
+      const abortController = new AbortController();
+
+      const runStream = async () => {
+        try {
+          const response = await this.client!.post('/v1/messages', {
+            model: this.config?.model || 'glm-4.6',
+            max_tokens: request.maxTokens || 1000,
+            system: request.systemPrompt || this.getDefaultSystemPrompt(),
+            messages: this.transformMessages(request.messages),
+            temperature: request.temperature || 0.95,
+            stream: true
+          }, {
+            responseType: 'stream'
+          });
+
+          const stream = response.data;
+          const decoder = new TextDecoder();
+          let buffer = '';
+
+          for await (const chunk of stream) {
+            if (abortController.signal.aborted) break;
+
+            buffer += decoder.decode(chunk, { stream: true });
+            const lines = buffer.split('\n');
+            buffer = lines.pop() || '';
+
+            for (const line of lines) {
+              if (line.startsWith('data:')) {
+                const data = line.slice(5).trim();
+                if (data === '[DONE]') continue;
+
+                try {
+                  const parsed = JSON.parse(data);
+                  const eventType = parsed.type;
+                  const eventData = parsed;
+
+                  this.logger.debug('Stream event', { type: eventType });
+
+                  // Handle text deltas
+                  if (eventType === 'content_block_delta' && eventData.delta?.type === 'text_delta') {
+                    const textDelta = eventData.delta.text;
+                    fullContent += textDelta;
+                    subscriber.next({
+                      type: 'text_delta',
+                      textDelta
+                    });
+                  }
+                  // Handle tool-call start
+                  else if (eventType === 'content_block_start' && eventData.content_block?.type === 'tool_use') {
+                    currentToolId = eventData.content_block.id || `tool_${Date.now()}`;
+                    currentToolName = eventData.content_block.name;
+                    currentToolInput = '';
+                    subscriber.next({
+                      type: 'tool_use_start',
+                      toolCall: {
+                        id: currentToolId,
+                        name: currentToolName,
+                        input: {}
+                      }
+                    });
+                    this.logger.debug('Stream event', { type: 'tool_use_start', name: currentToolName });
+                  }
+                  // Accumulate tool-call arguments
+                  else if (eventType === 'content_block_delta' && eventData.delta?.type === 'input_json_delta') {
+                    currentToolInput += eventData.delta.partial_json || '';
+                  }
+                  // Handle tool-call end
+                  else if (eventType === 'content_block_stop') {
+                    if (currentToolId && currentToolName) {
+                      let parsedInput = {};
+                      try {
+                        parsedInput = JSON.parse(currentToolInput || '{}');
+                      } catch (e) {
+                        // Fall back to the raw input
+                      }
+                      subscriber.next({
+                        type: 'tool_use_end',
+                        toolCall: {
+                          id: currentToolId,
+                          name: currentToolName,
+                          input: parsedInput
+                        }
+                      });
+                      this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolName });
+                      currentToolId = '';
+                      currentToolName = '';
+                      currentToolInput = '';
+                    }
+                  }
+                } catch (e) {
+                  // Ignore parse errors
+                }
+              }
+            }
+          }
+
+          subscriber.next({
+            type: 'message_end',
+            message: {
+              id: this.generateId(),
+              role: MessageRole.ASSISTANT,
+              content: fullContent,
+              timestamp: new Date()
+            }
+          });
+          this.logger.debug('Stream event', { type: 'message_end', contentLength: fullContent.length });
+          subscriber.complete();
+
+        } catch (error) {
+          if ((error as any).name !== 'AbortError') {
+            const errorMessage = `GLM stream failed: ${error instanceof Error ? error.message : String(error)}`;
+            this.logger.error('Stream error', error);
+            subscriber.next({ type: 'error', error: errorMessage });
+            subscriber.error(new Error(errorMessage));
+          }
+        }
+      };
+
+      runStream();
+
+      return () => abortController.abort();
+    });
   }
 
   /**
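The core of the new GLM implementation is line-buffered SSE parsing: each chunk is decoded incrementally, split on newlines, and the trailing partial line is carried into the next iteration. Distilled out of the provider class, the pattern looks like this (a sketch for illustration, not part of the package):

```typescript
// Minimal sketch of the line-buffered SSE parsing used above, detached from
// the provider class for clarity.
async function* sseEvents(stream: AsyncIterable<Uint8Array>): AsyncGenerator<unknown> {
  const decoder = new TextDecoder();
  let buffer = '';
  for await (const chunk of stream) {
    buffer += decoder.decode(chunk, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() || ''; // keep the partial trailing line for the next chunk
    for (const line of lines) {
      if (!line.startsWith('data:')) continue;
      const data = line.slice(5).trim();
      if (data === '[DONE]') continue;
      try {
        yield JSON.parse(data);
      } catch {
        // ignore malformed frames, as the provider does
      }
    }
  }
}
```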
@@ -213,39 +346,19 @@ export class GlmProviderService extends BaseAiProvider {
     return this.parseAnalysisResponse(response.message.content);
   }
 
-
-
-
-
-    try {
-      if (!this.client) {
-        return HealthStatus.UNHEALTHY;
-      }
-
-      // Simple test request
-      const response = await this.client.post('/v1/messages', {
-        model: this.config?.model || 'glm-4.6',
-        max_tokens: 1,
-        messages: [
-          {
-            role: 'user',
-            content: [{ type: 'text', text: 'Hi' }]
-          }
-        ]
-      });
-
-      if (response.status === 200) {
-        this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
-        return HealthStatus.HEALTHY;
-      }
+  protected async sendTestRequest(request: ChatRequest): Promise<ChatResponse> {
+    if (!this.client) {
+      throw new Error('GLM client not initialized');
+    }
 
-
+    const response = await this.client.post('/v1/messages', {
+      model: this.config?.model || 'glm-4.6',
+      max_tokens: request.maxTokens || 1,
+      messages: this.transformMessages(request.messages),
+      temperature: request.temperature || 0
+    });
 
-
-      this.logger.error('GLM health check failed', error);
-      this.lastHealthCheck = { status: HealthStatus.UNHEALTHY, timestamp: new Date() };
-      return HealthStatus.UNHEALTHY;
-    }
+    return this.transformChatResponse(response.data);
   }
 
   /**
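Across all three providers in this diff, the bespoke `healthCheck` bodies are replaced by a `sendTestRequest` hook, which implies the shared health-check logic moved into `BaseAiProvider` (that file gains +90 lines but is not shown here). A hedged sketch of how the base class plausibly drives it; everything except the `sendTestRequest` signature is an assumption:

```typescript
// Hedged sketch: how the updated BaseAiProvider plausibly drives health
// checks through the new sendTestRequest hook. Types are reduced to the
// fields the sketch needs; the real ones live in ai.types.ts.
interface ChatRequest { messages: Array<{ role: string; content: string }>; maxTokens?: number; temperature?: number; }
interface ChatResponse { message: { content: string }; }

abstract class BaseAiProviderSketch {
  protected abstract sendTestRequest(request: ChatRequest): Promise<ChatResponse>;

  async healthCheck(): Promise<'healthy' | 'unhealthy'> {
    try {
      // A one-token probe, mirroring the removed per-provider health checks
      await this.sendTestRequest({
        messages: [{ role: 'user', content: 'Hi' }],
        maxTokens: 1,
        temperature: 0,
      });
      return 'healthy';
    } catch {
      return 'unhealthy';
    }
  }
}
```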
package/src/services/providers/minimax-provider.service.ts

@@ -2,7 +2,7 @@ import { Injectable } from '@angular/core';
 import { Observable, Observer } from 'rxjs';
 import { Anthropic } from '@anthropic-ai/sdk';
 import { BaseAiProvider } from './base-provider.service';
-import { ProviderCapability,
+import { ProviderCapability, ValidationResult } from '../../types/provider.types';
 import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole, StreamEvent } from '../../types/ai.types';
 import { LoggerService } from '../core/logger.service';
 
@@ -116,12 +116,16 @@ export class MinimaxProviderService extends BaseAiProvider {
   chatStream(request: ChatRequest): Observable<StreamEvent> {
     return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
       if (!this.client) {
-
+        const error = new Error('Minimax client not initialized');
+        subscriber.next({ type: 'error', error: error.message });
+        subscriber.error(error);
         return;
       }
 
       this.logRequest(request);
 
+      const abortController = new AbortController();
+
       const runStream = async () => {
         try {
           // Note: the SDK type definitions may not include tools, but the API does support them
@@ -141,6 +145,10 @@ export class MinimaxProviderService extends BaseAiProvider {
           let currentToolInput = '';
 
           for await (const event of stream) {
+            if (abortController.signal.aborted) break;
+
+            this.logger.debug('Stream event', { type: event.type });
+
             if (event.type === 'content_block_delta') {
               const delta = event.delta as any;
               // Text delta
@@ -162,7 +170,15 @@ export class MinimaxProviderService extends BaseAiProvider {
                 currentToolId = block.id;
                 currentToolName = block.name;
                 currentToolInput = '';
-                subscriber.next({
+                subscriber.next({
+                  type: 'tool_use_start',
+                  toolCall: {
+                    id: currentToolId,
+                    name: currentToolName,
+                    input: {}
+                  }
+                });
+                this.logger.debug('Stream event', { type: 'tool_use_start', name: currentToolName });
               }
             }
             // Content block end
@@ -183,6 +199,7 @@ export class MinimaxProviderService extends BaseAiProvider {
                     input: parsedInput
                   }
                 });
+                this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolName });
                 // Reset
                 currentToolId = '';
                 currentToolName = '';
@@ -197,19 +214,22 @@ export class MinimaxProviderService extends BaseAiProvider {
             type: 'message_end',
             message: this.transformChatResponse(finalMessage).message
           });
+          this.logger.debug('Stream event', { type: 'message_end' });
           subscriber.complete();
         } catch (error) {
-
-
+          if ((error as any).name !== 'AbortError') {
+            const errorMessage = `Minimax stream failed: ${error instanceof Error ? error.message : String(error)}`;
+            this.logError(error, { request });
+            subscriber.next({ type: 'error', error: errorMessage });
+            subscriber.error(new Error(errorMessage));
+          }
         }
       };
 
       runStream();
 
       // Return the unsubscribe handler
-      return () =>
-        this.logger.debug('Stream subscription cancelled');
-      };
+      return () => abortController.abort();
     });
   }
 
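With this change, unsubscribing actually cancels the in-flight request via the `AbortController` instead of merely logging, as the removed teardown did. A usage sketch; `provider` and `request` are placeholders standing in for any provider from this diff:

```typescript
import type { Observable } from 'rxjs';

// Placeholders for illustration; any provider in this diff fits this shape.
declare const provider: { chatStream(req: unknown): Observable<any> };
declare const request: unknown;

const subscription = provider.chatStream(request).subscribe({
  next: (event) => {
    if (event.type === 'text_delta') process.stdout.write(event.textDelta);
  },
  error: (err) => console.error('stream failed:', err),
  complete: () => console.log('stream finished'),
});

// Later, e.g. when the user closes the chat panel:
subscription.unsubscribe(); // runs the teardown, which calls abortController.abort()
```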
@@ -282,35 +302,19 @@ export class MinimaxProviderService extends BaseAiProvider {
     return this.parseAnalysisResponse(response.message.content);
   }
 
-
-
-
-
-    try {
-      if (!this.client) {
-        return HealthStatus.UNHEALTHY;
-      }
-
-      // Simple test request
-      const _response = await this.client.messages.create({
-        model: this.config?.model || 'MiniMax-M2',
-        max_tokens: 1,
-        messages: [
-          {
-            role: 'user',
-            content: 'Hi'
-          }
-        ]
-      });
+  protected async sendTestRequest(request: ChatRequest): Promise<ChatResponse> {
+    if (!this.client) {
+      throw new Error('Minimax client not initialized');
+    }
 
-
-
+    const response = await this.client.messages.create({
+      model: this.config?.model || 'MiniMax-M2',
+      max_tokens: request.maxTokens || 1,
+      messages: this.transformMessages(request.messages),
+      temperature: request.temperature || 0
+    });
 
-
-      this.logger.error('Minimax health check failed', error);
-      this.lastHealthCheck = { status: HealthStatus.UNHEALTHY, timestamp: new Date() };
-      return HealthStatus.UNHEALTHY;
-    }
+    return this.transformChatResponse(response);
   }
 
   /**
@@ -343,8 +347,9 @@ export class MinimaxProviderService extends BaseAiProvider {
    */
   protected transformMessages(messages: any[]): any[] {
     // Filter out system messages (the system role must not appear in the messages array)
+    // Keep messages with the user, assistant, and tool roles
     const filteredMessages = messages.filter(msg =>
-      msg.role === 'user' || msg.role === 'assistant'
+      msg.role === 'user' || msg.role === 'assistant' || msg.role === 'tool'
     );
 
     this.logger.info('Transforming messages', {
@@ -353,10 +358,20 @@ export class MinimaxProviderService extends BaseAiProvider {
       roles: messages.map(m => m.role)
     });
 
-    return filteredMessages.map(msg =>
-
-
-
+    return filteredMessages.map(msg => {
+      // Convert tool-role messages to the user role (the Anthropic API has no tool role),
+      // but keep the tool-result marker in the content so the model can recognize the tool output
+      if (msg.role === 'tool') {
+        return {
+          role: 'user',
+          content: String(msg.content || '')
+        };
+      }
+      return {
+        role: msg.role === 'user' ? 'user' : 'assistant',
+        content: String(msg.content || '')
+      };
+    });
   }
 
   /**
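With this change, tool results survive the role filter by being re-labeled as user messages. A distilled illustration of the new mapping (the sample values are invented):

```typescript
// Distilled version of the new filter + map, with invented sample data.
const history = [
  { role: 'system', content: 'You are a terminal assistant' },   // filtered out
  { role: 'user', content: 'list files' },
  { role: 'assistant', content: 'Running ls...' },
  { role: 'tool', content: '[Tool result] file1.txt  file2.txt' }, // re-labeled below
];

const transformed = history
  .filter(m => m.role === 'user' || m.role === 'assistant' || m.role === 'tool')
  .map(m => ({
    role: m.role === 'assistant' ? 'assistant' : 'user', // tool -> user
    content: String(m.content || ''),
  }));

// transformed === [
//   { role: 'user', content: 'list files' },
//   { role: 'assistant', content: 'Running ls...' },
//   { role: 'user', content: '[Tool result] file1.txt  file2.txt' },
// ]
```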
package/src/services/providers/ollama-provider.service.ts

@@ -1,7 +1,7 @@
 import { Injectable } from '@angular/core';
-import { Observable, Observer
+import { Observable, Observer } from 'rxjs';
 import { BaseAiProvider } from './base-provider.service';
-import { ProviderCapability,
+import { ProviderCapability, ValidationResult } from '../../types/provider.types';
 import { ChatRequest, ChatResponse, StreamEvent, MessageRole, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse } from '../../types/ai.types';
 import { LoggerService } from '../core/logger.service';
 
@@ -74,7 +74,7 @@ export class OllamaProviderService extends BaseAiProvider {
   }
 
   /**
-   *
+   * Streaming chat - supports tool-call events
    */
   chatStream(request: ChatRequest): Observable<StreamEvent> {
     return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
@@ -108,9 +108,16 @@ export class OllamaProviderService extends BaseAiProvider {
             throw new Error('No response body');
           }
 
+          // Tool-call state tracking
+          let currentToolCallId = '';
+          let currentToolCallName = '';
+          let currentToolInput = '';
+          let currentToolIndex = -1;
           let fullContent = '';
 
           while (true) {
+            if (abortController.signal.aborted) break;
+
             const { done, value } = await reader.read();
             if (done) break;
 
@@ -123,8 +130,62 @@ export class OllamaProviderService extends BaseAiProvider {
 
               try {
                 const parsed = JSON.parse(data);
-                const
-
+                const choice = parsed.choices?.[0];
+
+                this.logger.debug('Stream event', { type: 'delta', hasToolCalls: !!choice?.delta?.tool_calls });
+
+                // Handle tool-call chunks
+                if (choice?.delta?.tool_calls?.length > 0) {
+                  for (const toolCall of choice.delta.tool_calls) {
+                    const index = toolCall.index || 0;
+
+                    // A new tool call starts
+                    if (currentToolIndex !== index) {
+                      if (currentToolIndex >= 0) {
+                        // Emit the end event for the previous tool call
+                        let parsedInput = {};
+                        try {
+                          parsedInput = JSON.parse(currentToolInput || '{}');
+                        } catch (e) {
+                          // Fall back to the raw input
+                        }
+                        subscriber.next({
+                          type: 'tool_use_end',
+                          toolCall: {
+                            id: currentToolCallId,
+                            name: currentToolCallName,
+                            input: parsedInput
+                          }
+                        });
+                        this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
+                      }
+
+                      currentToolIndex = index;
+                      currentToolCallId = toolCall.id || `tool_${Date.now()}_${index}`;
+                      currentToolCallName = toolCall.function?.name || '';
+                      currentToolInput = toolCall.function?.arguments || '';
+
+                      // Emit the tool-call start event
+                      subscriber.next({
+                        type: 'tool_use_start',
+                        toolCall: {
+                          id: currentToolCallId,
+                          name: currentToolCallName,
+                          input: {}
+                        }
+                      });
+                      this.logger.debug('Stream event', { type: 'tool_use_start', name: currentToolCallName });
+                    } else {
+                      // Keep accumulating arguments
+                      if (toolCall.function?.arguments) {
+                        currentToolInput += toolCall.function.arguments;
+                      }
+                    }
+                  }
+                }
+                // Handle text deltas
+                else if (choice?.delta?.content) {
+                  const delta = choice.delta.content;
                   fullContent += delta;
                   subscriber.next({
                     type: 'text_delta',
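Ollama's OpenAI-compatible stream splits each tool call across many deltas keyed by `index`: the first delta carries `id` and `function.name`, later ones append `function.arguments` fragments. The diff tracks a single current call with `currentTool*` variables; a reduced sketch of the same accumulation, generalized with a `Map` (an illustration, not the package's code):

```typescript
// Reduced sketch of per-index tool-call accumulation for OpenAI-style deltas.
interface OpenAiToolCallDelta {
  index?: number;
  id?: string;
  function?: { name?: string; arguments?: string };
}
interface AccumulatedCall { id: string; name: string; args: string }

function accumulateToolCalls(
  calls: Map<number, AccumulatedCall>,
  toolCallDeltas: OpenAiToolCallDelta[]
): void {
  for (const tc of toolCallDeltas) {
    const index = tc.index ?? 0;
    const entry = calls.get(index) ?? { id: tc.id ?? `tool_${index}`, name: '', args: '' };
    if (tc.function?.name) entry.name = tc.function.name;             // first delta carries the name
    if (tc.function?.arguments) entry.args += tc.function.arguments;  // later deltas append JSON fragments
    calls.set(index, entry);
  }
}

// Once the stream ends (or the index changes), JSON.parse(entry.args || '{}')
// yields the tool input, exactly as the provider code does.
```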
@@ -137,6 +198,25 @@ export class OllamaProviderService extends BaseAiProvider {
               }
             }
 
+            // Emit the end event for the last tool call
+            if (currentToolIndex >= 0) {
+              let parsedInput = {};
+              try {
+                parsedInput = JSON.parse(currentToolInput || '{}');
+              } catch (e) {
+                // Fall back to the raw input
+              }
+              subscriber.next({
+                type: 'tool_use_end',
+                toolCall: {
+                  id: currentToolCallId,
+                  name: currentToolCallName,
+                  input: parsedInput
+                }
+              });
+              this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolCallName });
+            }
+
           subscriber.next({
             type: 'message_end',
             message: {
@@ -146,11 +226,14 @@ export class OllamaProviderService extends BaseAiProvider {
               timestamp: new Date()
             }
           });
+          this.logger.debug('Stream event', { type: 'message_end', contentLength: fullContent.length });
           subscriber.complete();
         } catch (error) {
           if ((error as any).name !== 'AbortError') {
+            const errorMessage = `Ollama stream failed: ${error instanceof Error ? error.message : String(error)}`;
             this.logError(error, { request });
-            subscriber.
+            subscriber.next({ type: 'error', error: errorMessage });
+            subscriber.error(new Error(errorMessage));
           }
         }
       };
@@ -162,30 +245,36 @@ export class OllamaProviderService extends BaseAiProvider {
     });
   }
 
-
-
-
-
-
-
-
-
-
-
-
-      });
-
-      clearTimeout(timeoutId);
+  protected async sendTestRequest(request: ChatRequest): Promise<ChatResponse> {
+    const response = await fetch(`${this.getBaseURL()}/chat/completions`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        model: this.config?.model || 'llama3.1',
+        messages: this.transformMessages(request.messages),
+        max_tokens: request.maxTokens || 1,
+        temperature: request.temperature || 0
+      })
+    });
 
-
-
-      return HealthStatus.HEALTHY;
-      }
-      return HealthStatus.UNHEALTHY;
-    } catch (error) {
-      this.logger.warn('Ollama health check failed', error);
-      return HealthStatus.UNHEALTHY;
+    if (!response.ok) {
+      throw new Error(`Ollama API error: ${response.status}`);
     }
+
+    const data = await response.json();
+    return {
+      message: {
+        id: this.generateId(),
+        role: MessageRole.ASSISTANT,
+        content: data.choices[0]?.message?.content || '',
+        timestamp: new Date()
+      },
+      usage: data.usage ? {
+        promptTokens: data.usage.prompt_tokens,
+        completionTokens: data.usage.completion_tokens,
+        totalTokens: data.usage.total_tokens
+      } : undefined
+    };
   }
 
   /**
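For reference, Ollama's test request is a plain OpenAI-style chat completion against its compatibility endpoint. A standalone equivalent; the base URL and model are configuration-dependent, and the values below are assumptions for a default local Ollama install:

```typescript
// Standalone equivalent of the new test request. The URL assumes
// getBaseURL() resolves to Ollama's OpenAI-compatible base (typically
// http://localhost:11434/v1); the model name is an example only.
async function probeOllama(): Promise<string> {
  const response = await fetch('http://localhost:11434/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'llama3.1',
      messages: [{ role: 'user', content: 'Hi' }],
      max_tokens: 1,
      temperature: 0,
    }),
  });
  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }
  const data = await response.json();
  return data.choices[0]?.message?.content ?? '';
}
```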