@huyooo/ai-chat-core 0.2.19 → 0.2.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/events.d.ts +452 -0
- package/dist/events.js +1 -0
- package/dist/index.d.ts +202 -550
- package/dist/index.js +1 -1
- package/package.json +23 -4
- package/src/agent.ts +399 -0
- package/src/constants.ts +125 -0
- package/src/events.ts +797 -0
- package/src/index.ts +309 -0
- package/src/internal/update-plan.ts +2 -0
- package/src/internal/web-search.ts +78 -0
- package/src/mcp/client-manager.ts +301 -0
- package/src/mcp/index.ts +2 -0
- package/src/mcp/types.ts +43 -0
- package/src/providers/context-compressor.ts +149 -0
- package/src/providers/index.ts +120 -0
- package/src/providers/model-registry.ts +320 -0
- package/src/providers/orchestrator.ts +761 -0
- package/src/providers/protocols/anthropic.ts +406 -0
- package/src/providers/protocols/ark.ts +362 -0
- package/src/providers/protocols/deepseek.ts +344 -0
- package/src/providers/protocols/error-utils.ts +74 -0
- package/src/providers/protocols/gemini.ts +350 -0
- package/src/providers/protocols/index.ts +36 -0
- package/src/providers/protocols/openai.ts +420 -0
- package/src/providers/protocols/qwen.ts +326 -0
- package/src/providers/protocols/types.ts +189 -0
- package/src/providers/types.ts +272 -0
- package/src/providers/unified-adapter.ts +367 -0
- package/src/router.ts +72 -0
- package/src/test-utils/mock-sse.ts +32 -0
- package/src/tools.ts +162 -0
- package/src/types.ts +531 -0
- package/src/utils.ts +86 -0
|
@@ -0,0 +1,420 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Protocol(通过 OpenRouter 访问 OpenAI/GPT 模型)
|
|
3
|
+
*
|
|
4
|
+
* 特点:
|
|
5
|
+
* - GPT-5.x 支持 reasoning(思考模式)
|
|
6
|
+
* - 支持工具调用
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import type {
|
|
10
|
+
Protocol,
|
|
11
|
+
ProtocolConfig,
|
|
12
|
+
ProtocolMessage,
|
|
13
|
+
ProtocolToolDefinition,
|
|
14
|
+
ProtocolRequestOptions,
|
|
15
|
+
RawEvent,
|
|
16
|
+
RawToolCall,
|
|
17
|
+
RawSearchResult,
|
|
18
|
+
} from './types';
|
|
19
|
+
import { DEFAULT_OPENROUTER_URL } from '../../constants';
|
|
20
|
+
import { DebugLogger } from '../../utils';
|
|
21
|
+
import { friendlyHttpError } from './error-utils';
|
|
22
|
+
|
|
23
|
+
// Module-scoped debug logger tagged with this protocol's name.
const logger = DebugLogger.module('OpenAIProtocol');
|
|
24
|
+
|
|
25
|
+
/**
|
|
26
|
+
* OpenAI Protocol 实现(通过 OpenRouter)
|
|
27
|
+
*/
|
|
28
|
+
export class OpenAIProtocol implements Protocol {
|
|
29
|
+
readonly name = 'openai';
|
|
30
|
+
|
|
31
|
+
private apiKey: string;
|
|
32
|
+
private apiUrl: string;
|
|
33
|
+
|
|
34
|
+
constructor(config: ProtocolConfig) {
|
|
35
|
+
this.apiKey = config.apiKey;
|
|
36
|
+
this.apiUrl = config.apiUrl ?? DEFAULT_OPENROUTER_URL;
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
async *stream(
|
|
40
|
+
messages: ProtocolMessage[],
|
|
41
|
+
tools: ProtocolToolDefinition[],
|
|
42
|
+
options: ProtocolRequestOptions
|
|
43
|
+
): AsyncGenerator<RawEvent> {
|
|
44
|
+
const requestBody = this.buildRequestBody(messages, tools, options);
|
|
45
|
+
const url = `${this.apiUrl}/responses`;
|
|
46
|
+
|
|
47
|
+
logger.debug('发送 OpenAI 请求', {
|
|
48
|
+
url,
|
|
49
|
+
model: options.model,
|
|
50
|
+
toolsCount: tools.length,
|
|
51
|
+
});
|
|
52
|
+
|
|
53
|
+
const response = await fetch(url, {
|
|
54
|
+
method: 'POST',
|
|
55
|
+
headers: {
|
|
56
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
57
|
+
'Content-Type': 'application/json',
|
|
58
|
+
'HTTP-Referer': 'https://ai-chat.local',
|
|
59
|
+
'X-Title': 'AI Chat',
|
|
60
|
+
},
|
|
61
|
+
body: JSON.stringify(requestBody),
|
|
62
|
+
signal: options.signal,
|
|
63
|
+
});
|
|
64
|
+
|
|
65
|
+
if (!response.ok) {
|
|
66
|
+
const errorText = await response.text();
|
|
67
|
+
logger.error('OpenAI API 错误', { status: response.status, body: errorText.slice(0, 500) });
|
|
68
|
+
yield { type: 'error', error: friendlyHttpError(response.status, errorText, 'OpenAI') };
|
|
69
|
+
return;
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
const reader = response.body?.getReader();
|
|
73
|
+
if (!reader) {
|
|
74
|
+
yield { type: 'error', error: '无法获取响应流' };
|
|
75
|
+
return;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
yield* this.parseSSE(reader, options.enableThinking);
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* 构建请求体(OpenAI/GPT 专用)
|
|
83
|
+
*/
|
|
84
|
+
private buildRequestBody(
|
|
85
|
+
messages: ProtocolMessage[],
|
|
86
|
+
tools: ProtocolToolDefinition[],
|
|
87
|
+
options: ProtocolRequestOptions
|
|
88
|
+
): Record<string, unknown> {
|
|
89
|
+
const input = this.convertMessages(messages);
|
|
90
|
+
|
|
91
|
+
const body: Record<string, unknown> = {
|
|
92
|
+
model: options.model,
|
|
93
|
+
stream: true,
|
|
94
|
+
input,
|
|
95
|
+
// OpenRouter Responses API 的 include 仅支持 file_search_call.results 等,不含 usage;
|
|
96
|
+
// usage 由服务端在 response.done / response.completed 中自动返回,无需传 include。
|
|
97
|
+
};
|
|
98
|
+
|
|
99
|
+
// GPT-5.x 支持 reasoning
|
|
100
|
+
if (options.enableThinking) {
|
|
101
|
+
body.reasoning = { effort: 'high' };
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
// 构建工具列表
|
|
105
|
+
if (tools.length > 0) {
|
|
106
|
+
body.tools = tools.map(t => ({
|
|
107
|
+
type: 'function',
|
|
108
|
+
name: t.name,
|
|
109
|
+
description: t.description,
|
|
110
|
+
parameters: t.parameters,
|
|
111
|
+
}));
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
return body;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
private convertMessages(messages: ProtocolMessage[]): unknown[] {
|
|
118
|
+
const input: unknown[] = [];
|
|
119
|
+
|
|
120
|
+
for (const msg of messages) {
|
|
121
|
+
switch (msg.role) {
|
|
122
|
+
case 'system':
|
|
123
|
+
input.push({ role: 'system', content: msg.content });
|
|
124
|
+
break;
|
|
125
|
+
|
|
126
|
+
case 'user': {
|
|
127
|
+
// 当只有图片没有文字时提供默认提示
|
|
128
|
+
const textContent = msg.content || (msg.images?.length ? '请分析这张图片' : '');
|
|
129
|
+
const content: unknown[] = [{ type: 'input_text', text: textContent }];
|
|
130
|
+
if (msg.images?.length) {
|
|
131
|
+
for (const img of msg.images) {
|
|
132
|
+
content.push({
|
|
133
|
+
type: 'input_image',
|
|
134
|
+
image_url: img.startsWith('data:') ? img : `data:image/jpeg;base64,${img}`,
|
|
135
|
+
detail: 'auto',
|
|
136
|
+
});
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
input.push({ role: 'user', content });
|
|
140
|
+
break;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
case 'assistant':
|
|
144
|
+
if (msg.toolCalls?.length) {
|
|
145
|
+
for (const tc of msg.toolCalls) {
|
|
146
|
+
input.push({
|
|
147
|
+
type: 'function_call',
|
|
148
|
+
call_id: tc.id,
|
|
149
|
+
name: tc.name,
|
|
150
|
+
arguments: tc.arguments,
|
|
151
|
+
});
|
|
152
|
+
}
|
|
153
|
+
} else {
|
|
154
|
+
input.push({
|
|
155
|
+
role: 'developer',
|
|
156
|
+
content: `[上一轮AI回复]: ${msg.content}`,
|
|
157
|
+
});
|
|
158
|
+
}
|
|
159
|
+
break;
|
|
160
|
+
|
|
161
|
+
case 'tool':
|
|
162
|
+
input.push({
|
|
163
|
+
type: 'function_call_output',
|
|
164
|
+
call_id: msg.toolCallId,
|
|
165
|
+
output: msg.content,
|
|
166
|
+
});
|
|
167
|
+
break;
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
return input;
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
/**
|
|
175
|
+
* 解析 SSE 流(OpenAI/GPT 专用)
|
|
176
|
+
*/
|
|
177
|
+
private async *parseSSE(
|
|
178
|
+
reader: ReadableStreamDefaultReader<Uint8Array>,
|
|
179
|
+
enableThinking: boolean
|
|
180
|
+
): AsyncGenerator<RawEvent> {
|
|
181
|
+
const decoder = new TextDecoder();
|
|
182
|
+
let buffer = '';
|
|
183
|
+
const pendingToolCalls = new Map<string, RawToolCall>();
|
|
184
|
+
let currentFunctionCallId: string | null = null;
|
|
185
|
+
const searchResults: RawSearchResult[] = [];
|
|
186
|
+
let streamDone = false;
|
|
187
|
+
let thinkingDone = false;
|
|
188
|
+
let textStarted = false;
|
|
189
|
+
// Token 使用统计(从 response.completed 或最后一个 chunk 提取)
|
|
190
|
+
let lastUsage: import('./types').RawTokenUsage | undefined;
|
|
191
|
+
|
|
192
|
+
while (true) {
|
|
193
|
+
const { done, value } = await reader.read();
|
|
194
|
+
if (done) break;
|
|
195
|
+
|
|
196
|
+
buffer += decoder.decode(value, { stream: true });
|
|
197
|
+
const lines = buffer.split('\n');
|
|
198
|
+
buffer = lines.pop() || '';
|
|
199
|
+
|
|
200
|
+
for (const line of lines) {
|
|
201
|
+
if (streamDone) continue;
|
|
202
|
+
if (!line.startsWith('data:')) continue;
|
|
203
|
+
|
|
204
|
+
const data = line.slice(5).trim();
|
|
205
|
+
if (data === '[DONE]') {
|
|
206
|
+
streamDone = true;
|
|
207
|
+
|
|
208
|
+
if (pendingToolCalls.size > 0) {
|
|
209
|
+
for (const tc of pendingToolCalls.values()) {
|
|
210
|
+
yield { type: 'tool_call_done', toolCall: tc };
|
|
211
|
+
}
|
|
212
|
+
yield { type: 'done', finishReason: 'tool_calls', usage: lastUsage };
|
|
213
|
+
} else {
|
|
214
|
+
yield { type: 'done', finishReason: 'stop', usage: lastUsage };
|
|
215
|
+
}
|
|
216
|
+
return;
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
try {
|
|
220
|
+
const event = JSON.parse(data);
|
|
221
|
+
logger.debug('SSE 事件', { type: event.type, event: JSON.stringify(event).slice(0, 200) });
|
|
222
|
+
|
|
223
|
+
// 通用 usage 提取(兼容 response.done / response.completed 及任意带 usage 的事件,取最新以保留完整用量)
|
|
224
|
+
const eventUsage = event.response?.usage || event.usage;
|
|
225
|
+
if (eventUsage) {
|
|
226
|
+
lastUsage = {
|
|
227
|
+
promptTokens: eventUsage.input_tokens ?? eventUsage.prompt_tokens ?? 0,
|
|
228
|
+
completionTokens: eventUsage.output_tokens ?? eventUsage.completion_tokens ?? 0,
|
|
229
|
+
totalTokens: (eventUsage.input_tokens ?? eventUsage.prompt_tokens ?? 0) + (eventUsage.output_tokens ?? eventUsage.completion_tokens ?? 0),
|
|
230
|
+
reasoningTokens: eventUsage.output_tokens_details?.reasoning_tokens ?? 0,
|
|
231
|
+
cachedTokens: eventUsage.input_tokens_details?.cached_tokens ?? eventUsage.prompt_tokens_details?.cached_tokens ?? 0,
|
|
232
|
+
};
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
switch (event.type) {
|
|
236
|
+
// ========== Responses API 格式 ==========
|
|
237
|
+
case 'response.output_item.added': {
|
|
238
|
+
const item = event.item;
|
|
239
|
+
// 兼容 call_id 和 id 两种格式
|
|
240
|
+
const callId = item?.call_id || item?.id;
|
|
241
|
+
if (item?.type === 'function_call' && callId) {
|
|
242
|
+
currentFunctionCallId = callId;
|
|
243
|
+
// 兼容多种参数格式:arguments 可能是字符串或对象,也可能叫 input
|
|
244
|
+
let args = item.arguments || item.input || '';
|
|
245
|
+
if (typeof args === 'object') {
|
|
246
|
+
args = JSON.stringify(args);
|
|
247
|
+
}
|
|
248
|
+
pendingToolCalls.set(callId, {
|
|
249
|
+
id: callId,
|
|
250
|
+
name: item.name || item.function?.name || '',
|
|
251
|
+
arguments: args,
|
|
252
|
+
});
|
|
253
|
+
logger.debug('工具调用开始', { id: callId, name: item.name, args: args.slice(0, 100) });
|
|
254
|
+
yield { type: 'tool_call_start', toolCall: { id: callId, name: item.name || item.function?.name || '' } };
|
|
255
|
+
}
|
|
256
|
+
break;
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
case 'response.function_call_arguments.delta': {
|
|
260
|
+
const delta = event.delta as string | undefined;
|
|
261
|
+
if (currentFunctionCallId && delta) {
|
|
262
|
+
const call = pendingToolCalls.get(currentFunctionCallId);
|
|
263
|
+
if (call) {
|
|
264
|
+
call.arguments += delta;
|
|
265
|
+
yield { type: 'tool_call_delta', toolCall: { id: currentFunctionCallId, arguments: delta } };
|
|
266
|
+
}
|
|
267
|
+
}
|
|
268
|
+
break;
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
case 'response.function_call_arguments.done': {
|
|
272
|
+
// 兼容 item_id 和 call_id
|
|
273
|
+
const callId = event.item_id || event.call_id || currentFunctionCallId;
|
|
274
|
+
if (callId) {
|
|
275
|
+
const existing = pendingToolCalls.get(callId);
|
|
276
|
+
if (existing) {
|
|
277
|
+
// 如果 event 中有完整的 arguments,使用它
|
|
278
|
+
if (event.arguments) {
|
|
279
|
+
existing.arguments = event.arguments;
|
|
280
|
+
}
|
|
281
|
+
try {
|
|
282
|
+
JSON.parse(existing.arguments);
|
|
283
|
+
} catch {
|
|
284
|
+
logger.warn('工具参数解析失败,使用空对象', { args: existing.arguments.slice(0, 100) });
|
|
285
|
+
existing.arguments = '{}';
|
|
286
|
+
}
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
break;
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
case 'response.output_item.done': {
|
|
293
|
+
const item = event.item;
|
|
294
|
+
const callId = item?.call_id || item?.id;
|
|
295
|
+
if (item?.type === 'function_call' && callId) {
|
|
296
|
+
const existing = pendingToolCalls.get(callId);
|
|
297
|
+
// 兼容多种参数格式
|
|
298
|
+
let args = item.arguments || item.input || existing?.arguments || '{}';
|
|
299
|
+
if (typeof args === 'object') {
|
|
300
|
+
args = JSON.stringify(args);
|
|
301
|
+
}
|
|
302
|
+
pendingToolCalls.set(callId, {
|
|
303
|
+
id: callId,
|
|
304
|
+
name: item.name || item.function?.name || existing?.name || '',
|
|
305
|
+
arguments: args,
|
|
306
|
+
});
|
|
307
|
+
logger.debug('工具调用完成', { id: callId, name: item.name, args: args.slice(0, 100) });
|
|
308
|
+
}
|
|
309
|
+
break;
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
case 'response.output_text.annotation.added': {
|
|
313
|
+
const annotation = event.annotation;
|
|
314
|
+
if (annotation?.url) {
|
|
315
|
+
const exists = searchResults.some(r => r.url === annotation.url);
|
|
316
|
+
if (!exists) {
|
|
317
|
+
searchResults.push({
|
|
318
|
+
title: annotation.title || annotation.text || '',
|
|
319
|
+
url: annotation.url,
|
|
320
|
+
snippet: annotation.summary || annotation.snippet || '',
|
|
321
|
+
});
|
|
322
|
+
yield { type: 'search_result', searchResults: [...searchResults] };
|
|
323
|
+
}
|
|
324
|
+
}
|
|
325
|
+
break;
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
case 'response.output_text.delta':
|
|
329
|
+
if (event.delta) {
|
|
330
|
+
// 首次收到 text 时,结束 thinking
|
|
331
|
+
if (!textStarted) {
|
|
332
|
+
textStarted = true;
|
|
333
|
+
if (enableThinking && !thinkingDone) {
|
|
334
|
+
thinkingDone = true;
|
|
335
|
+
yield { type: 'thinking_done' };
|
|
336
|
+
}
|
|
337
|
+
}
|
|
338
|
+
yield { type: 'text_delta', delta: event.delta };
|
|
339
|
+
}
|
|
340
|
+
break;
|
|
341
|
+
|
|
342
|
+
// GPT-5.x 支持 reasoning
|
|
343
|
+
case 'response.reasoning_summary_text.delta':
|
|
344
|
+
if (enableThinking && event.delta && !thinkingDone) {
|
|
345
|
+
yield { type: 'thinking_delta', delta: event.delta };
|
|
346
|
+
}
|
|
347
|
+
break;
|
|
348
|
+
|
|
349
|
+
// response.done(OpenRouter)/ response.completed(含工具参数 + Token 使用统计)
|
|
350
|
+
case 'response.done':
|
|
351
|
+
case 'response.completed': {
|
|
352
|
+
const response = event.response;
|
|
353
|
+
if (response?.output?.length) {
|
|
354
|
+
for (const item of response.output) {
|
|
355
|
+
if (item.type === 'function_call' && item.call_id) {
|
|
356
|
+
const existing = pendingToolCalls.get(item.call_id);
|
|
357
|
+
// 从 response.completed 中获取完整参数
|
|
358
|
+
let args = item.arguments || existing?.arguments || '{}';
|
|
359
|
+
if (typeof args === 'object') {
|
|
360
|
+
args = JSON.stringify(args);
|
|
361
|
+
}
|
|
362
|
+
pendingToolCalls.set(item.call_id, {
|
|
363
|
+
id: item.call_id,
|
|
364
|
+
name: item.name || existing?.name || '',
|
|
365
|
+
arguments: args,
|
|
366
|
+
});
|
|
367
|
+
logger.debug('response.completed: 更新工具参数', { id: item.call_id, args: args.slice(0, 100) });
|
|
368
|
+
}
|
|
369
|
+
}
|
|
370
|
+
}
|
|
371
|
+
|
|
372
|
+
// 提取 Token 使用统计
|
|
373
|
+
const responseUsage = response?.usage;
|
|
374
|
+
const usage = responseUsage ? {
|
|
375
|
+
promptTokens: responseUsage.input_tokens || responseUsage.prompt_tokens || 0,
|
|
376
|
+
completionTokens: responseUsage.output_tokens || responseUsage.completion_tokens || 0,
|
|
377
|
+
totalTokens: (responseUsage.input_tokens || responseUsage.prompt_tokens || 0) + (responseUsage.output_tokens || responseUsage.completion_tokens || 0),
|
|
378
|
+
reasoningTokens: responseUsage.output_tokens_details?.reasoning_tokens || 0,
|
|
379
|
+
cachedTokens: responseUsage.input_tokens_details?.cached_tokens || responseUsage.prompt_tokens_details?.cached_tokens || 0,
|
|
380
|
+
} : undefined;
|
|
381
|
+
lastUsage = usage;
|
|
382
|
+
|
|
383
|
+
streamDone = true;
|
|
384
|
+
if (pendingToolCalls.size > 0) {
|
|
385
|
+
for (const tc of pendingToolCalls.values()) {
|
|
386
|
+
logger.debug('response.completed: 发出 tool_call_done', tc);
|
|
387
|
+
yield { type: 'tool_call_done', toolCall: tc };
|
|
388
|
+
}
|
|
389
|
+
yield { type: 'done', finishReason: 'tool_calls', usage };
|
|
390
|
+
} else {
|
|
391
|
+
yield { type: 'done', finishReason: 'stop', usage };
|
|
392
|
+
}
|
|
393
|
+
return;
|
|
394
|
+
}
|
|
395
|
+
}
|
|
396
|
+
} catch (e) {
|
|
397
|
+
logger.warn('SSE 解析错误', { line: line.slice(0, 100), error: String(e) });
|
|
398
|
+
}
|
|
399
|
+
}
|
|
400
|
+
}
|
|
401
|
+
|
|
402
|
+
// 兜底
|
|
403
|
+
if (!streamDone) {
|
|
404
|
+
logger.debug('流结束,执行兜底逻辑', { pendingToolCalls: pendingToolCalls.size });
|
|
405
|
+
if (pendingToolCalls.size > 0) {
|
|
406
|
+
for (const tc of pendingToolCalls.values()) {
|
|
407
|
+
yield { type: 'tool_call_done', toolCall: tc };
|
|
408
|
+
}
|
|
409
|
+
yield { type: 'done', finishReason: 'tool_calls', usage: lastUsage };
|
|
410
|
+
} else {
|
|
411
|
+
yield { type: 'done', finishReason: 'stop', usage: lastUsage };
|
|
412
|
+
}
|
|
413
|
+
}
|
|
414
|
+
}
|
|
415
|
+
}
|
|
416
|
+
|
|
417
|
+
export function createOpenAIProtocol(config: ProtocolConfig): OpenAIProtocol {
|
|
418
|
+
return new OpenAIProtocol(config);
|
|
419
|
+
}
|
|
420
|
+
|