@huyooo/ai-chat-core 0.2.19 → 0.2.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,362 @@
1
+ /**
2
+ * ARK Protocol(火山引擎 Responses API)
3
+ *
4
+ * 只负责:
5
+ * - HTTP 请求发送
6
+ * - SSE 流解析
7
+ * - 原始事件产出
8
+ */
9
+
10
+ import type {
11
+ Protocol,
12
+ ProtocolConfig,
13
+ ProtocolMessage,
14
+ ProtocolToolDefinition,
15
+ ProtocolRequestOptions,
16
+ RawEvent,
17
+ RawToolCall,
18
+ RawSearchResult,
19
+ } from './types';
20
+ import { DEFAULT_ARK_URL } from '../../constants';
21
+ import { DebugLogger } from '../../utils';
22
+ import { friendlyHttpError } from './error-utils';
23
+ import { WEB_SEARCH_TOOL_NAME } from '../../internal/web-search';
24
+
25
// Module-scoped debug logger tagged with this protocol's name (project utility).
const logger = DebugLogger.module('ArkProtocol');
26
+
27
+ /**
28
+ * ARK Protocol 实现
29
+ */
30
+ export class ArkProtocol implements Protocol {
31
+ readonly name = 'ark';
32
+
33
+ private apiKey: string;
34
+ private apiUrl: string;
35
+
36
+ constructor(config: ProtocolConfig) {
37
+ this.apiKey = config.apiKey;
38
+ this.apiUrl = config.apiUrl ?? DEFAULT_ARK_URL;
39
+ }
40
+
41
+ /**
42
+ * 发送请求并返回原始事件流
43
+ */
44
+ async *stream(
45
+ messages: ProtocolMessage[],
46
+ tools: ProtocolToolDefinition[],
47
+ options: ProtocolRequestOptions
48
+ ): AsyncGenerator<RawEvent> {
49
+ // 构建请求体
50
+ const requestBody = this.buildRequestBody(messages, tools, options);
51
+
52
+ logger.debug('发送 ARK 请求', {
53
+ url: `${this.apiUrl}/responses`,
54
+ model: options.model,
55
+ enableSearch: options.enableSearch,
56
+ enableThinking: options.enableThinking,
57
+ toolsCount: tools.length,
58
+ });
59
+
60
+ // 发送请求
61
+ const response = await fetch(`${this.apiUrl}/responses`, {
62
+ method: 'POST',
63
+ headers: {
64
+ 'Authorization': `Bearer ${this.apiKey}`,
65
+ 'Content-Type': 'application/json',
66
+ },
67
+ body: JSON.stringify(requestBody),
68
+ signal: options.signal,
69
+ });
70
+
71
+ if (!response.ok) {
72
+ const errorText = await response.text();
73
+ logger.error('ARK API 错误', { status: response.status, body: errorText.slice(0, 500) });
74
+ yield { type: 'error', error: friendlyHttpError(response.status, errorText, 'ARK') };
75
+ return;
76
+ }
77
+
78
+ const reader = response.body?.getReader();
79
+ if (!reader) {
80
+ yield { type: 'error', error: '无法获取响应流' };
81
+ return;
82
+ }
83
+
84
+ // 解析 SSE 流
85
+ yield* this.parseSSE(reader);
86
+ }
87
+
88
+ /**
89
+ * 构建请求体
90
+ */
91
+ private buildRequestBody(
92
+ messages: ProtocolMessage[],
93
+ tools: ProtocolToolDefinition[],
94
+ options: ProtocolRequestOptions
95
+ ): Record<string, unknown> {
96
+ const input = this.convertMessages(messages);
97
+
98
+ const body: Record<string, unknown> = {
99
+ model: options.model,
100
+ stream: true,
101
+ max_output_tokens: options.familyConfig.defaultMaxTokens ?? 32768,
102
+ input,
103
+ // ARK 不支持 include: ['usage'],通过 response.completed 事件的 usage 字段获取
104
+ };
105
+
106
+ // 构建工具列表(若已包含 web_search_ai 则不再加原生 web_search,统一走 Tavily)
107
+ const apiTools: unknown[] = [];
108
+ const hasWebSearchTool = tools.some(t => t.name === WEB_SEARCH_TOOL_NAME);
109
+
110
+ if (options.enableSearch && !hasWebSearchTool) {
111
+ apiTools.push({ type: 'web_search', max_keyword: 5, limit: 20 });
112
+ }
113
+
114
+ for (const t of tools) {
115
+ apiTools.push({
116
+ type: 'function',
117
+ name: t.name,
118
+ description: t.description,
119
+ parameters: t.parameters,
120
+ });
121
+ }
122
+
123
+ if (apiTools.length > 0) {
124
+ body.tools = apiTools;
125
+ }
126
+
127
+ // 启用 thinking
128
+ if (options.enableThinking) {
129
+ body.thinking = { type: 'enabled' };
130
+ }
131
+
132
+ return body;
133
+ }
134
+
135
+ /**
136
+ * 转换消息格式
137
+ */
138
+ private convertMessages(messages: ProtocolMessage[]): unknown[] {
139
+ const input: unknown[] = [];
140
+
141
+ for (const msg of messages) {
142
+ switch (msg.role) {
143
+ case 'system':
144
+ input.push({ role: 'system', content: msg.content });
145
+ break;
146
+
147
+ case 'user': {
148
+ // ARK API 要求 input_text 必须有内容,当只有图片时提供默认提示
149
+ const textContent = msg.content || (msg.images?.length ? '请分析这张图片' : '');
150
+ const content: unknown[] = [{ type: 'input_text', text: textContent }];
151
+ if (msg.images?.length) {
152
+ for (const img of msg.images) {
153
+ content.push({
154
+ type: 'input_image',
155
+ image_url: img.startsWith('data:') ? img : `data:image/jpeg;base64,${img}`,
156
+ });
157
+ }
158
+ }
159
+ input.push({ role: 'user', content });
160
+ break;
161
+ }
162
+
163
+ case 'assistant':
164
+ if (msg.toolCalls?.length) {
165
+ // 如果 assistant 同时有文本和工具调用,先保留文本内容
166
+ // 避免模型丢失推理上下文导致重复调用工具
167
+ if (msg.content) {
168
+ input.push({
169
+ type: 'message',
170
+ role: 'assistant',
171
+ content: [{ type: 'output_text', text: msg.content }],
172
+ });
173
+ }
174
+ for (const tc of msg.toolCalls) {
175
+ input.push({
176
+ type: 'function_call',
177
+ call_id: tc.id,
178
+ name: tc.name,
179
+ arguments: tc.arguments,
180
+ });
181
+ }
182
+ } else {
183
+ input.push({
184
+ role: 'developer',
185
+ content: `[上一轮AI回复]: ${msg.content}`,
186
+ });
187
+ }
188
+ break;
189
+
190
+ case 'tool':
191
+ input.push({
192
+ type: 'function_call_output',
193
+ call_id: msg.toolCallId,
194
+ output: msg.content,
195
+ });
196
+ break;
197
+ }
198
+ }
199
+
200
+ return input;
201
+ }
202
+
203
+ /**
204
+ * 解析 SSE 流
205
+ */
206
+ private async *parseSSE(reader: ReadableStreamDefaultReader<Uint8Array>): AsyncGenerator<RawEvent> {
207
+ const decoder = new TextDecoder();
208
+ let buffer = '';
209
+ const pendingToolCalls = new Map<string, RawToolCall>();
210
+ let currentFunctionCallId: string | null = null;
211
+ const searchResults: RawSearchResult[] = [];
212
+ let streamDone = false;
213
+ let thinkingDone = false;
214
+ let textStarted = false;
215
+ let lastUsage: import('./types').RawTokenUsage | undefined;
216
+
217
+ while (true) {
218
+ const { done, value } = await reader.read();
219
+ if (done) break;
220
+
221
+ buffer += decoder.decode(value, { stream: true });
222
+ const lines = buffer.split('\n');
223
+ buffer = lines.pop() || '';
224
+
225
+ for (const line of lines) {
226
+ if (streamDone) continue;
227
+ if (!line.startsWith('data:')) continue;
228
+
229
+ const data = line.slice(5).trim();
230
+ if (data === '[DONE]') {
231
+ streamDone = true;
232
+
233
+ // 输出工具调用
234
+ if (pendingToolCalls.size > 0) {
235
+ for (const tc of pendingToolCalls.values()) {
236
+ yield { type: 'tool_call_done', toolCall: tc };
237
+ }
238
+ yield { type: 'done', finishReason: 'tool_calls', usage: lastUsage };
239
+ } else {
240
+ yield { type: 'done', finishReason: 'stop', usage: lastUsage };
241
+ }
242
+ return;
243
+ }
244
+
245
+ try {
246
+ const event = JSON.parse(data);
247
+
248
+ // 提取 Token 使用统计(response.completed 或任意事件的 usage)
249
+ const eventUsage = event.response?.usage || event.usage;
250
+ if (eventUsage) {
251
+ lastUsage = {
252
+ promptTokens: eventUsage.input_tokens || eventUsage.prompt_tokens || 0,
253
+ completionTokens: eventUsage.output_tokens || eventUsage.completion_tokens || 0,
254
+ totalTokens: (eventUsage.input_tokens || eventUsage.prompt_tokens || 0) + (eventUsage.output_tokens || eventUsage.completion_tokens || 0),
255
+ reasoningTokens: eventUsage.output_tokens_details?.reasoning_tokens || 0,
256
+ cachedTokens: eventUsage.input_tokens_details?.cached_tokens || 0,
257
+ };
258
+ }
259
+
260
+ switch (event.type) {
261
+ case 'response.output_item.added': {
262
+ const item = event.item;
263
+ if (item?.type === 'function_call' && item.call_id) {
264
+ currentFunctionCallId = item.call_id;
265
+ pendingToolCalls.set(item.call_id, {
266
+ id: item.call_id,
267
+ name: item.name || '',
268
+ arguments: item.arguments || '',
269
+ });
270
+ yield { type: 'tool_call_start', toolCall: { id: item.call_id, name: item.name || '' } };
271
+ }
272
+ break;
273
+ }
274
+
275
+ case 'response.function_call_arguments.delta': {
276
+ if (currentFunctionCallId) {
277
+ const call = pendingToolCalls.get(currentFunctionCallId);
278
+ if (call) {
279
+ call.arguments += event.delta || '';
280
+ yield { type: 'tool_call_delta', toolCall: { id: currentFunctionCallId, arguments: event.delta || '' } };
281
+ }
282
+ }
283
+ break;
284
+ }
285
+
286
+ case 'response.function_call_arguments.done':
287
+ case 'response.output_item.done': {
288
+ const item = event.item;
289
+ if (item?.type === 'function_call' && item.call_id) {
290
+ const existing = pendingToolCalls.get(item.call_id);
291
+ pendingToolCalls.set(item.call_id, {
292
+ id: item.call_id,
293
+ name: item.name || existing?.name || '',
294
+ arguments: item.arguments || existing?.arguments || '{}',
295
+ });
296
+ }
297
+ break;
298
+ }
299
+
300
+ case 'response.output_text.annotation.added': {
301
+ const annotation = event.annotation;
302
+ if (annotation?.url) {
303
+ const exists = searchResults.some(r => r.url === annotation.url);
304
+ if (!exists) {
305
+ searchResults.push({
306
+ title: annotation.title || annotation.text || '',
307
+ url: annotation.url,
308
+ snippet: annotation.summary || annotation.snippet || '',
309
+ });
310
+ yield { type: 'search_result', searchResults: [...searchResults] };
311
+ }
312
+ }
313
+ break;
314
+ }
315
+
316
+ case 'response.output_text.delta':
317
+ if (event.delta) {
318
+ // 首次收到 text 时,结束 thinking
319
+ if (!textStarted) {
320
+ textStarted = true;
321
+ if (!thinkingDone) {
322
+ thinkingDone = true;
323
+ yield { type: 'thinking_done' };
324
+ }
325
+ }
326
+ yield { type: 'text_delta', delta: event.delta };
327
+ }
328
+ break;
329
+
330
+ case 'response.reasoning_summary_text.delta':
331
+ if (event.delta && !thinkingDone) {
332
+ yield { type: 'thinking_delta', delta: event.delta };
333
+ }
334
+ break;
335
+ }
336
+ } catch {
337
+ // 忽略解析错误
338
+ }
339
+ }
340
+ }
341
+
342
+ // 兜底处理
343
+ if (!streamDone) {
344
+ if (pendingToolCalls.size > 0) {
345
+ for (const tc of pendingToolCalls.values()) {
346
+ yield { type: 'tool_call_done', toolCall: tc };
347
+ }
348
+ yield { type: 'done', finishReason: 'tool_calls', usage: lastUsage };
349
+ } else {
350
+ yield { type: 'done', finishReason: 'stop', usage: lastUsage };
351
+ }
352
+ }
353
+ }
354
+ }
355
+
356
+ /**
357
+ * 创建 ARK Protocol
358
+ */
359
+ export function createArkProtocol(config: ProtocolConfig): ArkProtocol {
360
+ return new ArkProtocol(config);
361
+ }
362
+
@@ -0,0 +1,344 @@
1
+ /**
2
+ * DeepSeek Protocol(通过火山引擎 ARK API 访问)
3
+ *
4
+ * 特点:
5
+ * - 支持 reasoning(深度思考)
6
+ * - 支持 web_search 原生搜索
7
+ * - 与豆包共用 ARK API,但行为可能有差异
8
+ */
9
+
10
+ import type {
11
+ Protocol,
12
+ ProtocolConfig,
13
+ ProtocolMessage,
14
+ ProtocolToolDefinition,
15
+ ProtocolRequestOptions,
16
+ RawEvent,
17
+ RawToolCall,
18
+ RawSearchResult,
19
+ } from './types';
20
+ import { DEFAULT_ARK_URL } from '../../constants';
21
+ import { DebugLogger } from '../../utils';
22
+ import { friendlyHttpError } from './error-utils';
23
+ import { WEB_SEARCH_TOOL_NAME } from '../../internal/web-search';
24
+
25
// Module-scoped debug logger tagged with this protocol's name (project utility).
const logger = DebugLogger.module('DeepSeekProtocol');
26
+
27
+ /**
28
+ * DeepSeek Protocol 实现
29
+ */
30
+ export class DeepSeekProtocol implements Protocol {
31
+ readonly name = 'deepseek';
32
+
33
+ private apiKey: string;
34
+ private apiUrl: string;
35
+
36
+ constructor(config: ProtocolConfig) {
37
+ this.apiKey = config.apiKey;
38
+ this.apiUrl = config.apiUrl ?? DEFAULT_ARK_URL;
39
+ }
40
+
41
+ async *stream(
42
+ messages: ProtocolMessage[],
43
+ tools: ProtocolToolDefinition[],
44
+ options: ProtocolRequestOptions
45
+ ): AsyncGenerator<RawEvent> {
46
+ const requestBody = this.buildRequestBody(messages, tools, options);
47
+ const url = `${this.apiUrl}/responses`;
48
+
49
+ logger.debug('发送 DeepSeek 请求', {
50
+ url,
51
+ model: options.model,
52
+ enableSearch: options.enableSearch,
53
+ enableThinking: options.enableThinking,
54
+ toolsCount: tools.length,
55
+ });
56
+
57
+ const response = await fetch(url, {
58
+ method: 'POST',
59
+ headers: {
60
+ 'Authorization': `Bearer ${this.apiKey}`,
61
+ 'Content-Type': 'application/json',
62
+ },
63
+ body: JSON.stringify(requestBody),
64
+ signal: options.signal,
65
+ });
66
+
67
+ if (!response.ok) {
68
+ const errorText = await response.text();
69
+ logger.error('DeepSeek API 错误', { status: response.status, body: errorText.slice(0, 500) });
70
+ yield { type: 'error', error: friendlyHttpError(response.status, errorText, 'DeepSeek') };
71
+ return;
72
+ }
73
+
74
+ const reader = response.body?.getReader();
75
+ if (!reader) {
76
+ yield { type: 'error', error: '无法获取响应流' };
77
+ return;
78
+ }
79
+
80
+ yield* this.parseSSE(reader, options.enableThinking);
81
+ }
82
+
83
+ /**
84
+ * 构建请求体(DeepSeek 专用)
85
+ */
86
+ private buildRequestBody(
87
+ messages: ProtocolMessage[],
88
+ tools: ProtocolToolDefinition[],
89
+ options: ProtocolRequestOptions
90
+ ): Record<string, unknown> {
91
+ const input = this.convertMessages(messages);
92
+
93
+ const body: Record<string, unknown> = {
94
+ model: options.model,
95
+ stream: true,
96
+ max_output_tokens: options.familyConfig.defaultMaxTokens ?? 32768,
97
+ input,
98
+ // 火山引擎 DeepSeek 端点不支持 include: ['usage'],通过 response.completed 等事件的 usage 字段获取
99
+ };
100
+
101
+ // 构建工具列表(若已包含 web_search_ai 则不再加原生 web_search,统一走 Tavily)
102
+ const apiTools: unknown[] = [];
103
+ const hasWebSearchTool = tools.some(t => t.name === WEB_SEARCH_TOOL_NAME);
104
+
105
+ if (options.enableSearch && !hasWebSearchTool) {
106
+ apiTools.push({ type: 'web_search', max_keyword: 5, limit: 20 });
107
+ }
108
+
109
+ for (const t of tools) {
110
+ apiTools.push({
111
+ type: 'function',
112
+ name: t.name,
113
+ description: t.description,
114
+ parameters: t.parameters,
115
+ });
116
+ }
117
+
118
+ if (apiTools.length > 0) {
119
+ body.tools = apiTools;
120
+ }
121
+
122
+ // DeepSeek 启用 thinking(深度思考模式)
123
+ if (options.enableThinking) {
124
+ body.thinking = { type: 'enabled' };
125
+ }
126
+
127
+ return body;
128
+ }
129
+
130
+ private convertMessages(messages: ProtocolMessage[]): unknown[] {
131
+ const input: unknown[] = [];
132
+
133
+ for (const msg of messages) {
134
+ switch (msg.role) {
135
+ case 'system':
136
+ input.push({ role: 'system', content: msg.content });
137
+ break;
138
+
139
+ case 'user': {
140
+ // 当只有图片没有文字时提供默认提示
141
+ const textContent = msg.content || (msg.images?.length ? '请分析这张图片' : '');
142
+ const content: unknown[] = [{ type: 'input_text', text: textContent }];
143
+ if (msg.images?.length) {
144
+ for (const img of msg.images) {
145
+ content.push({
146
+ type: 'input_image',
147
+ image_url: img.startsWith('data:') ? img : `data:image/jpeg;base64,${img}`,
148
+ });
149
+ }
150
+ }
151
+ input.push({ role: 'user', content });
152
+ break;
153
+ }
154
+
155
+ case 'assistant':
156
+ if (msg.toolCalls?.length) {
157
+ for (const tc of msg.toolCalls) {
158
+ input.push({
159
+ type: 'function_call',
160
+ call_id: tc.id,
161
+ name: tc.name,
162
+ arguments: tc.arguments,
163
+ });
164
+ }
165
+ } else {
166
+ input.push({
167
+ role: 'developer',
168
+ content: `[上一轮AI回复]: ${msg.content}`,
169
+ });
170
+ }
171
+ break;
172
+
173
+ case 'tool':
174
+ input.push({
175
+ type: 'function_call_output',
176
+ call_id: msg.toolCallId,
177
+ output: msg.content,
178
+ });
179
+ break;
180
+ }
181
+ }
182
+
183
+ return input;
184
+ }
185
+
186
+ /**
187
+ * 解析 SSE 流(DeepSeek 专用)
188
+ */
189
+ private async *parseSSE(
190
+ reader: ReadableStreamDefaultReader<Uint8Array>,
191
+ enableThinking: boolean
192
+ ): AsyncGenerator<RawEvent> {
193
+ const decoder = new TextDecoder();
194
+ let buffer = '';
195
+ const pendingToolCalls = new Map<string, RawToolCall>();
196
+ let currentFunctionCallId: string | null = null;
197
+ const searchResults: RawSearchResult[] = [];
198
+ let streamDone = false;
199
+ let thinkingDone = false;
200
+ let textStarted = false;
201
+ // Token 使用统计(保存最后一次的 usage)
202
+ let lastUsage: { promptTokens: number; completionTokens: number; totalTokens: number; reasoningTokens: number } | undefined;
203
+
204
+ while (true) {
205
+ const { done, value } = await reader.read();
206
+ if (done) break;
207
+
208
+ buffer += decoder.decode(value, { stream: true });
209
+ const lines = buffer.split('\n');
210
+ buffer = lines.pop() || '';
211
+
212
+ for (const line of lines) {
213
+ if (streamDone) continue;
214
+ if (!line.startsWith('data:')) continue;
215
+
216
+ const data = line.slice(5).trim();
217
+ if (data === '[DONE]') {
218
+ streamDone = true;
219
+
220
+ if (pendingToolCalls.size > 0) {
221
+ for (const tc of pendingToolCalls.values()) {
222
+ yield { type: 'tool_call_done', toolCall: tc };
223
+ }
224
+ yield { type: 'done', finishReason: 'tool_calls', usage: lastUsage };
225
+ } else {
226
+ yield { type: 'done', finishReason: 'stop', usage: lastUsage };
227
+ }
228
+ return;
229
+ }
230
+
231
+ try {
232
+ const event = JSON.parse(data);
233
+
234
+ // 提取 Token 使用统计(response.usage 或 Chat Completions usage)
235
+ const eventUsage = event.response?.usage || event.usage;
236
+ if (eventUsage) {
237
+ lastUsage = {
238
+ promptTokens: eventUsage.input_tokens || eventUsage.prompt_tokens || 0,
239
+ completionTokens: eventUsage.output_tokens || eventUsage.completion_tokens || 0,
240
+ totalTokens: (eventUsage.input_tokens || eventUsage.prompt_tokens || 0) + (eventUsage.output_tokens || eventUsage.completion_tokens || 0),
241
+ reasoningTokens: eventUsage.output_tokens_details?.reasoning_tokens || eventUsage.completion_tokens_details?.reasoning_tokens || 0,
242
+ };
243
+ }
244
+
245
+ switch (event.type) {
246
+ case 'response.output_item.added': {
247
+ const item = event.item;
248
+ if (item?.type === 'function_call' && item.call_id) {
249
+ currentFunctionCallId = item.call_id;
250
+ pendingToolCalls.set(item.call_id, {
251
+ id: item.call_id,
252
+ name: item.name || '',
253
+ arguments: item.arguments || '',
254
+ });
255
+ yield { type: 'tool_call_start', toolCall: { id: item.call_id, name: item.name || '' } };
256
+ }
257
+ break;
258
+ }
259
+
260
+ case 'response.function_call_arguments.delta': {
261
+ if (currentFunctionCallId) {
262
+ const call = pendingToolCalls.get(currentFunctionCallId);
263
+ if (call) {
264
+ call.arguments += event.delta || '';
265
+ yield { type: 'tool_call_delta', toolCall: { id: currentFunctionCallId, arguments: event.delta || '' } };
266
+ }
267
+ }
268
+ break;
269
+ }
270
+
271
+ case 'response.function_call_arguments.done':
272
+ case 'response.output_item.done': {
273
+ const item = event.item;
274
+ if (item?.type === 'function_call' && item.call_id) {
275
+ const existing = pendingToolCalls.get(item.call_id);
276
+ pendingToolCalls.set(item.call_id, {
277
+ id: item.call_id,
278
+ name: item.name || existing?.name || '',
279
+ arguments: item.arguments || existing?.arguments || '{}',
280
+ });
281
+ }
282
+ break;
283
+ }
284
+
285
+ case 'response.output_text.annotation.added': {
286
+ const annotation = event.annotation;
287
+ if (annotation?.url) {
288
+ const exists = searchResults.some(r => r.url === annotation.url);
289
+ if (!exists) {
290
+ searchResults.push({
291
+ title: annotation.title || annotation.text || '',
292
+ url: annotation.url,
293
+ snippet: annotation.summary || annotation.snippet || '',
294
+ });
295
+ yield { type: 'search_result', searchResults: [...searchResults] };
296
+ }
297
+ }
298
+ break;
299
+ }
300
+
301
+ case 'response.output_text.delta':
302
+ if (event.delta) {
303
+ if (!textStarted) {
304
+ textStarted = true;
305
+ if (enableThinking && !thinkingDone) {
306
+ thinkingDone = true;
307
+ yield { type: 'thinking_done' };
308
+ }
309
+ }
310
+ yield { type: 'text_delta', delta: event.delta };
311
+ }
312
+ break;
313
+
314
+ // DeepSeek 支持 reasoning(深度思考)
315
+ case 'response.reasoning_summary_text.delta':
316
+ if (enableThinking && event.delta && !thinkingDone) {
317
+ yield { type: 'thinking_delta', delta: event.delta };
318
+ }
319
+ break;
320
+ }
321
+ } catch {
322
+ // 忽略解析错误
323
+ }
324
+ }
325
+ }
326
+
327
+ // 兜底
328
+ if (!streamDone) {
329
+ if (pendingToolCalls.size > 0) {
330
+ for (const tc of pendingToolCalls.values()) {
331
+ yield { type: 'tool_call_done', toolCall: tc };
332
+ }
333
+ yield { type: 'done', finishReason: 'tool_calls', usage: lastUsage };
334
+ } else {
335
+ yield { type: 'done', finishReason: 'stop', usage: lastUsage };
336
+ }
337
+ }
338
+ }
339
+ }
340
+
341
+ export function createDeepSeekProtocol(config: ProtocolConfig): DeepSeekProtocol {
342
+ return new DeepSeekProtocol(config);
343
+ }
344
+