kimi-vercel-ai-sdk-provider 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. package/LICENSE +198 -0
  2. package/README.md +871 -0
  3. package/dist/index.d.mts +1317 -0
  4. package/dist/index.d.ts +1317 -0
  5. package/dist/index.js +2764 -0
  6. package/dist/index.js.map +1 -0
  7. package/dist/index.mjs +2734 -0
  8. package/dist/index.mjs.map +1 -0
  9. package/package.json +70 -0
  10. package/src/__tests__/caching.test.ts +97 -0
  11. package/src/__tests__/chat.test.ts +386 -0
  12. package/src/__tests__/code-integration.test.ts +562 -0
  13. package/src/__tests__/code-provider.test.ts +289 -0
  14. package/src/__tests__/code.test.ts +427 -0
  15. package/src/__tests__/core.test.ts +172 -0
  16. package/src/__tests__/files.test.ts +185 -0
  17. package/src/__tests__/integration.test.ts +457 -0
  18. package/src/__tests__/provider.test.ts +188 -0
  19. package/src/__tests__/tools.test.ts +519 -0
  20. package/src/chat/index.ts +42 -0
  21. package/src/chat/kimi-chat-language-model.ts +829 -0
  22. package/src/chat/kimi-chat-messages.ts +297 -0
  23. package/src/chat/kimi-chat-response.ts +84 -0
  24. package/src/chat/kimi-chat-settings.ts +216 -0
  25. package/src/code/index.ts +66 -0
  26. package/src/code/kimi-code-language-model.ts +669 -0
  27. package/src/code/kimi-code-messages.ts +303 -0
  28. package/src/code/kimi-code-provider.ts +239 -0
  29. package/src/code/kimi-code-settings.ts +193 -0
  30. package/src/code/kimi-code-types.ts +354 -0
  31. package/src/core/errors.ts +140 -0
  32. package/src/core/index.ts +36 -0
  33. package/src/core/types.ts +148 -0
  34. package/src/core/utils.ts +210 -0
  35. package/src/files/attachment-processor.ts +276 -0
  36. package/src/files/file-utils.ts +257 -0
  37. package/src/files/index.ts +24 -0
  38. package/src/files/kimi-file-client.ts +292 -0
  39. package/src/index.ts +122 -0
  40. package/src/kimi-provider.ts +263 -0
  41. package/src/tools/builtin-tools.ts +273 -0
  42. package/src/tools/index.ts +33 -0
  43. package/src/tools/prepare-tools.ts +306 -0
  44. package/src/version.ts +4 -0
@@ -0,0 +1,669 @@
1
+ /**
2
+ * Kimi Code language model implementation.
3
+ * @module
4
+ */
5
+
6
+ import type {
7
+ LanguageModelV3,
8
+ LanguageModelV3CallOptions,
9
+ LanguageModelV3Content,
10
+ LanguageModelV3FinishReason,
11
+ LanguageModelV3FunctionTool,
12
+ LanguageModelV3GenerateResult,
13
+ LanguageModelV3StreamPart,
14
+ LanguageModelV3StreamResult,
15
+ SharedV3ProviderMetadata,
16
+ SharedV3Warning
17
+ } from '@ai-sdk/provider';
18
+ import {
19
+ type ParseResult,
20
+ combineHeaders,
21
+ createEventSourceResponseHandler,
22
+ createJsonErrorResponseHandler,
23
+ createJsonResponseHandler,
24
+ generateId,
25
+ parseProviderOptions,
26
+ postJsonToApi,
27
+ removeUndefinedEntries
28
+ } from '@ai-sdk/provider-utils';
29
+ import { z } from 'zod/v4';
30
+ import { convertToKimiCodePrompt } from './kimi-code-messages';
31
+ import {
32
+ type KimiCodeSettings,
33
+ effortToBudgetTokens,
34
+ kimiCodeProviderOptionsSchema,
35
+ normalizeExtendedThinkingConfig
36
+ } from './kimi-code-settings';
37
+ import {
38
+ type KimiCodeCapabilities,
39
+ type KimiCodeConfig,
40
+ type KimiCodeModelId,
41
+ inferKimiCodeCapabilities
42
+ } from './kimi-code-types';
43
+
44
+ // ============================================================================
45
+ // Response Schemas
46
+ // ============================================================================
47
+
48
+ const kimiCodeErrorSchema = z.union([
49
+ z.object({
50
+ error: z.object({
51
+ message: z.string(),
52
+ type: z.string().nullish(),
53
+ code: z.union([z.string(), z.number()]).nullish()
54
+ })
55
+ }),
56
+ z.object({
57
+ message: z.string()
58
+ })
59
+ ]);
60
+
61
+ type KimiCodeErrorData = z.infer<typeof kimiCodeErrorSchema>;
62
+
63
+ const kimiCodeTextContentSchema = z.object({
64
+ type: z.literal('text'),
65
+ text: z.string()
66
+ });
67
+
68
+ const kimiCodeThinkingContentSchema = z.object({
69
+ type: z.literal('thinking'),
70
+ thinking: z.string()
71
+ });
72
+
73
+ const kimiCodeToolUseContentSchema = z.object({
74
+ type: z.literal('tool_use'),
75
+ id: z.string(),
76
+ name: z.string(),
77
+ input: z.record(z.string(), z.unknown())
78
+ });
79
+
80
+ const kimiCodeContentBlockSchema = z.union([
81
+ kimiCodeTextContentSchema,
82
+ kimiCodeThinkingContentSchema,
83
+ kimiCodeToolUseContentSchema
84
+ ]);
85
+
86
+ const kimiCodeResponseSchema = z.object({
87
+ id: z.string().optional(),
88
+ type: z.string().optional(),
89
+ model: z.string().optional(),
90
+ stop_reason: z.string().nullish(),
91
+ stop_sequence: z.string().nullish(),
92
+ content: z.array(kimiCodeContentBlockSchema),
93
+ usage: z
94
+ .object({
95
+ input_tokens: z.number().optional(),
96
+ output_tokens: z.number().optional(),
97
+ cache_read_input_tokens: z.number().optional(),
98
+ cache_creation_input_tokens: z.number().optional()
99
+ })
100
+ .optional()
101
+ });
102
+
103
+ const kimiCodeStreamChunkSchema = z.object({
104
+ type: z.string(),
105
+ index: z.number().optional(),
106
+ message: z
107
+ .object({
108
+ id: z.string().optional(),
109
+ type: z.string().optional(),
110
+ model: z.string().optional(),
111
+ content: z.array(z.unknown()).optional(),
112
+ stop_reason: z.string().nullish(),
113
+ stop_sequence: z.string().nullish(),
114
+ usage: z
115
+ .object({
116
+ input_tokens: z.number().optional(),
117
+ output_tokens: z.number().optional()
118
+ })
119
+ .optional()
120
+ })
121
+ .optional(),
122
+ content_block: z
123
+ .object({
124
+ type: z.string(),
125
+ text: z.string().optional(),
126
+ thinking: z.string().optional(),
127
+ id: z.string().optional(),
128
+ name: z.string().optional(),
129
+ input: z.record(z.string(), z.unknown()).optional()
130
+ })
131
+ .optional(),
132
+ delta: z
133
+ .object({
134
+ type: z.string().optional(),
135
+ text: z.string().optional(),
136
+ thinking: z.string().optional(),
137
+ partial_json: z.string().optional(),
138
+ stop_reason: z.string().optional(),
139
+ stop_sequence: z.string().optional()
140
+ })
141
+ .optional(),
142
+ usage: z
143
+ .object({
144
+ input_tokens: z.number().optional(),
145
+ output_tokens: z.number().optional()
146
+ })
147
+ .optional()
148
+ });
149
+
150
+ type KimiCodeResponse = z.infer<typeof kimiCodeResponseSchema>;
151
+ type KimiCodeStreamChunk = z.infer<typeof kimiCodeStreamChunkSchema>;
152
+
153
+ // ============================================================================
154
+ // Error Handler
155
+ // ============================================================================
156
+
157
// Shared error-response handler for both generate and stream calls.
const kimiCodeFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: kimiCodeErrorSchema,
  errorToMessage: (error: KimiCodeErrorData) => {
    // The nested `{ error: { message } }` shape takes precedence; otherwise
    // the payload is the flat `{ message }` variant.
    if ('error' in error) {
      return error.error.message;
    }
    return error.message;
  },
  // Retry on request timeout (408), conflict (409), rate limiting (429),
  // and any server-side error (5xx).
  isRetryable: (response) =>
    response.status === 408 || response.status === 409 || response.status === 429 || response.status >= 500
});
168
+
169
+ // ============================================================================
170
+ // Helper Functions
171
+ // ============================================================================
172
+
173
+ /**
174
+ * Map Kimi Code stop reason to AI SDK finish reason.
175
+ */
176
+ function mapStopReason(stopReason: string | null | undefined): LanguageModelV3FinishReason {
177
+ switch (stopReason) {
178
+ case 'end_turn':
179
+ case 'stop_sequence':
180
+ return { unified: 'stop', raw: stopReason };
181
+ case 'tool_use':
182
+ return { unified: 'tool-calls', raw: stopReason };
183
+ case 'max_tokens':
184
+ return { unified: 'length', raw: stopReason };
185
+ default:
186
+ return { unified: 'other', raw: stopReason ?? undefined };
187
+ }
188
+ }
189
+
190
+ /**
191
+ * Convert Kimi Code usage to AI SDK usage format.
192
+ */
193
+ function convertUsage(usage?: {
194
+ input_tokens?: number;
195
+ output_tokens?: number;
196
+ cache_read_input_tokens?: number;
197
+ cache_creation_input_tokens?: number;
198
+ }) {
199
+ const inputTokens = usage?.input_tokens ?? 0;
200
+ const outputTokens = usage?.output_tokens ?? 0;
201
+ const cacheRead = usage?.cache_read_input_tokens ?? 0;
202
+
203
+ return {
204
+ inputTokens: {
205
+ total: inputTokens,
206
+ cacheRead,
207
+ cacheWrite: usage?.cache_creation_input_tokens,
208
+ noCache: inputTokens - cacheRead
209
+ },
210
+ outputTokens: {
211
+ total: outputTokens,
212
+ text: outputTokens,
213
+ reasoning: 0
214
+ },
215
+ raw: usage
216
+ };
217
+ }
218
+
219
+ /**
220
+ * Convert tools to Kimi Code (Anthropic) format.
221
+ */
222
+ function convertTools(tools?: LanguageModelV3FunctionTool[]) {
223
+ if (!tools || tools.length === 0) {
224
+ return undefined;
225
+ }
226
+
227
+ return tools.map((tool) => {
228
+ return {
229
+ name: tool.name,
230
+ description: tool.description,
231
+ input_schema: tool.inputSchema ?? { type: 'object', properties: {} }
232
+ };
233
+ });
234
+ }
235
+
236
+ // ============================================================================
237
+ // Language Model Implementation
238
+ // ============================================================================
239
+
240
/**
 * Kimi Code language model implementing LanguageModelV3.
 *
 * Talks to an Anthropic-style `/messages` endpoint: non-streaming via
 * {@link doGenerate}, SSE streaming via {@link doStream}.
 */
export class KimiCodeLanguageModel implements LanguageModelV3 {
  readonly specificationVersion = 'v3';
  readonly modelId: KimiCodeModelId;

  private readonly config: KimiCodeConfig;
  private readonly settings: KimiCodeSettings;
  // ID factory used to synthesize tool-call IDs when the stream omits one.
  private readonly generateIdFn: () => string;

  constructor(modelId: KimiCodeModelId, settings: KimiCodeSettings, config: KimiCodeConfig) {
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
    this.generateIdFn = config.generateId ?? generateId;
  }

  get provider(): string {
    return this.config.provider;
  }

  /** Namespace key for provider options and provider metadata. */
  private get providerOptionsName(): string {
    return 'kimiCode';
  }

  /**
   * Get the inferred or configured capabilities for this model.
   * Settings-level overrides win over capabilities inferred from the model ID.
   */
  get capabilities(): KimiCodeCapabilities {
    const inferred = inferKimiCodeCapabilities(this.modelId);
    return {
      ...inferred,
      ...this.settings.capabilities
    };
  }

  /**
   * URL patterns the model can consume directly.
   * Settings take precedence over config; default allows any http(s) image URL.
   */
  get supportedUrls() {
    const patterns: Record<string, RegExp[]> = {
      'image/*': [/^https?:\/\/.*$/i]
    };
    return this.settings.supportedUrls ?? this.config.supportedUrls ?? patterns;
  }

  /**
   * Build request arguments (body, warnings, extra headers) shared by
   * doGenerate and doStream.
   */
  private async getArgs(options: LanguageModelV3CallOptions) {
    const { prompt, maxOutputTokens, temperature, topP, topK, stopSequences, tools, toolChoice, providerOptions } =
      options;

    const warnings: SharedV3Warning[] = [];

    // Parse provider options namespaced under `kimiCode`.
    const kimiCodeOptions = await parseProviderOptions({
      provider: this.providerOptionsName,
      providerOptions,
      schema: kimiCodeProviderOptionsSchema
    });

    // Merge extended thinking config: per-call provider options win over
    // model settings.
    const extendedThinking =
      normalizeExtendedThinkingConfig(kimiCodeOptions?.extendedThinking) ??
      normalizeExtendedThinkingConfig(this.settings.extendedThinking);

    // Warn about unsupported options (topK is not forwarded to the API).
    if (topK != null) {
      warnings.push({
        type: 'unsupported',
        feature: 'topK'
      });
    }

    // Convert prompt to Kimi Code format (system string + message array).
    const { system, messages } = await convertToKimiCodePrompt(prompt);

    // Prepare tool choice; AI SDK 'required' maps to the Anthropic-style 'any'.
    let toolChoiceParam: { type: string; name?: string } | undefined;
    if (toolChoice != null) {
      switch (toolChoice.type) {
        case 'auto':
          toolChoiceParam = { type: 'auto' };
          break;
        case 'none':
          toolChoiceParam = { type: 'none' };
          break;
        case 'required':
          toolChoiceParam = { type: 'any' };
          break;
        case 'tool':
          toolChoiceParam = { type: 'tool', name: toolChoice.toolName };
          break;
      }
    }

    // Filter to only function tools; other tool types are silently dropped.
    const functionTools = tools?.filter((t): t is LanguageModelV3FunctionTool => t.type === 'function');

    // Build request body; removeUndefinedEntries strips unset fields.
    const body = removeUndefinedEntries({
      model: this.modelId,
      system: kimiCodeOptions?.system ?? system,
      messages,
      // max_tokens is mandatory upstream; fall back to capability, then 32768.
      max_tokens: maxOutputTokens ?? this.capabilities.maxOutputTokens ?? 32768,
      temperature,
      top_p: topP,
      stop_sequences: kimiCodeOptions?.stopSequences ?? stopSequences,
      tools: convertTools(functionTools),
      tool_choice: toolChoiceParam,
      // Extended thinking parameters (only when explicitly enabled).
      ...(extendedThinking?.enabled && {
        thinking: {
          type: 'enabled',
          budget_tokens: extendedThinking.budgetTokens ?? effortToBudgetTokens(extendedThinking.effort ?? 'medium')
        }
      })
    });

    const requestHeaders: Record<string, string | undefined> = {
      ...(options.headers ?? {})
    };

    return {
      body,
      warnings,
      requestHeaders
    };
  }

  /**
   * Non-streaming generation: POSTs to `/messages`, validates the response,
   * and maps content blocks to AI SDK content parts.
   */
  async doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {
    const { body, warnings, requestHeaders } = await this.getArgs(options);

    const {
      responseHeaders,
      value: rawResponse,
      rawValue
    } = await postJsonToApi({
      url: `${this.config.baseURL}/messages`,
      // NOTE(review): options.headers is already folded into requestHeaders
      // by getArgs, so it is merged twice here — redundant but harmless.
      headers: combineHeaders(this.config.headers(), requestHeaders, options.headers),
      body,
      failedResponseHandler: kimiCodeFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(kimiCodeResponseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });

    // Parse and validate response
    const response = rawResponse as KimiCodeResponse;

    // Extract content from response: text -> text, thinking -> reasoning,
    // tool_use -> tool-call (input re-serialized as a JSON string).
    const content: Array<LanguageModelV3Content> = [];

    for (const block of response.content) {
      switch (block.type) {
        case 'text':
          content.push({ type: 'text', text: block.text });
          break;

        case 'thinking':
          content.push({
            type: 'reasoning',
            text: block.thinking
          });
          break;

        case 'tool_use':
          content.push({
            type: 'tool-call',
            toolCallId: block.id,
            toolName: block.name,
            input: JSON.stringify(block.input)
          });
          break;
      }
    }

    // Provider metadata exposes raw stop info and the request ID header.
    const providerMetadata: SharedV3ProviderMetadata = {
      [this.providerOptionsName]: {
        requestId: responseHeaders?.['x-request-id'] ?? undefined,
        modelId: response.model,
        stopReason: response.stop_reason,
        stopSequence: response.stop_sequence
      }
    };

    return {
      content,
      finishReason: mapStopReason(response.stop_reason),
      usage: convertUsage(response.usage),
      providerMetadata,
      request: { body },
      response: {
        id: response.id,
        modelId: response.model,
        headers: responseHeaders,
        body: rawValue
      },
      warnings
    };
  }

  /**
   * Streaming generation: POSTs to `/messages` with `stream: true` and
   * transforms the SSE event stream into AI SDK stream parts.
   *
   * The transform is a small state machine keyed on the event `type`:
   * text/reasoning blocks are opened and closed around deltas (fixed IDs
   * 'text-0' / 'reasoning-0'), and tool input is accumulated from
   * input_json_delta events until content_block_stop emits the tool-call.
   */
  async doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {
    const { body, warnings, requestHeaders } = await this.getArgs(options);

    const streamBody = {
      ...body,
      stream: true
    };

    const { responseHeaders, value: response } = await postJsonToApi({
      url: `${this.config.baseURL}/messages`,
      // NOTE(review): options.headers is merged twice (also inside
      // requestHeaders) — redundant but harmless.
      headers: combineHeaders(this.config.headers(), requestHeaders, options.headers),
      body: streamBody,
      failedResponseHandler: kimiCodeFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(kimiCodeStreamChunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });

    // Capture into locals: `this` is not available inside the
    // TransformStream callbacks below.
    const providerOptionsName = this.providerOptionsName;
    const generateIdFn = this.generateIdFn;
    const capturedResponseHeaders = responseHeaders;

    // Track state across stream
    let currentBlockType: string | undefined;
    let currentToolCallId: string | undefined;
    let currentToolName: string | undefined;
    let accumulatedToolInput = '';
    let finishReason: LanguageModelV3FinishReason = { unified: 'other', raw: undefined };
    let usage: ReturnType<typeof convertUsage> | undefined;
    let responseId: string | undefined;
    let responseModel: string | undefined;
    let isActiveText = false;
    let isActiveReasoning = false;
    let hasToolCallFinished = false;

    return {
      stream: response.pipeThrough(
        new TransformStream<ParseResult<KimiCodeStreamChunk>, LanguageModelV3StreamPart>({
          start(controller) {
            controller.enqueue({ type: 'stream-start', warnings });
          },

          transform(chunk, controller) {
            // Schema-invalid SSE chunks surface as error parts, not throws.
            if (!chunk.success) {
              controller.enqueue({ type: 'error', error: chunk.error });
              return;
            }

            const data = chunk.value;

            switch (data.type) {
              case 'message_start':
                if (data.message) {
                  responseId = data.message.id;
                  responseModel = data.message.model;
                  if (data.message.usage) {
                    usage = convertUsage(data.message.usage);
                  }
                  // Emit response metadata
                  controller.enqueue({
                    type: 'response-metadata',
                    id: responseId,
                    modelId: responseModel
                  });
                }
                break;

              case 'content_block_start':
                if (data.content_block) {
                  currentBlockType = data.content_block.type;
                  if (data.content_block.type === 'tool_use') {
                    // NOTE(review): any `input` carried on the start event is
                    // ignored; tool input is accumulated only from
                    // input_json_delta events — confirm upstream always
                    // streams the input.
                    currentToolCallId = data.content_block.id ?? generateIdFn();
                    currentToolName = data.content_block.name;
                    accumulatedToolInput = '';
                    hasToolCallFinished = false;

                    // Close any active text/reasoning blocks
                    if (isActiveText) {
                      controller.enqueue({ type: 'text-end', id: 'text-0' });
                      isActiveText = false;
                    }
                    if (isActiveReasoning) {
                      controller.enqueue({ type: 'reasoning-end', id: 'reasoning-0' });
                      isActiveReasoning = false;
                    }

                    controller.enqueue({
                      type: 'tool-input-start',
                      id: currentToolCallId,
                      toolName: currentToolName ?? ''
                    });
                  } else if (data.content_block.type === 'text') {
                    if (!isActiveText) {
                      if (isActiveReasoning) {
                        controller.enqueue({ type: 'reasoning-end', id: 'reasoning-0' });
                        isActiveReasoning = false;
                      }
                      controller.enqueue({ type: 'text-start', id: 'text-0' });
                      isActiveText = true;
                    }
                  } else if (data.content_block.type === 'thinking') {
                    if (!isActiveReasoning) {
                      if (isActiveText) {
                        controller.enqueue({ type: 'text-end', id: 'text-0' });
                        isActiveText = false;
                      }
                      controller.enqueue({ type: 'reasoning-start', id: 'reasoning-0' });
                      isActiveReasoning = true;
                    }
                  }
                }
                break;

              case 'content_block_delta':
                if (data.delta) {
                  if (data.delta.type === 'text_delta' && data.delta.text) {
                    // Defensive: open the text block here too, in case a
                    // delta arrives without a preceding content_block_start.
                    if (!isActiveText) {
                      if (isActiveReasoning) {
                        controller.enqueue({ type: 'reasoning-end', id: 'reasoning-0' });
                        isActiveReasoning = false;
                      }
                      controller.enqueue({ type: 'text-start', id: 'text-0' });
                      isActiveText = true;
                    }
                    controller.enqueue({
                      type: 'text-delta',
                      id: 'text-0',
                      delta: data.delta.text
                    });
                  } else if (data.delta.type === 'thinking_delta' && data.delta.thinking) {
                    if (!isActiveReasoning) {
                      if (isActiveText) {
                        controller.enqueue({ type: 'text-end', id: 'text-0' });
                        isActiveText = false;
                      }
                      controller.enqueue({ type: 'reasoning-start', id: 'reasoning-0' });
                      isActiveReasoning = true;
                    }
                    controller.enqueue({
                      type: 'reasoning-delta',
                      id: 'reasoning-0',
                      delta: data.delta.thinking
                    });
                  } else if (data.delta.type === 'input_json_delta' && data.delta.partial_json) {
                    accumulatedToolInput += data.delta.partial_json;
                    controller.enqueue({
                      type: 'tool-input-delta',
                      id: currentToolCallId ?? '',
                      delta: data.delta.partial_json
                    });
                  }
                }
                break;

              case 'content_block_stop':
                if (currentBlockType === 'tool_use' && currentToolCallId && !hasToolCallFinished) {
                  // Parse accumulated input
                  controller.enqueue({ type: 'tool-input-end', id: currentToolCallId });

                  // Emit the complete tool call with the accumulated raw
                  // JSON string as input.
                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId: currentToolCallId,
                    toolName: currentToolName ?? '',
                    input: accumulatedToolInput
                  });
                  hasToolCallFinished = true;
                } else if (currentBlockType === 'text' && isActiveText) {
                  controller.enqueue({ type: 'text-end', id: 'text-0' });
                  isActiveText = false;
                } else if (currentBlockType === 'thinking' && isActiveReasoning) {
                  controller.enqueue({ type: 'reasoning-end', id: 'reasoning-0' });
                  isActiveReasoning = false;
                }
                // Reset per-block state for the next content block.
                currentBlockType = undefined;
                currentToolCallId = undefined;
                currentToolName = undefined;
                accumulatedToolInput = '';
                break;

              case 'message_delta':
                if (data.delta?.stop_reason) {
                  finishReason = mapStopReason(data.delta.stop_reason);
                }
                if (data.usage) {
                  // Later usage events overwrite earlier ones (e.g. from
                  // message_start).
                  usage = convertUsage(data.usage);
                }
                break;

              case 'message_stop':
                // Close any remaining active blocks
                if (isActiveText) {
                  controller.enqueue({ type: 'text-end', id: 'text-0' });
                }
                if (isActiveReasoning) {
                  controller.enqueue({ type: 'reasoning-end', id: 'reasoning-0' });
                }

                controller.enqueue({
                  type: 'finish',
                  finishReason,
                  usage: usage ?? convertUsage({}),
                  providerMetadata: {
                    [providerOptionsName]: {
                      requestId: capturedResponseHeaders?.['x-request-id'] ?? undefined,
                      modelId: responseModel
                    }
                  }
                });
                break;

              case 'error':
                controller.enqueue({
                  type: 'error',
                  error: new Error(
                    (data as unknown as { error?: { message?: string } }).error?.message ?? 'Unknown streaming error'
                  )
                });
                break;
            }
          }
        })
      ),
      request: { body: streamBody },
      response: {
        headers: responseHeaders
      }
    };
  }
}