ai 3.0.27 → 3.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,4 +1,4 @@
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning, LanguageModelV1LogProbs } from '@ai-sdk/provider';
  import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
@@ -286,11 +286,29 @@ declare class GenerateObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  object: T;
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }

@@ -364,6 +382,23 @@ Default and recommended: 'auto' (best mode for the model).
  */
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<StreamObjectResult<T>>;
+ type ObjectStreamPartInput = {
+ type: 'error';
+ error: unknown;
+ } | {
+ type: 'finish';
+ finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ };
+ type ObjectStreamPart<T> = ObjectStreamPartInput | {
+ type: 'object';
+ object: DeepPartial<T>;
+ };
  /**
  The result of a `streamObject` call that contains the partial object stream and additional information.
  */
@@ -373,16 +408,25 @@ declare class StreamObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
- stream: ReadableStream<string | ErrorStreamPart>;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
+ stream: ReadableStream<string | ObjectStreamPartInput>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
+ get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
  }
- type ErrorStreamPart = {
- type: 'error';
- error: unknown;
- };

  /**
  A tool contains the description and the schema of the input that the tool expects.
@@ -565,6 +609,20 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  text: string;
  toolCalls: ToToolCallArray<TOOLS>;
@@ -572,6 +630,10 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }

@@ -632,6 +694,7 @@ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
  } & ToToolResult<TOOLS>) | {
  type: 'finish';
  finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
  usage: {
  promptTokens: number;
  completionTokens: number;
@@ -647,9 +710,21 @@ declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
  stream: ReadableStream<TextStreamPart<TOOLS>>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  /**
  A text stream that returns only the generated text deltas. You can use it
@@ -1795,4 +1870,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ErrorStreamPart, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };
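
The declaration changes above surface two new pieces of provider metadata on every result class: rawResponse (currently just response headers) and logprobs. A minimal TypeScript sketch of reading them after upgrading — the model binding and prompt are illustrative placeholders, not part of this diff, and both new fields may be undefined when the provider does not supply them:

import { experimental_generateText } from 'ai';
import type { LanguageModelV1 } from '@ai-sdk/provider';

// Placeholder: any @ai-sdk provider model (e.g. from @ai-sdk/openai).
declare const model: LanguageModelV1;

async function main() {
  const result = await experimental_generateText({
    model,
    prompt: 'Summarize this changelog in one sentence.',
  });

  // New in this release range: optional raw response headers and logprobs.
  const headers = result.rawResponse?.headers; // Record<string, string> | undefined
  const logprobs = result.logprobs; // LanguageModelV1LogProbs | undefined

  console.log(result.text, headers, logprobs);
}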
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning, LanguageModelV1LogProbs } from '@ai-sdk/provider';
  import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
@@ -286,11 +286,29 @@ declare class GenerateObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  object: T;
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }

@@ -364,6 +382,23 @@ Default and recommended: 'auto' (best mode for the model).
  */
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<StreamObjectResult<T>>;
+ type ObjectStreamPartInput = {
+ type: 'error';
+ error: unknown;
+ } | {
+ type: 'finish';
+ finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ };
+ type ObjectStreamPart<T> = ObjectStreamPartInput | {
+ type: 'object';
+ object: DeepPartial<T>;
+ };
  /**
  The result of a `streamObject` call that contains the partial object stream and additional information.
  */
@@ -373,16 +408,25 @@ declare class StreamObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
- stream: ReadableStream<string | ErrorStreamPart>;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
+ stream: ReadableStream<string | ObjectStreamPartInput>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
+ get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
  }
- type ErrorStreamPart = {
- type: 'error';
- error: unknown;
- };

  /**
  A tool contains the description and the schema of the input that the tool expects.
@@ -565,6 +609,20 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  text: string;
  toolCalls: ToToolCallArray<TOOLS>;
@@ -572,6 +630,10 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }

@@ -632,6 +694,7 @@ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
  } & ToToolResult<TOOLS>) | {
  type: 'finish';
  finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
  usage: {
  promptTokens: number;
  completionTokens: number;
@@ -647,9 +710,21 @@ declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
  stream: ReadableStream<TextStreamPart<TOOLS>>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  /**
  A text stream that returns only the generated text deltas. You can use it
@@ -1795,4 +1870,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ErrorStreamPart, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };
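
The .d.ts changes mirror the .d.mts file above. The most visible API addition there is the fullStream getter on StreamObjectResult, whose ObjectStreamPart union replaces the removed ErrorStreamPart type. A hedged sketch of consuming it, following the 3.0 experimental call shape — the schema and prompt values are invented for illustration, not taken from this diff:

import { experimental_streamObject } from 'ai';
import { z } from 'zod';
import type { LanguageModelV1 } from '@ai-sdk/provider';

// Placeholder: any @ai-sdk provider model.
declare const model: LanguageModelV1;

async function main() {
  const result = await experimental_streamObject({
    model,
    schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
    prompt: 'Propose a blog post about streaming APIs.',
  });

  // fullStream interleaves growing partial objects with 'error' parts and a
  // terminal 'finish' part carrying finishReason, usage, and optional logprobs.
  for await (const part of result.fullStream) {
    switch (part.type) {
      case 'object':
        console.log(part.object); // DeepPartial of the schema's inferred type
        break;
      case 'finish':
        console.log(part.finishReason, part.usage.totalTokens, part.logprobs);
        break;
      case 'error':
        console.error(part.error);
        break;
    }
  }
}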
package/dist/index.js CHANGED
@@ -450,6 +450,8 @@ async function experimental_generateObject({
  let finishReason;
  let usage;
  let warnings;
+ let rawResponse;
+ let logprobs;
  switch (mode) {
  case "json": {
  const validatedPrompt = getValidatedPrompt({
@@ -473,6 +475,8 @@ async function experimental_generateObject({
  finishReason = generateResult.finishReason;
  usage = generateResult.usage;
  warnings = generateResult.warnings;
+ rawResponse = generateResult.rawResponse;
+ logprobs = generateResult.logprobs;
  break;
  }
  case "grammar": {
@@ -497,6 +501,8 @@ async function experimental_generateObject({
  finishReason = generateResult.finishReason;
  usage = generateResult.usage;
  warnings = generateResult.warnings;
+ rawResponse = generateResult.rawResponse;
+ logprobs = generateResult.logprobs;
  break;
  }
  case "tool": {
@@ -530,6 +536,8 @@ async function experimental_generateObject({
  finishReason = generateResult.finishReason;
  usage = generateResult.usage;
  warnings = generateResult.warnings;
+ rawResponse = generateResult.rawResponse;
+ logprobs = generateResult.logprobs;
  break;
  }
  case void 0: {
@@ -548,7 +556,9 @@ async function experimental_generateObject({
  object: parseResult.value,
  finishReason,
  usage: calculateTokenUsage(usage),
- warnings
+ warnings,
+ rawResponse,
+ logprobs
  });
  }
  var GenerateObjectResult = class {
@@ -557,6 +567,8 @@ var GenerateObjectResult = class {
  this.finishReason = options.finishReason;
  this.usage = options.usage;
  this.warnings = options.warnings;
+ this.rawResponse = options.rawResponse;
+ this.logprobs = options.logprobs;
  }
  };

@@ -988,6 +1000,7 @@ async function experimental_streamObject({
  case "text-delta":
  controller.enqueue(chunk.textDelta);
  break;
+ case "finish":
  case "error":
  controller.enqueue(chunk);
  break;
@@ -1015,6 +1028,7 @@ async function experimental_streamObject({
  case "text-delta":
  controller.enqueue(chunk.textDelta);
  break;
+ case "finish":
  case "error":
  controller.enqueue(chunk);
  break;
@@ -1050,6 +1064,7 @@ async function experimental_streamObject({
  case "tool-call-delta":
  controller.enqueue(chunk.argsTextDelta);
  break;
+ case "finish":
  case "error":
  controller.enqueue(chunk);
  break;
@@ -1069,16 +1084,19 @@ async function experimental_streamObject({
  const result = await retry(() => model.doStream(callOptions));
  return new StreamObjectResult({
  stream: result.stream.pipeThrough(new TransformStream(transformer)),
- warnings: result.warnings
+ warnings: result.warnings,
+ rawResponse: result.rawResponse
  });
  }
  var StreamObjectResult = class {
  constructor({
  stream,
- warnings
+ warnings,
+ rawResponse
  }) {
  this.originalStream = stream;
  this.warnings = warnings;
+ this.rawResponse = rawResponse;
  }
  get partialObjectStream() {
  let accumulatedText = "";
@@ -1094,13 +1112,42 @@ var StreamObjectResult = class {
  latestObject = currentObject;
  controller.enqueue(currentObject);
  }
- }
- if (typeof chunk === "object" && chunk.type === "error") {
+ } else if (chunk.type === "error") {
  throw chunk.error;
  }
  }
  });
  }
+ get fullStream() {
+ let accumulatedText = "";
+ let latestObject = void 0;
+ return createAsyncIterableStream(this.originalStream, {
+ transform(chunk, controller) {
+ if (typeof chunk === "string") {
+ accumulatedText += chunk;
+ const currentObject = parsePartialJson(
+ accumulatedText
+ );
+ if (!isDeepEqualData(latestObject, currentObject)) {
+ latestObject = currentObject;
+ controller.enqueue({ type: "object", object: currentObject });
+ }
+ } else {
+ switch (chunk.type) {
+ case "finish":
+ controller.enqueue({
+ ...chunk,
+ usage: calculateTokenUsage(chunk.usage)
+ });
+ break;
+ default:
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ }
+ });
+ }
  };

  // core/generate-text/tool-call.ts
@@ -1185,7 +1232,9 @@ async function experimental_generateText({
  toolResults,
  finishReason: modelResponse.finishReason,
  usage: calculateTokenUsage(modelResponse.usage),
- warnings: modelResponse.warnings
+ warnings: modelResponse.warnings,
+ rawResponse: modelResponse.rawResponse,
+ logprobs: modelResponse.logprobs
  });
  }
  async function executeTools({
@@ -1219,6 +1268,8 @@ var GenerateTextResult = class {
  this.finishReason = options.finishReason;
  this.usage = options.usage;
  this.warnings = options.warnings;
+ this.rawResponse = options.rawResponse;
+ this.logprobs = options.logprobs;
  }
  };

@@ -1319,11 +1370,8 @@ function runToolsTransformation({
  controller.enqueue({
  type: "finish",
  finishReason: chunk.finishReason,
- usage: {
- promptTokens: chunk.usage.promptTokens,
- completionTokens: chunk.usage.completionTokens,
- totalTokens: chunk.usage.promptTokens + chunk.usage.completionTokens
- }
+ logprobs: chunk.logprobs,
+ usage: calculateTokenUsage(chunk.usage)
  });
  break;
  }
@@ -1381,7 +1429,7 @@ async function experimental_streamText({
  }) {
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
- const { stream, warnings } = await retry(
+ const { stream, warnings, rawResponse } = await retry(
  () => model.doStream({
  mode: {
  type: "regular",
@@ -1403,16 +1451,19 @@ async function experimental_streamText({
  tools,
  generatorStream: stream
  }),
- warnings
+ warnings,
+ rawResponse
  });
  }
  var StreamTextResult = class {
  constructor({
  stream,
- warnings
+ warnings,
+ rawResponse
  }) {
  this.originalStream = stream;
  this.warnings = warnings;
+ this.rawResponse = rawResponse;
  }
  /**
  A text stream that returns only the generated text deltas. You can use it