ai 3.0.26 → 3.0.28

This diff shows the changes between publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.d.mts CHANGED
@@ -1,4 +1,4 @@
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning, LanguageModelV1LogProbs } from '@ai-sdk/provider';
  import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
@@ -230,19 +230,17 @@ This function does not stream the output. If you want to stream the output, use

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.
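All four generate/stream functions in this file receive the same wording change: sampling settings are now forwarded to the provider unchanged instead of being documented as fixed 0–1 ranges. A minimal sketch of what that means for callers, where `model` is a placeholder for any LanguageModelV1 instance and the prompt and values are purely illustrative:

import { experimental_generateText } from 'ai';
import type { LanguageModelV1 } from '@ai-sdk/provider';

declare const model: LanguageModelV1; // placeholder: any provider-supplied model

async function run() {
  const { text } = await experimental_generateText({
    model,
    prompt: 'Summarize this release in one sentence.',
    // Forwarded to the provider as-is; the valid range is provider- and
    // model-specific rather than a fixed 0-1 interval.
    temperature: 0.7,
    // Per the docs above, set either `temperature` or `topP`, but not both.
  });
  return text;
}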
@@ -288,11 +286,29 @@ declare class GenerateObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  object: T;
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }
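GenerateObjectResult gains optional raw response headers and a logprobs field. A hedged sketch of reading them, again with a placeholder `model` and an illustrative zod schema; whether logprobs is populated depends on the provider and on enabling log probabilities in the provider settings:

import { experimental_generateObject } from 'ai';
import type { LanguageModelV1 } from '@ai-sdk/provider';
import { z } from 'zod';

declare const model: LanguageModelV1; // placeholder

async function run() {
  const result = await experimental_generateObject({
    model,
    schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
    prompt: 'Describe this changelog entry.',
  });

  // New fields from this diff: raw response headers and optional logprobs.
  const requestId = result.rawResponse?.headers?.['x-request-id']; // header name is illustrative
  if (result.logprobs !== undefined) {
    console.log(result.logprobs); // LanguageModelV1LogProbs, as defined by @ai-sdk/provider
  }
  return { object: result.object, requestId };
}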
@@ -330,19 +346,17 @@ This function streams the output. If you do not want to stream the output, use `

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.

@@ -368,6 +382,23 @@ Default and recommended: 'auto' (best mode for the model).
  */
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<StreamObjectResult<T>>;
+ type ObjectStreamPartInput = {
+ type: 'error';
+ error: unknown;
+ } | {
+ type: 'finish';
+ finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ };
+ type ObjectStreamPart<T> = ObjectStreamPartInput | {
+ type: 'object';
+ object: DeepPartial<T>;
+ };
  /**
  The result of a `streamObject` call that contains the partial object stream and additional information.
  */
@@ -377,16 +408,25 @@ declare class StreamObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
- stream: ReadableStream<string | ErrorStreamPart>;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
+ stream: ReadableStream<string | ObjectStreamPartInput>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
+ get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
  }
- type ErrorStreamPart = {
- type: 'error';
- error: unknown;
- };

  /**
  A tool contains the description and the schema of the input that the tool expects.
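StreamObjectResult swaps the old string | ErrorStreamPart stream for typed ObjectStreamPartInput values and adds a fullStream getter next to partialObjectStream, plus optional raw response headers. A sketch of consuming the new stream, with placeholder `model` and an illustrative schema:

import { experimental_streamObject } from 'ai';
import type { LanguageModelV1 } from '@ai-sdk/provider';
import { z } from 'zod';

declare const model: LanguageModelV1; // placeholder

async function run() {
  const result = await experimental_streamObject({
    model,
    schema: z.object({ steps: z.array(z.string()) }),
    prompt: 'List three steps.',
  });

  console.log(result.rawResponse?.headers); // optional raw response headers

  for await (const part of result.fullStream) {
    switch (part.type) {
      case 'object': // DeepPartial of the schema type, growing as chunks arrive
        console.log(part.object.steps?.length ?? 0);
        break;
      case 'finish': // finishReason, usage, and optional logprobs
        console.log(part.finishReason, part.usage.totalTokens, part.logprobs);
        break;
      case 'error':
        console.error(part.error);
        break;
    }
  }
}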
@@ -510,19 +550,17 @@ This function does not stream the output. If you want to stream the output, use

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.

@@ -571,6 +609,20 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  text: string;
  toolCalls: ToToolCallArray<TOOLS>;
@@ -578,6 +630,10 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }

@@ -595,19 +651,17 @@ This function streams the output. If you do not want to stream the output, use `

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.

@@ -640,6 +694,7 @@ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
  } & ToToolResult<TOOLS>) | {
  type: 'finish';
  finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
  usage: {
  promptTokens: number;
  completionTokens: number;
@@ -655,9 +710,21 @@ declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
  stream: ReadableStream<TextStreamPart<TOOLS>>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  /**
  A text stream that returns only the generated text deltas. You can use it
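StreamTextResult gets the same optional rawResponse field, and the 'finish' part of TextStreamPart now carries optional logprobs. A sketch of reading both, assuming the result's full stream of TextStreamPart values is consumed the same way as in earlier 3.0.x releases (placeholder `model`, illustrative prompt):

import { experimental_streamText } from 'ai';
import type { LanguageModelV1 } from '@ai-sdk/provider';

declare const model: LanguageModelV1; // placeholder

async function run() {
  const result = await experimental_streamText({
    model,
    prompt: 'Stream a short greeting.',
  });

  console.log(result.rawResponse?.headers); // optional raw response headers

  for await (const part of result.fullStream) {
    if (part.type === 'finish') {
      // logprobs is only present when the provider supports it and it was enabled.
      console.log(part.finishReason, part.usage.completionTokens, part.logprobs);
    }
  }
}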
@@ -1803,4 +1870,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ErrorStreamPart, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };
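The export list drops ErrorStreamPart and adds ObjectStreamPart and ObjectStreamPartInput, so code that imported the removed type needs an adjustment along these lines (sketch; the sample object shape is illustrative):

// Before (ai 3.0.26):
// import type { ErrorStreamPart } from 'ai';

// After (ai 3.0.28): the error case is one variant of ObjectStreamPartInput.
import type { ObjectStreamPart, ObjectStreamPartInput } from 'ai';

type ErrorPart = Extract<ObjectStreamPartInput, { type: 'error' }>;
type RecipePart = ObjectStreamPart<{ steps: string[] }>; // also includes the { type: 'object' } variant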
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning, LanguageModelV1LogProbs } from '@ai-sdk/provider';
  import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
@@ -230,19 +230,17 @@ This function does not stream the output. If you want to stream the output, use

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.

@@ -288,11 +286,29 @@ declare class GenerateObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  object: T;
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }

@@ -330,19 +346,17 @@ This function streams the output. If you do not want to stream the output, use `

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.

@@ -368,6 +382,23 @@ Default and recommended: 'auto' (best mode for the model).
  */
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<StreamObjectResult<T>>;
+ type ObjectStreamPartInput = {
+ type: 'error';
+ error: unknown;
+ } | {
+ type: 'finish';
+ finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ };
+ type ObjectStreamPart<T> = ObjectStreamPartInput | {
+ type: 'object';
+ object: DeepPartial<T>;
+ };
  /**
  The result of a `streamObject` call that contains the partial object stream and additional information.
  */
@@ -377,16 +408,25 @@ declare class StreamObjectResult<T> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
- stream: ReadableStream<string | ErrorStreamPart>;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
+ stream: ReadableStream<string | ObjectStreamPartInput>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
+ get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
  }
- type ErrorStreamPart = {
- type: 'error';
- error: unknown;
- };

  /**
  A tool contains the description and the schema of the input that the tool expects.
@@ -510,19 +550,17 @@ This function does not stream the output. If you want to stream the output, use

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.

@@ -571,6 +609,20 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LanguageModelV1LogProbs | undefined;
  constructor(options: {
  text: string;
  toolCalls: ToToolCallArray<TOOLS>;
@@ -578,6 +630,10 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  finishReason: LanguageModelV1FinishReason;
  usage: TokenUsage;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ logprobs: LanguageModelV1LogProbs | undefined;
  });
  }

@@ -595,19 +651,17 @@ This function streams the output. If you do not want to stream the output, use `

  @param maxTokens - Maximum number of tokens to generate.
  @param temperature - Temperature setting.
- This is a number between 0 (almost no randomness) and 1 (very random).
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling. This is a number between 0 and 1.
- E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
- The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
- The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
- 0 means no penalty.
+ The value is passed through to the provider. The range depends on the provider and model.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.

@@ -640,6 +694,7 @@ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
  } & ToToolResult<TOOLS>) | {
  type: 'finish';
  finishReason: LanguageModelV1FinishReason;
+ logprobs?: LanguageModelV1LogProbs;
  usage: {
  promptTokens: number;
  completionTokens: number;
@@ -655,9 +710,21 @@ declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
- constructor({ stream, warnings, }: {
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor({ stream, warnings, rawResponse, }: {
  stream: ReadableStream<TextStreamPart<TOOLS>>;
  warnings: LanguageModelV1CallWarning[] | undefined;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
  });
  /**
  A text stream that returns only the generated text deltas. You can use it
@@ -1803,4 +1870,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ErrorStreamPart, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };