@ai-sdk/provider 0.0.12 → 0.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +29 -4
- package/dist/index.d.ts +29 -4
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -488,12 +488,25 @@ type LanguageModelV1CallSettings = {
      */
     temperature?: number;
     /**
+    Stop sequences.
+    If set, the model will stop generating text when one of the stop sequences is generated.
+    Providers may have limits on the number of stop sequences.
+     */
+    stopSequences?: string[];
+    /**
     Nucleus sampling.

     It is recommended to set either `temperature` or `topP`, but not both.
      */
     topP?: number;
     /**
+    Only sample from the top K options for each subsequent token.
+
+    Used to remove "long tail" low probability responses.
+    Recommended for advanced use cases only. You usually only need to use temperature.
+     */
+    topK?: number;
+    /**
     Presence penalty setting. It affects the likelihood of the model to
     repeat information that is already in the prompt.
      */
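In caller terms, the two new sampling settings slot in alongside temperature. A minimal sketch, assuming LanguageModelV1CallSettings is importable from '@ai-sdk/provider' (the values are illustrative, not from this diff):

import type { LanguageModelV1CallSettings } from '@ai-sdk/provider';

const settings: LanguageModelV1CallSettings = {
  temperature: 0.7,
  // New in 0.0.13: halt generation at the first matching sequence.
  stopSequences: ['\n\nHuman:'],
  // New in 0.0.13: restrict sampling to the 40 most likely tokens.
  topK: 40,
};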
@@ -504,6 +517,17 @@ type LanguageModelV1CallSettings = {
      */
     frequencyPenalty?: number;
     /**
+    Response format. The output can either be text or JSON. Default is text.
+
+    If JSON is selected, a schema can optionally be provided to guide the LLM.
+     */
+    responseFormat?: {
+        type: 'text';
+    } | {
+        type: 'json';
+        schema?: JSONSchema7;
+    };
+    /**
     The seed (integer) to use for random sampling. If set and supported
     by the model, calls will generate deterministic results.
      */
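The json variant of the new responseFormat can carry an optional JSON Schema. A sketch, assuming the JSONSchema7 type comes from the json-schema types package (the schema contents are illustrative):

import type { JSONSchema7 } from 'json-schema';

// Illustrative schema: ask the model for a { "city": string } object.
const citySchema: JSONSchema7 = {
  type: 'object',
  properties: { city: { type: 'string' } },
  required: ['city'],
};

const responseFormat = { type: 'json', schema: citySchema } as const;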
@@ -633,6 +657,9 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
     low level grammar, etc. It can also be used to optimize the efficiency of the
     streaming, e.g. tool-delta stream parts are only needed in the
     object-tool mode.
+
+    @deprecated mode will be removed in v2.
+    All necessary settings will be directly supported through the call settings.
      */
     mode: {
         type: 'regular';
@@ -646,9 +673,6 @@ Specifies how the tool should be selected. Defaults to 'auto'.
         toolChoice?: LanguageModelV1ToolChoice;
     } | {
         type: 'object-json';
-    } | {
-        type: 'object-grammar';
-        schema: JSONSchema7;
     } | {
         type: 'object-tool';
         tool: LanguageModelV1FunctionTool;
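Callers that relied on the removed object-grammar mode need to migrate. A hypothetical before/after sketch; a schema, if still needed, would move to the new responseFormat setting shown earlier:

// 0.0.12 (removed in 0.0.13):
// mode: { type: 'object-grammar', schema: citySchema }

// 0.0.13: schema-free object generation goes through 'object-json'.
const mode = { type: 'object-json' } as const;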
@@ -671,6 +695,7 @@ some settings might not be supported, which can lead to suboptimal results.
 type LanguageModelV1CallWarning = {
     type: 'unsupported-setting';
     setting: keyof LanguageModelV1CallSettings;
+    details?: string;
 } | {
     type: 'other';
     message: string;
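The new optional details field lets a provider explain why a setting was dropped instead of merely naming it. A sketch, assuming LanguageModelV1CallWarning is exported from '@ai-sdk/provider' (the message text is illustrative):

import type { LanguageModelV1CallWarning } from '@ai-sdk/provider';

const warning: LanguageModelV1CallWarning = {
  type: 'unsupported-setting',
  setting: 'topK',
  details: 'topK is not supported by this provider; the setting was ignored.',
};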
@@ -741,7 +766,7 @@ type LanguageModelV1 = {
     This is needed to generate the best objects possible w/o requiring the
     user to explicitly specify the object generation mode.
      */
-    readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+    readonly defaultObjectGenerationMode: 'json' | 'tool' | undefined;
     /**
     Generates a language model output (non-streaming).

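With undefined as an explicit member of the union, a provider must spell out its default object generation mode (or the lack of one) rather than omit the field. A sketch of the relevant slice of a provider implementation (provider and model names are illustrative):

import type { LanguageModelV1 } from '@ai-sdk/provider';

const model = {
  specificationVersion: 'v1',
  provider: 'example-provider',
  modelId: 'example-model',
  // Must now be stated explicitly, even when there is no default:
  defaultObjectGenerationMode: undefined,
  // doGenerate / doStream omitted for brevity
} satisfies Partial<LanguageModelV1>;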
package/dist/index.d.ts
CHANGED
Identical to the changes in package/dist/index.d.mts above (the same declarations are shipped for the ESM and CJS entry points).