@ai-sdk/provider 0.0.11 → 0.0.13
- package/dist/index.d.mts +35 -4
- package/dist/index.d.ts +35 -4
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -65,6 +65,12 @@ type EmbeddingModelV1<VALUE> = {
    */
  embeddings: Array<EmbeddingModelV1Embedding>;
  /**
+ Token usage. We only have input tokens for embeddings.
+  */
+ usage?: {
+   tokens: number;
+ };
+ /**
  Optional raw response information for debugging purposes.
   */
  rawResponse?: {
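For illustration, a minimal sketch of a provider populating the new optional usage field from its doEmbed implementation. fetchEmbeddings and the upstream response shape (vectors, promptTokens) are hypothetical stand-ins, not part of this package:

import type { EmbeddingModelV1 } from '@ai-sdk/provider';

// Hypothetical upstream call; stands in for a real provider API request.
declare function fetchEmbeddings(
  values: string[],
): Promise<{ vectors: number[][]; promptTokens: number }>;

const doEmbed: EmbeddingModelV1<string>['doEmbed'] = async ({ values }) => {
  const response = await fetchEmbeddings(values);
  return {
    embeddings: response.vectors,
    // New optional field: input token usage for the embedding call.
    usage: { tokens: response.promptTokens },
  };
};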
@@ -482,12 +488,25 @@ type LanguageModelV1CallSettings = {
    */
  temperature?: number;
  /**
+ Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ Providers may have limits on the number of stop sequences.
+  */
+ stopSequences?: string[];
+ /**
  Nucleus sampling.

  It is recommended to set either `temperature` or `topP`, but not both.
   */
  topP?: number;
  /**
+ Only sample from the top K options for each subsequent token.
+
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
+  */
+ topK?: number;
+ /**
  Presence penalty setting. It affects the likelihood of the model to
  repeat information that is already in the prompt.
   */
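The new settings slot in alongside the existing sampling controls; a sketch with illustrative values:

import type { LanguageModelV1CallSettings } from '@ai-sdk/provider';

const settings: LanguageModelV1CallSettings = {
  maxTokens: 256,
  temperature: 0.7,
  // New in this release: stop generation at the first matching sequence.
  stopSequences: ['\n\n', 'END'],
  // New in this release: restrict sampling to the 40 most likely tokens.
  topK: 40,
};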
@@ -498,6 +517,17 @@ type LanguageModelV1CallSettings = {
   */
  frequencyPenalty?: number;
  /**
+ Response format. The output can either be text or JSON. Default is text.
+
+ If JSON is selected, a schema can optionally be provided to guide the LLM.
+  */
+ responseFormat?: {
+   type: 'text';
+ } | {
+   type: 'json';
+   schema?: JSONSchema7;
+ };
+ /**
  The seed (integer) to use for random sampling. If set and supported
  by the model, calls will generate deterministic results.
   */
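A sketch of both responseFormat variants; the JSON schema shown is an arbitrary example:

import type { LanguageModelV1CallSettings } from '@ai-sdk/provider';

// Plain text output (the default):
const textSettings: LanguageModelV1CallSettings = {
  responseFormat: { type: 'text' },
};

// JSON output, optionally guided by a JSON schema:
const jsonSettings: LanguageModelV1CallSettings = {
  responseFormat: {
    type: 'json',
    schema: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city'],
    },
  },
};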
@@ -627,6 +657,9 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
  low level grammar, etc. It can also be used to optimize the efficiency of the
  streaming, e.g. tool-delta stream parts are only needed in the
  object-tool mode.
+
+ @deprecated mode will be removed in v2.
+ All necessary settings will be directly supported through the call settings.
   */
  mode: {
    type: 'regular';
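For context, call options still carry the now-deprecated mode discriminator until v2; a minimal regular-mode sketch, with illustrative prompt content:

import type { LanguageModelV1CallOptions } from '@ai-sdk/provider';

const options: LanguageModelV1CallOptions = {
  mode: { type: 'regular' }, // deprecated: slated for removal in v2
  inputFormat: 'messages',
  prompt: [
    { role: 'user', content: [{ type: 'text', text: 'Hello!' }] },
  ],
};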
@@ -640,9 +673,6 @@ Specifies how the tool should be selected. Defaults to 'auto'.
  toolChoice?: LanguageModelV1ToolChoice;
 } | {
   type: 'object-json';
-} | {
-  type: 'object-grammar';
-  schema: JSONSchema7;
 } | {
   type: 'object-tool';
   tool: LanguageModelV1FunctionTool;
@@ -665,6 +695,7 @@ some settings might not be supported, which can lead to suboptimal results.
 type LanguageModelV1CallWarning = {
   type: 'unsupported-setting';
   setting: keyof LanguageModelV1CallSettings;
+  details?: string;
 } | {
   type: 'other';
   message: string;
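A sketch of the extra detail a provider can now attach to an unsupported-setting warning; the message text is illustrative:

import type { LanguageModelV1CallWarning } from '@ai-sdk/provider';

// A provider that cannot honor topK can now explain why:
const warning: LanguageModelV1CallWarning = {
  type: 'unsupported-setting',
  setting: 'topK',
  // New optional field for provider-specific detail:
  details: 'topK is not supported by this provider; the setting was ignored.',
};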
@@ -735,7 +766,7 @@ type LanguageModelV1 = {
  This is needed to generate the best objects possible w/o requiring the
  user to explicitly specify the object generation mode.
   */
- readonly defaultObjectGenerationMode: 'json' | 'tool' |
+ readonly defaultObjectGenerationMode: 'json' | 'tool' | undefined;
  /**
  Generates a language model output (non-streaming).
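A sketch of the relevant slice of a provider implementation declaring this property. The provider and model names are hypothetical, and doGenerate/doStream are omitted:

import type { LanguageModelV1 } from '@ai-sdk/provider';

const model = {
  specificationVersion: 'v1',
  provider: 'example-provider', // hypothetical
  modelId: 'example-model',     // hypothetical
  // Declared explicitly; can be 'json', 'tool', or undefined.
  defaultObjectGenerationMode: 'tool',
  // ... doGenerate / doStream omitted
} satisfies Partial<LanguageModelV1>;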
package/dist/index.d.ts
CHANGED
(Identical to the changes in package/dist/index.d.mts above; the diff hunks are the same line for line.)