ai 5.0.0-canary.13 → 5.0.0-canary.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +35 -134
- package/dist/index.d.ts +35 -134
- package/dist/index.js +89 -88
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +89 -88
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +0 -5
- package/dist/internal/index.d.ts +0 -5
- package/dist/internal/index.js +0 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +0 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
```diff
@@ -1,5 +1,18 @@
 # ai
 
+## 5.0.0-canary.14
+
+### Patch Changes
+
+- 9bf7291: chore(providers/openai): enable structuredOutputs by default & switch to provider option
+- 4617fab: chore(embedding-models): remove remaining settings
+- a76a62b: feat (ai): add experimental prepareStep callback to generateText
+- Updated dependencies [9bf7291]
+- Updated dependencies [4617fab]
+- Updated dependencies [e030615]
+  - @ai-sdk/provider@2.0.0-canary.11
+  - @ai-sdk/provider-utils@3.0.0-canary.12
+
 ## 5.0.0-canary.13
 
 ### Patch Changes
```
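For context on the first changeset: `structuredOutputs` moves from the OpenAI model-settings argument to a provider option and is enabled by default. A minimal sketch of what a call site might look like after this change; the option name and its placement under `providerOptions.openai` are assumptions inferred from the changeset description, not confirmed by this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// structuredOutputs is now on by default; opting out is assumed to move
// from the model settings argument to providerOptions (hypothetical shape).
const { text } = await generateText({
  model: openai('gpt-4o'), // previously: openai('gpt-4o', { structuredOutputs: false })
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: { structuredOutputs: false }, // assumed option name/shape
  },
});
```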
package/dist/index.d.mts
CHANGED
```diff
@@ -3092,7 +3092,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -3143,6 +3143,28 @@ Optional specification for parsing structured outputs from the LLM response.
 */
 experimental_output?: Output$1<OUTPUT, OUTPUT_PARTIAL>;
 /**
+Optional function that you can use to provide different settings for a step.
+
+@param options - The options for the step.
+@param options.steps - The steps that have been executed so far.
+@param options.stepNumber - The number of the step that is being executed.
+@param options.maxSteps - The maximum number of steps.
+@param options.model - The model that is being used.
+
+@returns An object that contains the settings for the step.
+If you return undefined (or for undefined settings), the settings from the outer level will be used.
+*/
+experimental_prepareStep?: (options: {
+    steps: Array<StepResult<TOOLS>>;
+    stepNumber: number;
+    maxSteps: number;
+    model: LanguageModel;
+}) => PromiseLike<{
+    model?: LanguageModel;
+    toolChoice?: ToolChoice<TOOLS>;
+    experimental_activeTools?: Array<keyof TOOLS>;
+} | undefined>;
+/**
 A function that attempts to repair a tool call that failed to parse.
 */
 experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
```
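The new `experimental_prepareStep` callback above runs before each step of a multi-step `generateText` call and can override the model, tool choice, or active tools for that step. A minimal sketch based only on the type declared in this diff; the model id and tool definition are placeholders, and the `tool()` definition shape (`parameters`/`execute`) may differ in this canary:

```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const result = await generateText({
  model: openai('gpt-4o'),
  maxSteps: 3,
  tools: {
    search: tool({
      description: 'Search the knowledge base',
      parameters: z.object({ query: z.string() }),
      execute: async ({ query }) => `results for: ${query}`,
    }),
  },
  // Runs before each step; receives the steps so far, the step number,
  // the step limit, and the outer model.
  experimental_prepareStep: async ({ stepNumber }) => {
    if (stepNumber === 0) {
      // Force a call to the search tool on the first step only.
      return { toolChoice: { type: 'tool' as const, toolName: 'search' as const } };
    }
    return undefined; // fall back to the outer-level settings
  },
  prompt: 'What changed in ai@5.0.0-canary.14?',
});

console.log(result.text);
```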
```diff
@@ -3850,11 +3872,11 @@ This function does not stream the output. If you want to stream the output, use
 @returns
 A result object that contains the generated object, the finish reason, the token usage, and additional information.
 */
-declare function generateObject<
+declare function generateObject<RESULT extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'enum' | 'no-schema' = RESULT extends string ? 'enum' : 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'enum' ? {
 /**
 The enum values that the model should use.
 */
-enum: Array<
+enum: Array<RESULT>;
 mode?: 'json';
 output: 'enum';
 } : Output extends 'no-schema' ? {} : {
```
```diff
@@ -3916,7 +3938,7 @@ functionality that can be fully encapsulated in the provider.
 generateId?: () => string;
 currentDate?: () => Date;
 };
-}): Promise<GenerateObjectResult<
+}): Promise<GenerateObjectResult<RESULT>>;
 
 /**
 The result of a `streamObject` call that contains the partial object stream and additional information.
```
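The separate `generateObject` overloads are consolidated into the single generic signature above, where `RESULT` is inferred from the schema, or, for `output: 'enum'`, from the `enum` values. A sketch of the two forms under the new types (the model id is a placeholder):

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Schema form: RESULT is inferred from the Zod schema.
const { object: recipe } = await generateObject({
  model: openai('gpt-4o'),
  schema: z.object({ name: z.string(), steps: z.array(z.string()) }),
  prompt: 'Generate a lasagna recipe.',
});

// Enum form: output: 'enum' is intended to narrow RESULT to the
// union of the enum values.
const { object: genre } = await generateObject({
  model: openai('gpt-4o'),
  output: 'enum',
  enum: ['action', 'comedy', 'drama'],
  prompt: 'Classify the genre of this plot: ...',
});
```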
```diff
@@ -4048,27 +4070,20 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
 }) => Promise<void> | void;
 /**
 Generate a structured, typed object for a given prompt and schema using a language model.
-
 This function streams the output. If you do not want to stream the output, use `generateObject` instead.
-
 @return
 A result object for accessing the partial object stream and additional information.
 */
-declare function streamObject<
-output?: 'object' | undefined;
-/**
-The language model to use.
-*/
-model: LanguageModel;
+declare function streamObject<RESULT extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'no-schema' = 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'no-schema' ? {} : {
 /**
 The schema of the object that the model should generate.
 */
-schema:
+schema: SCHEMA;
 /**
 Optional name of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema name.
-
+*/
 schemaName?: string;
 /**
 Optional description of the output that should be generated.
@@ -4088,131 +4103,17 @@ The schema is converted into a JSON schema and used in one of the following ways
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
-*/
-mode?: 'auto' | 'json' | 'tool';
-/**
-Optional telemetry configuration (experimental).
-*/
-experimental_telemetry?: TelemetrySettings;
-/**
-Additional provider-specific options. They are passed through
-to the provider from the AI SDK and enable provider-specific
-functionality that can be fully encapsulated in the provider.
 */
-providerOptions?: ProviderOptions;
-/**
-Callback that is invoked when an error occurs during streaming.
-You can use it to log errors.
-The stream processing will pause until the callback promise is resolved.
-*/
-onError?: StreamObjectOnErrorCallback;
-/**
-Callback that is called when the LLM response and the final object validation are finished.
-*/
-onFinish?: StreamObjectOnFinishCallback<OBJECT>;
-/**
- * Internal. For test use only. May change without notice.
- */
-_internal?: {
-generateId?: () => string;
-currentDate?: () => Date;
-now?: () => number;
-};
-}): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
-/**
-Generate an array with structured, typed elements for a given prompt and element schema using a language model.
-
-This function streams the output. If you do not want to stream the output, use `generateObject` instead.
-
-@return
-A result object for accessing the partial object stream and additional information.
-*/
-declare function streamObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
-output: 'array';
-/**
-The language model to use.
-*/
-model: LanguageModel;
-/**
-The element schema of the array that the model should generate.
-*/
-schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
-/**
-Optional name of the array that should be generated.
-Used by some providers for additional LLM guidance, e.g.
-via tool or schema name.
-*/
-schemaName?: string;
-/**
-Optional description of the array that should be generated.
-Used by some providers for additional LLM guidance, e.g.
-via tool or schema description.
-*/
-schemaDescription?: string;
-/**
-The mode to use for object generation.
-
-The schema is converted into a JSON schema and used in one of the following ways
-
-- 'auto': The provider will choose the best mode for the model.
-- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
-- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
-Please note that most providers do not support all modes.
-
-Default and recommended: 'auto' (best mode for the model).
-*/
 mode?: 'auto' | 'json' | 'tool';
-
-
-*/
-experimental_telemetry?: TelemetrySettings;
-/**
-Additional provider-specific options. They are passed through
-to the provider from the AI SDK and enable provider-specific
-functionality that can be fully encapsulated in the provider.
-*/
-providerOptions?: ProviderOptions;
-/**
-Callback that is invoked when an error occurs during streaming.
-You can use it to log errors.
-The stream processing will pause until the callback promise is resolved.
-*/
-onError?: StreamObjectOnErrorCallback;
-/**
-Callback that is called when the LLM response and the final object validation are finished.
-*/
-onFinish?: StreamObjectOnFinishCallback<Array<ELEMENT>>;
-/**
- * Internal. For test use only. May change without notice.
- */
-_internal?: {
-generateId?: () => string;
-currentDate?: () => Date;
-now?: () => number;
-};
-}): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
-/**
-Generate JSON with any schema for a given prompt using a language model.
-
-This function streams the output. If you do not want to stream the output, use `generateObject` instead.
-
-@return
-A result object for accessing the partial object stream and additional information.
-*/
-declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
-output: 'no-schema';
+}) & {
+output?: Output;
 /**
 The language model to use.
 */
 model: LanguageModel;
 /**
-The mode to use for object generation. Must be "json" for no-schema output.
-*/
-mode?: 'json';
-/**
 Optional telemetry configuration (experimental).
-
+*/
 experimental_telemetry?: TelemetrySettings;
 /**
 Additional provider-specific options. They are passed through
@@ -4228,8 +4129,8 @@ The stream processing will pause until the callback promise is resolved.
 onError?: StreamObjectOnErrorCallback;
 /**
 Callback that is called when the LLM response and the final object validation are finished.
-
-onFinish?: StreamObjectOnFinishCallback<
+*/
+onFinish?: StreamObjectOnFinishCallback<RESULT>;
 /**
 * Internal. For test use only. May change without notice.
 */
@@ -4238,7 +4139,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}): StreamObjectResult<
+}): StreamObjectResult<Output extends 'array' ? RESULT : DeepPartial<RESULT>, Output extends 'array' ? RESULT : RESULT, Output extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
 
 /**
 * A generated audio file.
```
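The three previous `streamObject` overloads (object, array, no-schema) collapse into the single signature above, with the `Output` parameter selecting the result and element-stream types. A sketch of the array form, where `schema` describes a single element; the model id is a placeholder, and `elementStream` is assumed to be the accessor on `StreamObjectResult` backing the third type parameter:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const { elementStream } = streamObject({
  model: openai('gpt-4o'),
  output: 'array',
  schema: z.object({ name: z.string(), role: z.string() }), // element schema
  prompt: 'Generate three fictional team members.',
});

// With output: 'array', the third StreamObjectResult type parameter is
// AsyncIterableStream<element>, so each iteration yields one validated element.
for await (const member of elementStream) {
  console.log(member.name, member.role);
}
```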
package/dist/index.d.ts
CHANGED
The changes are identical to package/dist/index.d.mts above (the CommonJS and ESM type declarations are generated from the same source).