ai 6.0.168 → 6.0.170
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -0
- package/dist/index.d.mts +15 -2
- package/dist/index.d.ts +15 -2
- package/dist/index.js +48 -23
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +48 -23
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +21 -1
- package/dist/internal/index.d.ts +21 -1
- package/dist/internal/index.js +29 -19
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -19
- package/dist/internal/index.mjs.map +1 -1
- package/docs/02-foundations/03-prompts.mdx +14 -10
- package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +7 -0
- package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +7 -0
- package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +5 -2
- package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +7 -0
- package/package.json +7 -6
- package/src/generate-object/generate-object.ts +3 -0
- package/src/generate-object/stream-object.ts +6 -0
- package/src/generate-text/generate-text.ts +3 -0
- package/src/generate-text/stream-text.ts +6 -0
- package/src/prompt/prompt.ts +10 -0
- package/src/prompt/standardize-prompt.ts +47 -27
- package/src/ui/validate-ui-messages.ts +9 -0
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,24 @@
|
|
|
1
1
|
# ai
|
|
2
2
|
|
|
3
|
+
## 6.0.170
|
|
4
|
+
|
|
5
|
+
### Patch Changes
|
|
6
|
+
|
|
7
|
+
- 19d587a: fix(ai): add allowSystemInMessages option and warn by default when system messages are found in prompt or messages
|
|
8
|
+
|
|
9
|
+
## 6.0.169
|
|
10
|
+
|
|
11
|
+
### Patch Changes
|
|
12
|
+
|
|
13
|
+
- 2662bb5: skip validation for tool parts in terminal states when tool schema is no longer registered
|
|
14
|
+
- a7f3c72: trigger release for all packages after provenance setup
|
|
15
|
+
- Updated dependencies [a7f3c72]
|
|
16
|
+
- Updated dependencies [4368079]
|
|
17
|
+
- Updated dependencies [c71ad14]
|
|
18
|
+
- @ai-sdk/gateway@3.0.105
|
|
19
|
+
- @ai-sdk/provider@3.0.9
|
|
20
|
+
- @ai-sdk/provider-utils@4.0.24
|
|
21
|
+
|
|
3
22
|
## 6.0.168
|
|
4
23
|
|
|
5
24
|
### Patch Changes
|
package/dist/index.d.mts
CHANGED
|
@@ -679,6 +679,15 @@ type Prompt = {
|
|
|
679
679
|
* System message to include in the prompt. Can be used with `prompt` or `messages`.
|
|
680
680
|
*/
|
|
681
681
|
system?: string | SystemModelMessage | Array<SystemModelMessage>;
|
|
682
|
+
/**
|
|
683
|
+
* Whether system messages are allowed in the `prompt` or `messages` fields.
|
|
684
|
+
*
|
|
685
|
+
* When disabled, system messages must be provided through the `system`
|
|
686
|
+
* option. When unset, system messages are allowed with a warning.
|
|
687
|
+
*
|
|
688
|
+
* @default undefined
|
|
689
|
+
*/
|
|
690
|
+
allowSystemInMessages?: boolean;
|
|
682
691
|
} & ({
|
|
683
692
|
/**
|
|
684
693
|
* A prompt. It can be either a text prompt or a list of messages.
|
|
@@ -1382,6 +1391,7 @@ type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: OnFinishEvent
|
|
|
1382
1391
|
* @param system - A system message that will be part of the prompt.
|
|
1383
1392
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
1384
1393
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
1394
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
1385
1395
|
*
|
|
1386
1396
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
1387
1397
|
* @param temperature - Temperature setting.
|
|
@@ -1423,7 +1433,7 @@ type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: OnFinishEvent
|
|
|
1423
1433
|
* @returns
|
|
1424
1434
|
* A result object that contains the generated text, the results of the tool calls, and additional information.
|
|
1425
1435
|
*/
|
|
1426
|
-
declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, experimental_include: include, _internal: { generateId }, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
|
|
1436
|
+
declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, allowSystemInMessages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, experimental_include: include, _internal: { generateId }, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
|
|
1427
1437
|
/**
|
|
1428
1438
|
* The language model to use.
|
|
1429
1439
|
*/
|
|
@@ -2774,6 +2784,7 @@ type StreamTextOnToolCallFinishCallback<TOOLS extends ToolSet = ToolSet> = (even
|
|
|
2774
2784
|
* @param system - A system message that will be part of the prompt.
|
|
2775
2785
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
2776
2786
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
2787
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
2777
2788
|
*
|
|
2778
2789
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
2779
2790
|
* @param temperature - Temperature setting.
|
|
@@ -2809,7 +2820,7 @@ type StreamTextOnToolCallFinishCallback<TOOLS extends ToolSet = ToolSet> = (even
|
|
|
2809
2820
|
* @returns
|
|
2810
2821
|
* A result object for accessing different stream types and additional information.
|
|
2811
2822
|
*/
|
|
2812
|
-
declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string, never>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, experimental_context, experimental_include: include, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
|
|
2823
|
+
declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string, never>>({ model, tools, toolChoice, system, prompt, messages, allowSystemInMessages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, experimental_context, experimental_include: include, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
|
|
2813
2824
|
/**
|
|
2814
2825
|
* The language model to use.
|
|
2815
2826
|
*/
|
|
@@ -5098,6 +5109,7 @@ type RepairTextFunction = (options: {
|
|
|
5098
5109
|
* @param system - A system message that will be part of the prompt.
|
|
5099
5110
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
5100
5111
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
5112
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
5101
5113
|
*
|
|
5102
5114
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
5103
5115
|
* @param temperature - Temperature setting.
|
|
@@ -5449,6 +5461,7 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
|
|
|
5449
5461
|
* @param system - A system message that will be part of the prompt.
|
|
5450
5462
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
5451
5463
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
5464
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
5452
5465
|
*
|
|
5453
5466
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
5454
5467
|
* @param temperature - Temperature setting.
|
package/dist/index.d.ts
CHANGED
|
@@ -679,6 +679,15 @@ type Prompt = {
|
|
|
679
679
|
* System message to include in the prompt. Can be used with `prompt` or `messages`.
|
|
680
680
|
*/
|
|
681
681
|
system?: string | SystemModelMessage | Array<SystemModelMessage>;
|
|
682
|
+
/**
|
|
683
|
+
* Whether system messages are allowed in the `prompt` or `messages` fields.
|
|
684
|
+
*
|
|
685
|
+
* When disabled, system messages must be provided through the `system`
|
|
686
|
+
* option. When unset, system messages are allowed with a warning.
|
|
687
|
+
*
|
|
688
|
+
* @default undefined
|
|
689
|
+
*/
|
|
690
|
+
allowSystemInMessages?: boolean;
|
|
682
691
|
} & ({
|
|
683
692
|
/**
|
|
684
693
|
* A prompt. It can be either a text prompt or a list of messages.
|
|
@@ -1382,6 +1391,7 @@ type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: OnFinishEvent
|
|
|
1382
1391
|
* @param system - A system message that will be part of the prompt.
|
|
1383
1392
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
1384
1393
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
1394
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
1385
1395
|
*
|
|
1386
1396
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
1387
1397
|
* @param temperature - Temperature setting.
|
|
@@ -1423,7 +1433,7 @@ type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: OnFinishEvent
|
|
|
1423
1433
|
* @returns
|
|
1424
1434
|
* A result object that contains the generated text, the results of the tool calls, and additional information.
|
|
1425
1435
|
*/
|
|
1426
|
-
declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, experimental_include: include, _internal: { generateId }, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
|
|
1436
|
+
declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, allowSystemInMessages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, experimental_include: include, _internal: { generateId }, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
|
|
1427
1437
|
/**
|
|
1428
1438
|
* The language model to use.
|
|
1429
1439
|
*/
|
|
@@ -2774,6 +2784,7 @@ type StreamTextOnToolCallFinishCallback<TOOLS extends ToolSet = ToolSet> = (even
|
|
|
2774
2784
|
* @param system - A system message that will be part of the prompt.
|
|
2775
2785
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
2776
2786
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
2787
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
2777
2788
|
*
|
|
2778
2789
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
2779
2790
|
* @param temperature - Temperature setting.
|
|
@@ -2809,7 +2820,7 @@ type StreamTextOnToolCallFinishCallback<TOOLS extends ToolSet = ToolSet> = (even
|
|
|
2809
2820
|
* @returns
|
|
2810
2821
|
* A result object for accessing different stream types and additional information.
|
|
2811
2822
|
*/
|
|
2812
|
-
declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string, never>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, experimental_context, experimental_include: include, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
|
|
2823
|
+
declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string, never>>({ model, tools, toolChoice, system, prompt, messages, allowSystemInMessages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_onStart: onStart, experimental_onStepStart: onStepStart, experimental_onToolCallStart: onToolCallStart, experimental_onToolCallFinish: onToolCallFinish, experimental_context, experimental_include: include, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
|
|
2813
2824
|
/**
|
|
2814
2825
|
* The language model to use.
|
|
2815
2826
|
*/
|
|
@@ -5098,6 +5109,7 @@ type RepairTextFunction = (options: {
|
|
|
5098
5109
|
* @param system - A system message that will be part of the prompt.
|
|
5099
5110
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
5100
5111
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
5112
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
5101
5113
|
*
|
|
5102
5114
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
5103
5115
|
* @param temperature - Temperature setting.
|
|
@@ -5449,6 +5461,7 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
|
|
|
5449
5461
|
* @param system - A system message that will be part of the prompt.
|
|
5450
5462
|
* @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
|
5451
5463
|
* @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
|
5464
|
+
* @param allowSystemInMessages - Whether system messages are allowed in the `prompt` or `messages` fields. When unset, system messages are allowed with a warning.
|
|
5452
5465
|
*
|
|
5453
5466
|
* @param maxOutputTokens - Maximum number of tokens to generate.
|
|
5454
5467
|
* @param temperature - Temperature setting.
|
package/dist/index.js
CHANGED
|
@@ -1252,7 +1252,7 @@ var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
|
1252
1252
|
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
1253
1253
|
|
|
1254
1254
|
// src/version.ts
|
|
1255
|
-
var VERSION = true ? "6.0.168" : "0.0.0-test";
|
|
1255
|
+
var VERSION = true ? "6.0.170" : "0.0.0-test";
|
|
1256
1256
|
|
|
1257
1257
|
// src/util/download/download.ts
|
|
1258
1258
|
var download = async ({
|
|
@@ -2144,35 +2144,35 @@ var modelMessageSchema = import_v45.z.union([
|
|
|
2144
2144
|
]);
|
|
2145
2145
|
|
|
2146
2146
|
// src/prompt/standardize-prompt.ts
|
|
2147
|
-
async function standardizePrompt(
|
|
2148
|
-
|
|
2147
|
+
async function standardizePrompt({
|
|
2148
|
+
allowSystemInMessages,
|
|
2149
|
+
system,
|
|
2150
|
+
prompt,
|
|
2151
|
+
messages
|
|
2152
|
+
}) {
|
|
2153
|
+
if (prompt == null && messages == null) {
|
|
2149
2154
|
throw new import_provider24.InvalidPromptError({
|
|
2150
2155
|
prompt,
|
|
2151
2156
|
message: "prompt or messages must be defined"
|
|
2152
2157
|
});
|
|
2153
2158
|
}
|
|
2154
|
-
if (prompt.prompt != null && prompt.messages != null) { [truncated in diff rendering; reconstructed from surrounding "prompt and messages cannot be defined at the same time" error — confirm against 6.0.168 source]
|
|
2159
|
+
if (prompt != null && messages != null) {
|
|
2155
2160
|
throw new import_provider24.InvalidPromptError({
|
|
2156
2161
|
prompt,
|
|
2157
2162
|
message: "prompt and messages cannot be defined at the same time"
|
|
2158
2163
|
});
|
|
2159
2164
|
}
|
|
2160
|
-
if (typeof prompt.system !== "string" && prompt.system != null && !asArray(prompt.system).every( [truncated in diff rendering; reconstructed from the predicate on the next removed line and the "system must be a string, SystemModelMessage, or array of SystemModelMessage" error — confirm against 6.0.168 source]
|
|
2161
|
-
(message) => typeof message === "object" && message !== null && "role" in message && message.role === "system"
|
|
2162
|
-
)) {
|
|
2165
|
+
if (typeof system !== "string" && !asArray(system).every((message) => message.role === "system")) {
|
|
2163
2166
|
throw new import_provider24.InvalidPromptError({
|
|
2164
2167
|
prompt,
|
|
2165
2168
|
message: "system must be a string, SystemModelMessage, or array of SystemModelMessage"
|
|
2166
2169
|
});
|
|
2167
2170
|
}
|
|
2168
|
-
|
|
2169
|
-
|
|
2170
|
-
|
|
2171
|
-
|
|
2172
|
-
|
|
2173
|
-
} else if (prompt.messages != null) {
|
|
2174
|
-
messages = prompt.messages;
|
|
2175
|
-
} else {
|
|
2171
|
+
if (prompt != null && typeof prompt === "string") {
|
|
2172
|
+
messages = [{ role: "user", content: prompt }];
|
|
2173
|
+
} else if (prompt != null && Array.isArray(prompt)) {
|
|
2174
|
+
messages = prompt;
|
|
2175
|
+
} else if (messages == null) {
|
|
2176
2176
|
throw new import_provider24.InvalidPromptError({
|
|
2177
2177
|
prompt,
|
|
2178
2178
|
message: "prompt or messages must be defined"
|
|
@@ -2184,6 +2184,19 @@ async function standardizePrompt(prompt) {
|
|
|
2184
2184
|
message: "messages must not be empty"
|
|
2185
2185
|
});
|
|
2186
2186
|
}
|
|
2187
|
+
if (messages.some((message) => message.role === "system")) {
|
|
2188
|
+
if (allowSystemInMessages === false) {
|
|
2189
|
+
throw new import_provider24.InvalidPromptError({
|
|
2190
|
+
prompt,
|
|
2191
|
+
message: "System messages are not allowed in the prompt or messages fields. Use the system option instead."
|
|
2192
|
+
});
|
|
2193
|
+
}
|
|
2194
|
+
if (allowSystemInMessages === void 0) {
|
|
2195
|
+
console.warn(
|
|
2196
|
+
"AI SDK Warning: System messages in the prompt or messages fields can be a security risk because they may enable prompt injection attacks. Use the system option instead when possible. Set allowSystemInMessages to true to suppress this warning, or false to throw an error."
|
|
2197
|
+
);
|
|
2198
|
+
}
|
|
2199
|
+
}
|
|
2187
2200
|
const validationResult = await (0, import_provider_utils8.safeValidateTypes)({
|
|
2188
2201
|
value: messages,
|
|
2189
2202
|
schema: import_v46.z.array(modelMessageSchema)
|
|
@@ -2195,10 +2208,7 @@ async function standardizePrompt(prompt) {
|
|
|
2195
2208
|
cause: validationResult.error
|
|
2196
2209
|
});
|
|
2197
2210
|
}
|
|
2198
|
-
return {
|
|
2199
|
-
messages,
|
|
2200
|
-
system: prompt.system
|
|
2201
|
-
};
|
|
2211
|
+
return { messages, system };
|
|
2202
2212
|
}
|
|
2203
2213
|
|
|
2204
2214
|
// src/prompt/wrap-gateway-error.ts
|
|
@@ -4115,6 +4125,7 @@ async function generateText({
|
|
|
4115
4125
|
system,
|
|
4116
4126
|
prompt,
|
|
4117
4127
|
messages,
|
|
4128
|
+
allowSystemInMessages,
|
|
4118
4129
|
maxRetries: maxRetriesArg,
|
|
4119
4130
|
abortSignal,
|
|
4120
4131
|
timeout,
|
|
@@ -4171,7 +4182,8 @@ async function generateText({
|
|
|
4171
4182
|
const initialPrompt = await standardizePrompt({
|
|
4172
4183
|
system,
|
|
4173
4184
|
prompt,
|
|
4174
|
-
messages
|
|
4185
|
+
messages,
|
|
4186
|
+
allowSystemInMessages
|
|
4175
4187
|
});
|
|
4176
4188
|
const globalTelemetry = createGlobalTelemetry(telemetry == null ? void 0 : telemetry.integrations);
|
|
4177
4189
|
await notify({
|
|
@@ -6452,6 +6464,7 @@ function streamText({
|
|
|
6452
6464
|
system,
|
|
6453
6465
|
prompt,
|
|
6454
6466
|
messages,
|
|
6467
|
+
allowSystemInMessages,
|
|
6455
6468
|
maxRetries,
|
|
6456
6469
|
abortSignal,
|
|
6457
6470
|
timeout,
|
|
@@ -6508,6 +6521,7 @@ function streamText({
|
|
|
6508
6521
|
system,
|
|
6509
6522
|
prompt,
|
|
6510
6523
|
messages,
|
|
6524
|
+
allowSystemInMessages,
|
|
6511
6525
|
tools,
|
|
6512
6526
|
toolChoice,
|
|
6513
6527
|
transforms: asArray(transform),
|
|
@@ -6614,6 +6628,7 @@ var DefaultStreamTextResult = class {
|
|
|
6614
6628
|
system,
|
|
6615
6629
|
prompt,
|
|
6616
6630
|
messages,
|
|
6631
|
+
allowSystemInMessages,
|
|
6617
6632
|
tools,
|
|
6618
6633
|
toolChoice,
|
|
6619
6634
|
transforms,
|
|
@@ -7005,7 +7020,8 @@ var DefaultStreamTextResult = class {
|
|
|
7005
7020
|
const initialPrompt = await standardizePrompt({
|
|
7006
7021
|
system,
|
|
7007
7022
|
prompt,
|
|
7008
|
-
messages
|
|
7023
|
+
messages,
|
|
7024
|
+
allowSystemInMessages
|
|
7009
7025
|
});
|
|
7010
7026
|
await notify({
|
|
7011
7027
|
event: {
|
|
@@ -8951,6 +8967,9 @@ async function safeValidateUIMessages({
|
|
|
8951
8967
|
const toolPart = part;
|
|
8952
8968
|
const toolName = toolPart.type.slice(5);
|
|
8953
8969
|
const tool2 = tools[toolName];
|
|
8970
|
+
if (!tool2 && (toolPart.state === "output-available" || toolPart.state === "output-error" || toolPart.state === "output-denied")) {
|
|
8971
|
+
continue;
|
|
8972
|
+
}
|
|
8954
8973
|
if (!tool2) {
|
|
8955
8974
|
return {
|
|
8956
8975
|
success: false,
|
|
@@ -10109,6 +10128,7 @@ async function generateObject(options) {
|
|
|
10109
10128
|
system,
|
|
10110
10129
|
prompt,
|
|
10111
10130
|
messages,
|
|
10131
|
+
allowSystemInMessages,
|
|
10112
10132
|
maxRetries: maxRetriesArg,
|
|
10113
10133
|
abortSignal,
|
|
10114
10134
|
headers,
|
|
@@ -10193,7 +10213,8 @@ async function generateObject(options) {
|
|
|
10193
10213
|
const standardizedPrompt = await standardizePrompt({
|
|
10194
10214
|
system,
|
|
10195
10215
|
prompt,
|
|
10196
|
-
messages
|
|
10216
|
+
messages,
|
|
10217
|
+
allowSystemInMessages
|
|
10197
10218
|
});
|
|
10198
10219
|
const promptMessages = await convertToLanguageModelPrompt({
|
|
10199
10220
|
prompt: standardizedPrompt,
|
|
@@ -10518,6 +10539,7 @@ function streamObject(options) {
|
|
|
10518
10539
|
system,
|
|
10519
10540
|
prompt,
|
|
10520
10541
|
messages,
|
|
10542
|
+
allowSystemInMessages,
|
|
10521
10543
|
maxRetries,
|
|
10522
10544
|
abortSignal,
|
|
10523
10545
|
headers,
|
|
@@ -10565,6 +10587,7 @@ function streamObject(options) {
|
|
|
10565
10587
|
system,
|
|
10566
10588
|
prompt,
|
|
10567
10589
|
messages,
|
|
10590
|
+
allowSystemInMessages,
|
|
10568
10591
|
schemaName,
|
|
10569
10592
|
schemaDescription,
|
|
10570
10593
|
providerOptions,
|
|
@@ -10589,6 +10612,7 @@ var DefaultStreamObjectResult = class {
|
|
|
10589
10612
|
system,
|
|
10590
10613
|
prompt,
|
|
10591
10614
|
messages,
|
|
10615
|
+
allowSystemInMessages,
|
|
10592
10616
|
schemaName,
|
|
10593
10617
|
schemaDescription,
|
|
10594
10618
|
providerOptions,
|
|
@@ -10659,7 +10683,8 @@ var DefaultStreamObjectResult = class {
|
|
|
10659
10683
|
const standardizedPrompt = await standardizePrompt({
|
|
10660
10684
|
system,
|
|
10661
10685
|
prompt,
|
|
10662
|
-
messages
|
|
10686
|
+
messages,
|
|
10687
|
+
allowSystemInMessages
|
|
10663
10688
|
});
|
|
10664
10689
|
const callOptions = {
|
|
10665
10690
|
responseFormat: {
|