ai 3.4.17 → 3.4.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "ai",
-  "version": "3.4.17",
+  "version": "3.4.18",
   "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript",
   "license": "Apache-2.0",
   "sideEffects": false,
@@ -424,7 +424,8 @@ It can be a user message, an assistant message, or a tool message.
 type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
 /**
-Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+Prompt part of the AI function options.
+It contains a system message, a simple text prompt, or a list of messages.
  */
 type Prompt = {
     /**
@@ -436,7 +437,7 @@ type Prompt = {
      */
     prompt?: string;
     /**
-A list of messsages. You can either use `prompt` or `messages` but not both.
+A list of messages. You can either use `prompt` or `messages` but not both.
      */
     messages?: Array<CoreMessage>;
 };
@@ -422,7 +422,8 @@ It can be a user message, an assistant message, or a tool message.
 type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
 /**
-Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+Prompt part of the AI function options.
+It contains a system message, a simple text prompt, or a list of messages.
  */
 type Prompt = {
     /**
@@ -434,7 +435,7 @@ type Prompt = {
      */
     prompt?: string;
     /**
-A list of messsages. You can either use `prompt` or `messages` but not both.
+A list of messages. You can either use `prompt` or `messages` but not both.
      */
     messages?: Array<CoreMessage>;
 };
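
The two declaration hunks above document that `Prompt` accepts either a plain `prompt` string or a `messages` array, but not both. A small illustrative sketch of the two shapes, using a locally defined `PromptSketch` type because this diff does not show whether `Prompt` itself is exported from the package:

import type { CoreMessage } from "ai"; // CoreMessage appears in the hunks above

// Local sketch of the Prompt shape documented in the declaration hunks.
type PromptSketch = {
  system?: string;
  prompt?: string;
  messages?: Array<CoreMessage>;
};

// Either a simple text prompt...
const textPrompt: PromptSketch = {
  system: "You are terse.",
  prompt: "Summarize this article."
};

// ...or a list of messages, but not both at once.
const messagesPrompt: PromptSketch = {
  messages: [{ role: "user", content: "Summarize this article." }]
};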
@@ -459,33 +459,16 @@ async function convertToLanguageModelPrompt({
   modelSupportsImageUrls = true,
   downloadImplementation = download
 }) {
+  const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
   const languageModelMessages = [];
   if (prompt.system != null) {
     languageModelMessages.push({ role: "system", content: prompt.system });
   }
-  const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
-  const promptType = prompt.type;
-  switch (promptType) {
-    case "prompt": {
-      languageModelMessages.push({
-        role: "user",
-        content: [{ type: "text", text: prompt.prompt }]
-      });
-      break;
-    }
-    case "messages": {
-      languageModelMessages.push(
-        ...prompt.messages.map(
-          (message) => convertToLanguageModelMessage(message, downloadedAssets)
-        )
-      );
-      break;
-    }
-    default: {
-      const _exhaustiveCheck = promptType;
-      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
-    }
-  }
+  languageModelMessages.push(
+    ...prompt.messages.map(
+      (message) => convertToLanguageModelMessage(message, downloadedAssets)
+    )
+  );
   return languageModelMessages;
 }
 function convertToLanguageModelMessage(message, downloadedAssets) {
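
After this hunk, convertToLanguageModelPrompt no longer branches on `prompt.type`; it appears to rely on the prompt already being normalized to a `messages` array (see the `standardizePrompt` hunks below) and simply maps each message. A minimal sketch of that flow, with simplified types and a caller-supplied converter standing in for the real `convertToLanguageModelMessage`:

// Sketch only: simplified types, not the package's real signatures.
type SketchMessage = { role: "system" | "user" | "assistant" | "tool"; content: unknown };

async function convertPromptSketch(
  prompt: { system?: string; messages: Array<SketchMessage> },
  convertMessage: (message: SketchMessage) => SketchMessage
): Promise<Array<SketchMessage>> {
  const languageModelMessages: Array<SketchMessage> = [];
  if (prompt.system != null) {
    languageModelMessages.push({ role: "system", content: prompt.system });
  }
  // No switch over prompt.type anymore: every prompt arrives as messages.
  languageModelMessages.push(...prompt.messages.map(convertMessage));
  return languageModelMessages;
}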
@@ -933,7 +916,7 @@ function prepareToolsAndToolChoice({
   };
 }
 
-// core/prompt/validate-prompt.ts
+// core/prompt/standardize-prompt.ts
 import { InvalidPromptError } from "@ai-sdk/provider";
 import { safeValidateTypes } from "@ai-sdk/provider-utils";
 import { z as z6 } from "zod";
@@ -1031,8 +1014,8 @@ var coreMessageSchema = z5.union([
   coreToolMessageSchema
 ]);
 
-// core/prompt/validate-prompt.ts
-function validatePrompt(prompt) {
+// core/prompt/standardize-prompt.ts
+function standardizePrompt(prompt) {
   if (prompt.prompt == null && prompt.messages == null) {
     throw new InvalidPromptError({
       prompt,
@@ -1060,9 +1043,13 @@ function validatePrompt(prompt) {
     }
     return {
       type: "prompt",
-      prompt: prompt.prompt,
-      messages: void 0,
-      system: prompt.system
+      system: prompt.system,
+      messages: [
+        {
+          role: "user",
+          content: prompt.prompt
+        }
+      ]
     };
   }
   if (prompt.messages != null) {
@@ -1079,7 +1066,6 @@ function validatePrompt(prompt) {
     }
     return {
       type: "messages",
-      prompt: void 0,
       messages: prompt.messages,
       // only possible case bc of checks above
       system: prompt.system
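
Taken together, the two hunks above suggest that standardizePrompt (formerly validatePrompt) now always returns a `messages` array: a plain text prompt is wrapped in a single user message, and the messages branch drops the old `prompt: void 0` field. An illustrative sketch of the resulting shapes (the type below is a local assumption, not an export shown in this diff):

// Sketch only: approximate normalized result shapes.
type SketchCoreMessage = { role: "system" | "user" | "assistant" | "tool"; content: unknown };

type StandardizedPromptSketch = {
  type: "prompt" | "messages";
  system?: string;
  messages: Array<SketchCoreMessage>;
};

// A text prompt such as { prompt: "What is the weather?" } now comes back as:
const fromTextPrompt: StandardizedPromptSketch = {
  type: "prompt",
  messages: [{ role: "user", content: "What is the weather?" }]
};

// A messages prompt keeps its array, without the old `prompt: void 0` field:
const fromMessagesPrompt: StandardizedPromptSketch = {
  type: "messages",
  messages: [{ role: "user", content: "What is the weather?" }]
};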
@@ -1486,7 +1472,7 @@ async function streamUI({
     renderFinished.resolve(void 0);
   }
   const retry = retryWithExponentialBackoff({ maxRetries });
-  const validatedPrompt = validatePrompt({ system, prompt, messages });
+  const validatedPrompt = standardizePrompt({ system, prompt, messages });
   const result = await retry(
     async () => model.doStream({
       mode: {