ai 3.4.17 → 3.4.18

This diff reflects the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # ai
 
+ ## 3.4.18
+
+ ### Patch Changes
+
+ - 95c67b4: fix (ai/core): handle tool calls without results in message conversion
+
  ## 3.4.17
 
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -608,7 +608,8 @@ It can be a user message, an assistant message, or a tool message.
  type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
  /**
- Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
  type Prompt = {
  /**
@@ -620,7 +621,7 @@ type Prompt = {
  */
  prompt?: string;
  /**
- A list of messsages. You can either use `prompt` or `messages` but not both.
+ A list of messages. You can either use `prompt` or `messages` but not both.
  */
  messages?: Array<CoreMessage>;
  };
@@ -1272,6 +1273,8 @@ type ConvertibleMessage = {
  /**
  Converts an array of messages from useChat into an array of CoreMessages that can be used
  with the AI core functions (e.g. `streamText`).
+
+ Only full tool calls are included in assistant messages. Partial tool calls are removed.
  */
  declare function convertToCoreMessages(messages: Array<ConvertibleMessage>): CoreMessage[];
 
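The corrected comments pin down the `Prompt` contract: an optional system message plus exactly one of `prompt` or `messages`. A minimal TypeScript sketch of both call shapes (a sketch, not part of the diff; it assumes some configured `model`, and supplying both `prompt` and `messages` is rejected with an `InvalidPromptError`):

import { generateText, type CoreMessage, type LanguageModel } from 'ai';

declare const model: LanguageModel; // any configured provider model

// Shape 1: a plain text prompt.
await generateText({
  model,
  system: 'You are a terse assistant.',
  prompt: 'Name one prime number.',
});

// Shape 2: a message list. Use one shape or the other, never both.
const messages: CoreMessage[] = [{ role: 'user', content: 'Name one prime number.' }];
await generateText({ model, system: 'You are a terse assistant.', messages });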
package/dist/index.d.ts CHANGED
@@ -608,7 +608,8 @@ It can be a user message, an assistant message, or a tool message.
  type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
  /**
- Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
  type Prompt = {
  /**
@@ -620,7 +621,7 @@ type Prompt = {
  */
  prompt?: string;
  /**
- A list of messsages. You can either use `prompt` or `messages` but not both.
+ A list of messages. You can either use `prompt` or `messages` but not both.
  */
  messages?: Array<CoreMessage>;
  };
@@ -1272,6 +1273,8 @@ type ConvertibleMessage = {
  /**
  Converts an array of messages from useChat into an array of CoreMessages that can be used
  with the AI core functions (e.g. `streamText`).
+
+ Only full tool calls are included in assistant messages. Partial tool calls are removed.
  */
  declare function convertToCoreMessages(messages: Array<ConvertibleMessage>): CoreMessage[];
 
package/dist/index.js CHANGED
@@ -925,33 +925,16 @@ async function convertToLanguageModelPrompt({
  modelSupportsImageUrls = true,
  downloadImplementation = download
  }) {
+ const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
  const languageModelMessages = [];
  if (prompt.system != null) {
  languageModelMessages.push({ role: "system", content: prompt.system });
  }
- const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
- const promptType = prompt.type;
- switch (promptType) {
- case "prompt": {
- languageModelMessages.push({
- role: "user",
- content: [{ type: "text", text: prompt.prompt }]
- });
- break;
- }
- case "messages": {
- languageModelMessages.push(
- ...prompt.messages.map(
- (message) => convertToLanguageModelMessage(message, downloadedAssets)
- )
- );
- break;
- }
- default: {
- const _exhaustiveCheck = promptType;
- throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
- }
- }
+ languageModelMessages.push(
+ ...prompt.messages.map(
+ (message) => convertToLanguageModelMessage(message, downloadedAssets)
+ )
+ );
  return languageModelMessages;
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
@@ -1365,7 +1348,7 @@ function prepareCallSettings({
  };
  }
 
- // core/prompt/validate-prompt.ts
+ // core/prompt/standardize-prompt.ts
  var import_provider7 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
  var import_zod6 = require("zod");
@@ -1463,8 +1446,8 @@ var coreMessageSchema = import_zod5.z.union([
  coreToolMessageSchema
  ]);
 
- // core/prompt/validate-prompt.ts
- function validatePrompt(prompt) {
+ // core/prompt/standardize-prompt.ts
+ function standardizePrompt(prompt) {
  if (prompt.prompt == null && prompt.messages == null) {
  throw new import_provider7.InvalidPromptError({
  prompt,
@@ -1492,9 +1475,13 @@ function validatePrompt(prompt) {
  }
  return {
  type: "prompt",
- prompt: prompt.prompt,
- messages: void 0,
- system: prompt.system
+ system: prompt.system,
+ messages: [
+ {
+ role: "user",
+ content: prompt.prompt
+ }
+ ]
  };
  }
  if (prompt.messages != null) {
@@ -1511,7 +1498,6 @@ function validatePrompt(prompt) {
  }
  return {
  type: "messages",
- prompt: void 0,
  messages: prompt.messages,
  // only possible case bc of checks above
  system: prompt.system
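The rename is more than cosmetic: `standardizePrompt` now normalizes a plain string prompt into a one-element `messages` array, which is why `convertToLanguageModelPrompt` (earlier in this file) could drop its prompt-type switch and always map over `prompt.messages`. A simplified sketch of the new return shapes (this helper is internal, not exported; the real function also runs zod validation and throws `InvalidPromptError` on bad input):

// Simplified re-implementation of the new return shapes, for illustration only.
type StandardizedPrompt = {
  type: 'prompt' | 'messages';
  system?: string;
  messages: Array<{ role: string; content: unknown }>;
};

function standardizePromptSketch(p: {
  system?: string;
  prompt?: string;
  messages?: Array<{ role: string; content: unknown }>;
}): StandardizedPrompt {
  if (p.prompt != null) {
    // a string prompt becomes a single user message
    return {
      type: 'prompt',
      system: p.system,
      messages: [{ role: 'user', content: p.prompt }],
    };
  }
  return { type: 'messages', system: p.system, messages: p.messages ?? [] };
}

standardizePromptSketch({ system: 'Be brief.', prompt: 'What is 2 + 2?' });
// => { type: 'prompt', system: 'Be brief.',
//      messages: [{ role: 'user', content: 'What is 2 + 2?' }] }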
@@ -2045,7 +2031,7 @@ async function generateObject({
  let resultProviderMetadata;
  switch (mode) {
  case "json": {
- const validatedPrompt = validatePrompt({
+ const standardPrompt = standardizePrompt({
  system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
  prompt: system,
  schema: outputStrategy.jsonSchema
@@ -2054,10 +2040,9 @@ async function generateObject({
  messages
  });
  const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
+ prompt: standardPrompt,
  modelSupportsImageUrls: model.supportsImageUrls
  });
- const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
  () => recordSpan({
  name: "ai.generateObject.doGenerate",
@@ -2070,7 +2055,7 @@ async function generateObject({
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.format": {
- input: () => inputFormat
+ input: () => standardPrompt.type
  },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
@@ -2098,7 +2083,7 @@ async function generateObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat,
+ inputFormat: standardPrompt.type,
  prompt: promptMessages,
  providerMetadata,
  abortSignal,
@@ -2150,7 +2135,7 @@ async function generateObject({
  break;
  }
  case "tool": {
- const validatedPrompt = validatePrompt({
+ const validatedPrompt = standardizePrompt({
  system,
  prompt,
  messages
@@ -2518,7 +2503,7 @@ async function streamObject({
  let transformer;
  switch (mode) {
  case "json": {
- const validatedPrompt = validatePrompt({
+ const standardPrompt = standardizePrompt({
  system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
  prompt: system,
  schema: outputStrategy.jsonSchema
@@ -2534,9 +2519,9 @@ async function streamObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
+ inputFormat: standardPrompt.type,
  prompt: await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
+ prompt: standardPrompt,
  modelSupportsImageUrls: model.supportsImageUrls
  }),
  providerMetadata,
@@ -2560,7 +2545,7 @@ async function streamObject({
  break;
  }
  case "tool": {
- const validatedPrompt = validatePrompt({
+ const validatedPrompt = standardizePrompt({
  system,
  prompt,
  messages
@@ -3228,20 +3213,12 @@ async function generateText({
  fn: async (span) => {
  var _a11, _b, _c, _d, _e;
  const retry = retryWithExponentialBackoff({ maxRetries });
- const validatedPrompt = validatePrompt({
- system,
- prompt,
- messages
- });
+ const currentPrompt = standardizePrompt({ system, prompt, messages });
  const mode = {
  type: "regular",
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
  const callSettings = prepareCallSettings(settings);
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls
- });
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
@@ -3256,7 +3233,13 @@ async function generateText({
  };
  let stepType = "initial";
  do {
- const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
+ if (stepCount === 1) {
+ currentPrompt.type = "messages";
+ }
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: currentPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  currentModelResponse = await retry(
  () => recordSpan({
  name: "ai.generateText.doGenerate",
@@ -3268,7 +3251,7 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": { input: () => currentInputFormat },
+ "ai.prompt.format": { input: () => currentPrompt.type },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
@@ -3290,7 +3273,7 @@ async function generateText({
  const result = await model.doGenerate({
  mode,
  ...callSettings,
- inputFormat: currentInputFormat,
+ inputFormat: currentPrompt.type,
  prompt: promptMessages,
  providerMetadata,
  abortSignal,
@@ -3388,44 +3371,25 @@ async function generateText({
  steps.push(currentStep);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
  if (stepType === "continue") {
- const lastResponseMessage = responseMessages.pop();
- promptMessages.pop();
- if (typeof lastResponseMessage.content === "string") {
- lastResponseMessage.content = text;
+ const lastMessage = currentPrompt.messages[currentPrompt.messages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content = text;
  } else {
- lastResponseMessage.content.push({
+ lastMessage.content.push({
  text: stepText,
  type: "text"
  });
  }
- responseMessages.push(lastResponseMessage);
- promptMessages.push(
- convertToLanguageModelMessage(lastResponseMessage, null)
- );
- } else if (nextStepType === "continue") {
- const newResponseMessages = toResponseMessages({
- text,
- toolCalls: currentToolCalls,
- toolResults: currentToolResults
- });
- responseMessages.push(...newResponseMessages);
- promptMessages.push(
- ...newResponseMessages.map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ responseMessages[responseMessages.length - 1] = lastMessage;
+ currentPrompt.messages[currentPrompt.messages.length - 1] = lastMessage;
  } else {
  const newResponseMessages = toResponseMessages({
- text: currentModelResponse.text,
+ text,
  toolCalls: currentToolCalls,
  toolResults: currentToolResults
  });
  responseMessages.push(...newResponseMessages);
- promptMessages.push(
- ...newResponseMessages.map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ currentPrompt.messages.push(...newResponseMessages);
  }
  stepType = nextStepType;
  } while (stepType !== "done");
@@ -3958,9 +3922,12 @@ async function streamText({
  fn: async (rootSpan) => {
  const retry = retryWithExponentialBackoff({ maxRetries });
  const startStep = async ({
- promptMessages: promptMessages2,
- promptType
+ currentPrompt: currentPrompt2
  }) => {
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: currentPrompt2,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const {
  result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
  doStreamSpan: doStreamSpan2,
@@ -3977,10 +3944,10 @@ async function streamText({
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.format": {
- input: () => promptType
+ input: () => currentPrompt2.type
  },
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages2)
+ input: () => JSON.stringify(promptMessages)
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
@@ -4010,8 +3977,8 @@ async function streamText({
  })
  },
  ...prepareCallSettings(settings),
- inputFormat: promptType,
- prompt: promptMessages2,
+ inputFormat: currentPrompt2.type,
+ prompt: promptMessages,
  providerMetadata,
  abortSignal,
  headers
@@ -4036,18 +4003,12 @@ async function streamText({
  startTimestampMs: startTimestampMs2
  };
  };
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatePrompt({ system, prompt, messages }),
- modelSupportsImageUrls: model.supportsImageUrls
- });
+ const currentPrompt = standardizePrompt({ system, prompt, messages });
  const {
  result: { stream, warnings, rawResponse },
  doStreamSpan,
  startTimestampMs
- } = await startStep({
- promptType: validatePrompt({ system, prompt, messages }).type,
- promptMessages
- });
+ } = await startStep({ currentPrompt });
  return new DefaultStreamTextResult({
  stream,
  warnings,
@@ -4062,7 +4023,7 @@ async function streamText({
  maxSteps,
  continueSteps,
  startStep,
- promptMessages,
+ currentPrompt,
  modelId: model.modelId,
  now: now2,
  currentDate,
@@ -4086,7 +4047,7 @@ var DefaultStreamTextResult = class {
  maxSteps,
  continueSteps,
  startStep,
- promptMessages,
+ currentPrompt,
  modelId,
  now: now2,
  currentDate,
@@ -4131,7 +4092,7 @@ var DefaultStreamTextResult = class {
  startTimestamp,
  doStreamSpan: doStreamSpan2,
  currentStep,
- promptMessages: promptMessages2,
+ currentPrompt: currentPrompt2,
  usage = {
  promptTokens: 0,
  completionTokens: 0,
@@ -4356,29 +4317,34 @@ var DefaultStreamTextResult = class {
  };
  if (nextStepType !== "done") {
  if (stepType === "continue") {
- const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
- lastPromptMessage.content.push({
+ const lastMessage = currentPrompt2.messages[currentPrompt2.messages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content = stepText;
+ } else {
+ lastMessage.content.push({
+ text: stepText,
+ type: "text"
+ });
+ }
+ currentPrompt2.messages[currentPrompt2.messages.length - 1] = lastMessage;
+ } else {
+ const newResponseMessages = toResponseMessages({
  text: stepText,
- type: "text"
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
  });
- } else {
- promptMessages2.push(
- ...toResponseMessages({
- text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults
- }).map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ currentPrompt2.messages.push(...newResponseMessages);
  }
  const {
  result,
  doStreamSpan: doStreamSpan3,
  startTimestampMs: startTimestamp2
  } = await startStep({
- promptType: "messages",
- promptMessages: promptMessages2
+ currentPrompt: {
+ type: "messages",
+ system: currentPrompt2.system,
+ messages: currentPrompt2.messages
+ }
  });
  self.warnings = result.warnings;
  self.rawResponse = result.rawResponse;
@@ -4387,7 +4353,7 @@ var DefaultStreamTextResult = class {
  startTimestamp: startTimestamp2,
  doStreamSpan: doStreamSpan3,
  currentStep: currentStep + 1,
- promptMessages: promptMessages2,
+ currentPrompt: currentPrompt2,
  usage: combinedUsage,
  stepType: nextStepType,
  previousStepText: fullStepText
@@ -4494,7 +4460,7 @@ var DefaultStreamTextResult = class {
  startTimestamp: startTimestampMs,
  doStreamSpan,
  currentStep: 0,
- promptMessages,
+ currentPrompt,
  usage: void 0,
  stepType: "initial"
  });
@@ -4868,7 +4834,7 @@ function convertToCoreMessages(messages) {
  role: "assistant",
  content: [
  { type: "text", text: content },
- ...toolInvocations.map(({ toolCallId, toolName, args }) => ({
+ ...toolInvocations.filter((invocation) => invocation.state !== "partial-call").map(({ toolCallId, toolName, args }) => ({
  type: "tool-call",
  toolCallId,
  toolName,
@@ -4876,25 +4842,19 @@ function convertToCoreMessages(messages) {
  }))
  ]
  });
- coreMessages.push({
- role: "tool",
- content: toolInvocations.map((ToolInvocation) => {
- if (!("result" in ToolInvocation)) {
- throw new MessageConversionError({
- originalMessage: message,
- message: "ToolInvocation must have a result: " + JSON.stringify(ToolInvocation)
- });
- }
- const { toolCallId, toolName, args, result } = ToolInvocation;
- return {
- type: "tool-result",
- toolCallId,
- toolName,
- args,
- result
- };
- })
- });
+ const toolResults = toolInvocations.filter((invocation) => invocation.state === "result").map(({ toolCallId, toolName, args, result }) => ({
+ type: "tool-result",
+ toolCallId,
+ toolName,
+ args,
+ result
+ }));
+ if (toolResults.length > 0) {
+ coreMessages.push({
+ role: "tool",
+ content: toolResults
+ });
+ }
  break;
  }
  case "function":
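This last hunk is the substance of the 3.4.18 patch. Previously every `toolInvocation` was mapped into a tool result, and any invocation without a `result` threw a `MessageConversionError`; now partial calls are filtered out of the assistant message, only invocations with `state: "result"` become tool results, and the `role: "tool"` message is pushed only when at least one result exists. A hedged usage sketch (message shape per `useChat`'s `toolInvocations`; exact types simplified):

import { convertToCoreMessages } from 'ai';

const core = convertToCoreMessages([
  {
    role: 'assistant',
    content: 'Looking that up…',
    toolInvocations: [
      // still streaming: excluded from the converted assistant message
      { state: 'partial-call', toolCallId: 'a', toolName: 'search', args: {} },
      // complete call without a result: kept as a tool call; before 3.4.18 this threw
      { state: 'call', toolCallId: 'b', toolName: 'search', args: { q: 'ai sdk' } },
      // finished: kept as a tool call and also emitted as a tool result
      { state: 'result', toolCallId: 'c', toolName: 'search', args: { q: 'ai' }, result: ['hit'] },
    ],
  },
]);
// core[0]: assistant message with the text part plus tool calls 'b' and 'c'
// core[1]: tool message containing the single tool result for 'c'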