ai 3.4.16 → 3.4.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # ai
 
+ ## 3.4.18
+
+ ### Patch Changes
+
+ - 95c67b4: fix (ai/core): handle tool calls without results in message conversion
+
+ ## 3.4.17
+
+ ### Patch Changes
+
+ - e4ff512: fix (core): prevent unnecessary input/output serialization when telemetry is not enabled
+
  ## 3.4.16
 
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -608,7 +608,8 @@ It can be a user message, an assistant message, or a tool message.
  type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
  /**
- Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
  type Prompt = {
  /**
@@ -620,7 +621,7 @@ type Prompt = {
  */
  prompt?: string;
  /**
- A list of messsages. You can either use `prompt` or `messages` but not both.
+ A list of messages. You can either use `prompt` or `messages` but not both.
  */
  messages?: Array<CoreMessage>;
  };
@@ -1272,6 +1273,8 @@ type ConvertibleMessage = {
  /**
  Converts an array of messages from useChat into an array of CoreMessages that can be used
  with the AI core functions (e.g. `streamText`).
+
+ Only full tool calls are included in assistant messages. Partial tool calls are removed.
  */
  declare function convertToCoreMessages(messages: Array<ConvertibleMessage>): CoreMessage[];
 
package/dist/index.d.ts CHANGED
@@ -608,7 +608,8 @@ It can be a user message, an assistant message, or a tool message.
  type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
  /**
- Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
  type Prompt = {
  /**
@@ -620,7 +621,7 @@ type Prompt = {
  */
  prompt?: string;
  /**
- A list of messsages. You can either use `prompt` or `messages` but not both.
+ A list of messages. You can either use `prompt` or `messages` but not both.
  */
  messages?: Array<CoreMessage>;
  };
@@ -1272,6 +1273,8 @@ type ConvertibleMessage = {
  /**
  Converts an array of messages from useChat into an array of CoreMessages that can be used
  with the AI core functions (e.g. `streamText`).
+
+ Only full tool calls are included in assistant messages. Partial tool calls are removed.
  */
  declare function convertToCoreMessages(messages: Array<ConvertibleMessage>): CoreMessage[];
 
package/dist/index.js CHANGED
@@ -373,6 +373,9 @@ function selectTelemetryAttributes({
  telemetry,
  attributes
  }) {
+ if ((telemetry == null ? void 0 : telemetry.isEnabled) !== true) {
+ return {};
+ }
  return Object.entries(attributes).reduce((attributes2, [key, value]) => {
  if (value === void 0) {
  return attributes2;
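
The early return above is the substance of the 3.4.17 fix: span attributes such as "ai.prompt.messages" are supplied as lazy input()/output() thunks, so skipping the reduce when telemetry is disabled means their JSON.stringify calls never run. A minimal TypeScript sketch of the lazy-attribute pattern (illustrative names, not the package's exports):

    // `collect` stands in for selectTelemetryAttributes.
    type Lazy = { input: () => string };

    function collect(
      telemetry: { isEnabled?: boolean } | undefined,
      attributes: Record<string, Lazy>,
    ): Record<string, string> {
      if (telemetry?.isEnabled !== true) return {}; // thunks are never evaluated
      return Object.fromEntries(
        Object.entries(attributes).map(([key, value]) => [key, value.input()]),
      );
    }

    const promptMessages = [{ role: "user", content: "Hello" }];
    // With telemetry off, the expensive serialization below is skipped entirely.
    collect(undefined, {
      "ai.prompt.messages": { input: () => JSON.stringify(promptMessages) },
    });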
@@ -922,33 +925,16 @@ async function convertToLanguageModelPrompt({
  modelSupportsImageUrls = true,
  downloadImplementation = download
  }) {
+ const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
  const languageModelMessages = [];
  if (prompt.system != null) {
  languageModelMessages.push({ role: "system", content: prompt.system });
  }
- const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
- const promptType = prompt.type;
- switch (promptType) {
- case "prompt": {
- languageModelMessages.push({
- role: "user",
- content: [{ type: "text", text: prompt.prompt }]
- });
- break;
- }
- case "messages": {
- languageModelMessages.push(
- ...prompt.messages.map(
- (message) => convertToLanguageModelMessage(message, downloadedAssets)
- )
- );
- break;
- }
- default: {
- const _exhaustiveCheck = promptType;
- throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
- }
- }
+ languageModelMessages.push(
+ ...prompt.messages.map(
+ (message) => convertToLanguageModelMessage(message, downloadedAssets)
+ )
+ );
  return languageModelMessages;
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
@@ -1362,7 +1348,7 @@ function prepareCallSettings({
  };
  }
 
- // core/prompt/validate-prompt.ts
+ // core/prompt/standardize-prompt.ts
  var import_provider7 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
  var import_zod6 = require("zod");
@@ -1460,8 +1446,8 @@ var coreMessageSchema = import_zod5.z.union([
  coreToolMessageSchema
  ]);
 
- // core/prompt/validate-prompt.ts
- function validatePrompt(prompt) {
+ // core/prompt/standardize-prompt.ts
+ function standardizePrompt(prompt) {
  if (prompt.prompt == null && prompt.messages == null) {
  throw new import_provider7.InvalidPromptError({
  prompt,
@@ -1489,9 +1475,13 @@ function validatePrompt(prompt) {
  }
  return {
  type: "prompt",
- prompt: prompt.prompt,
- messages: void 0,
- system: prompt.system
+ system: prompt.system,
+ messages: [
+ {
+ role: "user",
+ content: prompt.prompt
+ }
+ ]
  };
  }
  if (prompt.messages != null) {
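
The renamed helper also changes shape: standardizePrompt (formerly validatePrompt) no longer carries the raw prompt string forward; a plain text prompt is normalized into a single user message, which is why convertToLanguageModelPrompt above could drop its switch over prompt types. Expected input/output, inferred from this diff (internal helper, not a public API):

    // A string prompt is wrapped in a user message:
    standardizePrompt({ system: "Be terse.", prompt: "Hi" });
    // => { type: "prompt", system: "Be terse.",
    //      messages: [{ role: "user", content: "Hi" }] }

    // A messages prompt passes through, minus the old `prompt: void 0` field:
    standardizePrompt({ messages: [{ role: "user", content: "Hi" }] });
    // => { type: "messages", system: undefined,
    //      messages: [{ role: "user", content: "Hi" }] }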
@@ -1508,7 +1498,6 @@ function validatePrompt(prompt) {
  }
  return {
  type: "messages",
- prompt: void 0,
  messages: prompt.messages,
  // only possible case bc of checks above
  system: prompt.system
@@ -2042,7 +2031,7 @@ async function generateObject({
  let resultProviderMetadata;
  switch (mode) {
  case "json": {
- const validatedPrompt = validatePrompt({
+ const standardPrompt = standardizePrompt({
  system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
  prompt: system,
  schema: outputStrategy.jsonSchema
@@ -2051,10 +2040,9 @@ async function generateObject({
  messages
  });
  const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
+ prompt: standardPrompt,
  modelSupportsImageUrls: model.supportsImageUrls
  });
- const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
  () => recordSpan({
  name: "ai.generateObject.doGenerate",
@@ -2067,7 +2055,7 @@ async function generateObject({
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.format": {
- input: () => inputFormat
+ input: () => standardPrompt.type
  },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
@@ -2095,7 +2083,7 @@ async function generateObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat,
+ inputFormat: standardPrompt.type,
  prompt: promptMessages,
  providerMetadata,
  abortSignal,
@@ -2147,7 +2135,7 @@ async function generateObject({
  break;
  }
  case "tool": {
- const validatedPrompt = validatePrompt({
+ const validatedPrompt = standardizePrompt({
  system,
  prompt,
  messages
@@ -2515,7 +2503,7 @@ async function streamObject({
  let transformer;
  switch (mode) {
  case "json": {
- const validatedPrompt = validatePrompt({
+ const standardPrompt = standardizePrompt({
  system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
  prompt: system,
  schema: outputStrategy.jsonSchema
@@ -2531,9 +2519,9 @@ async function streamObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
+ inputFormat: standardPrompt.type,
  prompt: await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
+ prompt: standardPrompt,
  modelSupportsImageUrls: model.supportsImageUrls
  }),
  providerMetadata,
@@ -2557,7 +2545,7 @@ async function streamObject({
  break;
  }
  case "tool": {
- const validatedPrompt = validatePrompt({
+ const validatedPrompt = standardizePrompt({
  system,
  prompt,
  messages
@@ -3225,20 +3213,12 @@ async function generateText({
  fn: async (span) => {
  var _a11, _b, _c, _d, _e;
  const retry = retryWithExponentialBackoff({ maxRetries });
- const validatedPrompt = validatePrompt({
- system,
- prompt,
- messages
- });
+ const currentPrompt = standardizePrompt({ system, prompt, messages });
  const mode = {
  type: "regular",
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
  const callSettings = prepareCallSettings(settings);
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls
- });
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
@@ -3253,7 +3233,13 @@ async function generateText({
  };
  let stepType = "initial";
  do {
- const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
+ if (stepCount === 1) {
+ currentPrompt.type = "messages";
+ }
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: currentPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  currentModelResponse = await retry(
  () => recordSpan({
  name: "ai.generateText.doGenerate",
@@ -3265,7 +3251,7 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": { input: () => currentInputFormat },
+ "ai.prompt.format": { input: () => currentPrompt.type },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
@@ -3287,7 +3273,7 @@ async function generateText({
  const result = await model.doGenerate({
  mode,
  ...callSettings,
- inputFormat: currentInputFormat,
+ inputFormat: currentPrompt.type,
  prompt: promptMessages,
  providerMetadata,
  abortSignal,
@@ -3385,44 +3371,25 @@ async function generateText({
  steps.push(currentStep);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
  if (stepType === "continue") {
- const lastResponseMessage = responseMessages.pop();
- promptMessages.pop();
- if (typeof lastResponseMessage.content === "string") {
- lastResponseMessage.content = text;
+ const lastMessage = currentPrompt.messages[currentPrompt.messages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content = text;
  } else {
- lastResponseMessage.content.push({
+ lastMessage.content.push({
  text: stepText,
  type: "text"
  });
  }
- responseMessages.push(lastResponseMessage);
- promptMessages.push(
- convertToLanguageModelMessage(lastResponseMessage, null)
- );
- } else if (nextStepType === "continue") {
- const newResponseMessages = toResponseMessages({
- text,
- toolCalls: currentToolCalls,
- toolResults: currentToolResults
- });
- responseMessages.push(...newResponseMessages);
- promptMessages.push(
- ...newResponseMessages.map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ responseMessages[responseMessages.length - 1] = lastMessage;
+ currentPrompt.messages[currentPrompt.messages.length - 1] = lastMessage;
  } else {
  const newResponseMessages = toResponseMessages({
- text: currentModelResponse.text,
+ text,
  toolCalls: currentToolCalls,
  toolResults: currentToolResults
  });
  responseMessages.push(...newResponseMessages);
- promptMessages.push(
- ...newResponseMessages.map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ currentPrompt.messages.push(...newResponseMessages);
  }
  stepType = nextStepType;
  } while (stepType !== "done");
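
Net effect of the generateText rewrite: the step loop keeps one mutable currentPrompt and re-derives promptMessages at the top of every iteration, instead of hand-synchronizing a parallel promptMessages array; after the first step the prompt is always treated as messages. A hedged usage sketch of the multi-step behavior this code path serves (provider import and the experimental_continueSteps option name are assumptions for the 3.4.x line):

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai"; // assumed provider package

    const { text, steps } = await generateText({
      model: openai("gpt-4o"),
      maxSteps: 3, // tool-call roundtrips append assistant/tool messages
      experimental_continueSteps: true, // "continue" steps extend the last assistant message
      prompt: "Write a very long story.",
    });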
@@ -3955,9 +3922,12 @@ async function streamText({
  fn: async (rootSpan) => {
  const retry = retryWithExponentialBackoff({ maxRetries });
  const startStep = async ({
- promptMessages: promptMessages2,
- promptType
+ currentPrompt: currentPrompt2
  }) => {
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: currentPrompt2,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const {
  result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
  doStreamSpan: doStreamSpan2,
@@ -3974,10 +3944,10 @@ async function streamText({
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.format": {
- input: () => promptType
+ input: () => currentPrompt2.type
  },
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages2)
+ input: () => JSON.stringify(promptMessages)
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
@@ -4007,8 +3977,8 @@ async function streamText({
  })
  },
  ...prepareCallSettings(settings),
- inputFormat: promptType,
- prompt: promptMessages2,
+ inputFormat: currentPrompt2.type,
+ prompt: promptMessages,
  providerMetadata,
  abortSignal,
  headers
@@ -4033,18 +4003,12 @@ async function streamText({
  startTimestampMs: startTimestampMs2
  };
  };
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatePrompt({ system, prompt, messages }),
- modelSupportsImageUrls: model.supportsImageUrls
- });
+ const currentPrompt = standardizePrompt({ system, prompt, messages });
  const {
  result: { stream, warnings, rawResponse },
  doStreamSpan,
  startTimestampMs
- } = await startStep({
- promptType: validatePrompt({ system, prompt, messages }).type,
- promptMessages
- });
+ } = await startStep({ currentPrompt });
  return new DefaultStreamTextResult({
  stream,
  warnings,
@@ -4059,7 +4023,7 @@ async function streamText({
  maxSteps,
  continueSteps,
  startStep,
- promptMessages,
+ currentPrompt,
  modelId: model.modelId,
  now: now2,
  currentDate,
@@ -4083,7 +4047,7 @@ var DefaultStreamTextResult = class {
  maxSteps,
  continueSteps,
  startStep,
- promptMessages,
+ currentPrompt,
  modelId,
  now: now2,
  currentDate,
@@ -4128,7 +4092,7 @@ var DefaultStreamTextResult = class {
  startTimestamp,
  doStreamSpan: doStreamSpan2,
  currentStep,
- promptMessages: promptMessages2,
+ currentPrompt: currentPrompt2,
  usage = {
  promptTokens: 0,
  completionTokens: 0,
@@ -4353,29 +4317,34 @@ var DefaultStreamTextResult = class {
  };
  if (nextStepType !== "done") {
  if (stepType === "continue") {
- const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
- lastPromptMessage.content.push({
+ const lastMessage = currentPrompt2.messages[currentPrompt2.messages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content = stepText;
+ } else {
+ lastMessage.content.push({
+ text: stepText,
+ type: "text"
+ });
+ }
+ currentPrompt2.messages[currentPrompt2.messages.length - 1] = lastMessage;
+ } else {
+ const newResponseMessages = toResponseMessages({
  text: stepText,
- type: "text"
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
  });
- } else {
- promptMessages2.push(
- ...toResponseMessages({
- text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults
- }).map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ currentPrompt2.messages.push(...newResponseMessages);
  }
  const {
  result,
  doStreamSpan: doStreamSpan3,
  startTimestampMs: startTimestamp2
  } = await startStep({
- promptType: "messages",
- promptMessages: promptMessages2
+ currentPrompt: {
+ type: "messages",
+ system: currentPrompt2.system,
+ messages: currentPrompt2.messages
+ }
  });
  self.warnings = result.warnings;
  self.rawResponse = result.rawResponse;
@@ -4384,7 +4353,7 @@ var DefaultStreamTextResult = class {
  startTimestamp: startTimestamp2,
  doStreamSpan: doStreamSpan3,
  currentStep: currentStep + 1,
- promptMessages: promptMessages2,
+ currentPrompt: currentPrompt2,
  usage: combinedUsage,
  stepType: nextStepType,
  previousStepText: fullStepText
@@ -4491,7 +4460,7 @@ var DefaultStreamTextResult = class {
  startTimestamp: startTimestampMs,
  doStreamSpan,
  currentStep: 0,
- promptMessages,
+ currentPrompt,
  usage: void 0,
  stepType: "initial"
  });
@@ -4865,7 +4834,7 @@ function convertToCoreMessages(messages) {
  role: "assistant",
  content: [
  { type: "text", text: content },
- ...toolInvocations.map(({ toolCallId, toolName, args }) => ({
+ ...toolInvocations.filter((invocation) => invocation.state !== "partial-call").map(({ toolCallId, toolName, args }) => ({
  type: "tool-call",
  toolCallId,
  toolName,
@@ -4873,25 +4842,19 @@ function convertToCoreMessages(messages) {
  }))
  ]
  });
- coreMessages.push({
- role: "tool",
- content: toolInvocations.map((ToolInvocation) => {
- if (!("result" in ToolInvocation)) {
- throw new MessageConversionError({
- originalMessage: message,
- message: "ToolInvocation must have a result: " + JSON.stringify(ToolInvocation)
- });
- }
- const { toolCallId, toolName, args, result } = ToolInvocation;
- return {
- type: "tool-result",
- toolCallId,
- toolName,
- args,
- result
- };
- })
- });
+ const toolResults = toolInvocations.filter((invocation) => invocation.state === "result").map(({ toolCallId, toolName, args, result }) => ({
+ type: "tool-result",
+ toolCallId,
+ toolName,
+ args,
+ result
+ }));
+ if (toolResults.length > 0) {
+ coreMessages.push({
+ role: "tool",
+ content: toolResults
+ });
+ }
  break;
  }
  case "function":