ai 3.4.17 → 3.4.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -846,33 +846,16 @@ async function convertToLanguageModelPrompt({
   modelSupportsImageUrls = true,
   downloadImplementation = download
 }) {
+  const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
   const languageModelMessages = [];
   if (prompt.system != null) {
     languageModelMessages.push({ role: "system", content: prompt.system });
   }
-  const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
-  const promptType = prompt.type;
-  switch (promptType) {
-    case "prompt": {
-      languageModelMessages.push({
-        role: "user",
-        content: [{ type: "text", text: prompt.prompt }]
-      });
-      break;
-    }
-    case "messages": {
-      languageModelMessages.push(
-        ...prompt.messages.map(
-          (message) => convertToLanguageModelMessage(message, downloadedAssets)
-        )
-      );
-      break;
-    }
-    default: {
-      const _exhaustiveCheck = promptType;
-      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
-    }
-  }
+  languageModelMessages.push(
+    ...prompt.messages.map(
+      (message) => convertToLanguageModelMessage(message, downloadedAssets)
+    )
+  );
   return languageModelMessages;
 }
 function convertToLanguageModelMessage(message, downloadedAssets) {
@@ -1286,7 +1269,7 @@ function prepareCallSettings({
   };
 }

-// core/prompt/validate-prompt.ts
+// core/prompt/standardize-prompt.ts
 import { InvalidPromptError } from "@ai-sdk/provider";
 import { safeValidateTypes } from "@ai-sdk/provider-utils";
 import { z as z6 } from "zod";
@@ -1384,8 +1367,8 @@ var coreMessageSchema = z5.union([
   coreToolMessageSchema
 ]);

-// core/prompt/validate-prompt.ts
-function validatePrompt(prompt) {
+// core/prompt/standardize-prompt.ts
+function standardizePrompt(prompt) {
   if (prompt.prompt == null && prompt.messages == null) {
     throw new InvalidPromptError({
       prompt,
@@ -1413,9 +1396,13 @@ function validatePrompt(prompt) {
     }
     return {
       type: "prompt",
-      prompt: prompt.prompt,
-      messages: void 0,
-      system: prompt.system
+      system: prompt.system,
+      messages: [
+        {
+          role: "user",
+          content: prompt.prompt
+        }
+      ]
     };
   }
   if (prompt.messages != null) {
@@ -1432,7 +1419,6 @@ function validatePrompt(prompt) {
     }
     return {
       type: "messages",
-      prompt: void 0,
       messages: prompt.messages,
       // only possible case bc of checks above
       system: prompt.system
@@ -1971,7 +1957,7 @@ async function generateObject({
   let resultProviderMetadata;
   switch (mode) {
     case "json": {
-      const validatedPrompt = validatePrompt({
+      const standardPrompt = standardizePrompt({
         system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
           prompt: system,
           schema: outputStrategy.jsonSchema
@@ -1980,10 +1966,9 @@ async function generateObject({
         messages
       });
       const promptMessages = await convertToLanguageModelPrompt({
-        prompt: validatedPrompt,
+        prompt: standardPrompt,
         modelSupportsImageUrls: model.supportsImageUrls
       });
-      const inputFormat = validatedPrompt.type;
       const generateResult = await retry(
         () => recordSpan({
           name: "ai.generateObject.doGenerate",
@@ -1996,7 +1981,7 @@ async function generateObject({
           }),
           ...baseTelemetryAttributes,
           "ai.prompt.format": {
-            input: () => inputFormat
+            input: () => standardPrompt.type
           },
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
@@ -2024,7 +2009,7 @@ async function generateObject({
             description: schemaDescription
           },
           ...prepareCallSettings(settings),
-          inputFormat,
+          inputFormat: standardPrompt.type,
           prompt: promptMessages,
           providerMetadata,
           abortSignal,
@@ -2076,7 +2061,7 @@ async function generateObject({
       break;
     }
     case "tool": {
-      const validatedPrompt = validatePrompt({
+      const validatedPrompt = standardizePrompt({
         system,
         prompt,
         messages
@@ -2447,7 +2432,7 @@ async function streamObject({
   let transformer;
   switch (mode) {
     case "json": {
-      const validatedPrompt = validatePrompt({
+      const standardPrompt = standardizePrompt({
         system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
           prompt: system,
           schema: outputStrategy.jsonSchema
@@ -2463,9 +2448,9 @@ async function streamObject({
           description: schemaDescription
         },
         ...prepareCallSettings(settings),
-        inputFormat: validatedPrompt.type,
+        inputFormat: standardPrompt.type,
         prompt: await convertToLanguageModelPrompt({
-          prompt: validatedPrompt,
+          prompt: standardPrompt,
           modelSupportsImageUrls: model.supportsImageUrls
         }),
         providerMetadata,
@@ -2489,7 +2474,7 @@ async function streamObject({
       break;
     }
     case "tool": {
-      const validatedPrompt = validatePrompt({
+      const validatedPrompt = standardizePrompt({
         system,
         prompt,
         messages
@@ -3169,20 +3154,12 @@ async function generateText({
     fn: async (span) => {
       var _a11, _b, _c, _d, _e;
       const retry = retryWithExponentialBackoff({ maxRetries });
-      const validatedPrompt = validatePrompt({
-        system,
-        prompt,
-        messages
-      });
+      const currentPrompt = standardizePrompt({ system, prompt, messages });
       const mode = {
         type: "regular",
         ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
       };
       const callSettings = prepareCallSettings(settings);
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: validatedPrompt,
-        modelSupportsImageUrls: model.supportsImageUrls
-      });
       let currentModelResponse;
       let currentToolCalls = [];
       let currentToolResults = [];
@@ -3197,7 +3174,13 @@ async function generateText({
       };
       let stepType = "initial";
       do {
-        const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
+        if (stepCount === 1) {
+          currentPrompt.type = "messages";
+        }
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: currentPrompt,
+          modelSupportsImageUrls: model.supportsImageUrls
+        });
         currentModelResponse = await retry(
           () => recordSpan({
             name: "ai.generateText.doGenerate",
@@ -3209,7 +3192,7 @@ async function generateText({
               telemetry
             }),
             ...baseTelemetryAttributes,
-            "ai.prompt.format": { input: () => currentInputFormat },
+            "ai.prompt.format": { input: () => currentPrompt.type },
             "ai.prompt.messages": {
               input: () => JSON.stringify(promptMessages)
             },
@@ -3231,7 +3214,7 @@ async function generateText({
            const result = await model.doGenerate({
              mode,
              ...callSettings,
-              inputFormat: currentInputFormat,
+              inputFormat: currentPrompt.type,
              prompt: promptMessages,
              providerMetadata,
              abortSignal,
@@ -3329,44 +3312,25 @@ async function generateText({
       steps.push(currentStep);
       await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
       if (stepType === "continue") {
-        const lastResponseMessage = responseMessages.pop();
-        promptMessages.pop();
-        if (typeof lastResponseMessage.content === "string") {
-          lastResponseMessage.content = text;
+        const lastMessage = currentPrompt.messages[currentPrompt.messages.length - 1];
+        if (typeof lastMessage.content === "string") {
+          lastMessage.content = text;
         } else {
-          lastResponseMessage.content.push({
+          lastMessage.content.push({
             text: stepText,
             type: "text"
           });
         }
-        responseMessages.push(lastResponseMessage);
-        promptMessages.push(
-          convertToLanguageModelMessage(lastResponseMessage, null)
-        );
-      } else if (nextStepType === "continue") {
-        const newResponseMessages = toResponseMessages({
-          text,
-          toolCalls: currentToolCalls,
-          toolResults: currentToolResults
-        });
-        responseMessages.push(...newResponseMessages);
-        promptMessages.push(
-          ...newResponseMessages.map(
-            (message) => convertToLanguageModelMessage(message, null)
-          )
-        );
+        responseMessages[responseMessages.length - 1] = lastMessage;
+        currentPrompt.messages[currentPrompt.messages.length - 1] = lastMessage;
       } else {
         const newResponseMessages = toResponseMessages({
-          text: currentModelResponse.text,
+          text,
           toolCalls: currentToolCalls,
           toolResults: currentToolResults
         });
         responseMessages.push(...newResponseMessages);
-        promptMessages.push(
-          ...newResponseMessages.map(
-            (message) => convertToLanguageModelMessage(message, null)
-          )
-        );
+        currentPrompt.messages.push(...newResponseMessages);
       }
       stepType = nextStepType;
     } while (stepType !== "done");
@@ -3899,9 +3863,12 @@ async function streamText({
     fn: async (rootSpan) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
       const startStep = async ({
-        promptMessages: promptMessages2,
-        promptType
+        currentPrompt: currentPrompt2
       }) => {
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: currentPrompt2,
+          modelSupportsImageUrls: model.supportsImageUrls
+        });
        const {
          result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
          doStreamSpan: doStreamSpan2,
@@ -3918,10 +3885,10 @@ async function streamText({
            }),
            ...baseTelemetryAttributes,
            "ai.prompt.format": {
-              input: () => promptType
+              input: () => currentPrompt2.type
            },
            "ai.prompt.messages": {
-              input: () => JSON.stringify(promptMessages2)
+              input: () => JSON.stringify(promptMessages)
            },
            // standardized gen-ai llm span attributes:
            "gen_ai.system": model.provider,
@@ -3951,8 +3918,8 @@ async function streamText({
            })
          },
          ...prepareCallSettings(settings),
-          inputFormat: promptType,
-          prompt: promptMessages2,
+          inputFormat: currentPrompt2.type,
+          prompt: promptMessages,
          providerMetadata,
          abortSignal,
          headers
@@ -3977,18 +3944,12 @@ async function streamText({
          startTimestampMs: startTimestampMs2
        };
      };
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: validatePrompt({ system, prompt, messages }),
-        modelSupportsImageUrls: model.supportsImageUrls
-      });
+      const currentPrompt = standardizePrompt({ system, prompt, messages });
      const {
        result: { stream, warnings, rawResponse },
        doStreamSpan,
        startTimestampMs
-      } = await startStep({
-        promptType: validatePrompt({ system, prompt, messages }).type,
-        promptMessages
-      });
+      } = await startStep({ currentPrompt });
      return new DefaultStreamTextResult({
        stream,
        warnings,
@@ -4003,7 +3964,7 @@ async function streamText({
        maxSteps,
        continueSteps,
        startStep,
-        promptMessages,
+        currentPrompt,
        modelId: model.modelId,
        now: now2,
        currentDate,
@@ -4027,7 +3988,7 @@ var DefaultStreamTextResult = class {
    maxSteps,
    continueSteps,
    startStep,
-    promptMessages,
+    currentPrompt,
    modelId,
    now: now2,
    currentDate,
@@ -4072,7 +4033,7 @@ var DefaultStreamTextResult = class {
      startTimestamp,
      doStreamSpan: doStreamSpan2,
      currentStep,
-      promptMessages: promptMessages2,
+      currentPrompt: currentPrompt2,
      usage = {
        promptTokens: 0,
        completionTokens: 0,
@@ -4297,29 +4258,34 @@ var DefaultStreamTextResult = class {
          };
          if (nextStepType !== "done") {
            if (stepType === "continue") {
-              const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
-              lastPromptMessage.content.push({
+              const lastMessage = currentPrompt2.messages[currentPrompt2.messages.length - 1];
+              if (typeof lastMessage.content === "string") {
+                lastMessage.content = stepText;
+              } else {
+                lastMessage.content.push({
+                  text: stepText,
+                  type: "text"
+                });
+              }
+              currentPrompt2.messages[currentPrompt2.messages.length - 1] = lastMessage;
+            } else {
+              const newResponseMessages = toResponseMessages({
                text: stepText,
-                type: "text"
+                toolCalls: stepToolCalls,
+                toolResults: stepToolResults
              });
-            } else {
-              promptMessages2.push(
-                ...toResponseMessages({
-                  text: stepText,
-                  toolCalls: stepToolCalls,
-                  toolResults: stepToolResults
-                }).map(
-                  (message) => convertToLanguageModelMessage(message, null)
-                )
-              );
+              currentPrompt2.messages.push(...newResponseMessages);
            }
            const {
              result,
              doStreamSpan: doStreamSpan3,
              startTimestampMs: startTimestamp2
            } = await startStep({
-              promptType: "messages",
-              promptMessages: promptMessages2
+              currentPrompt: {
+                type: "messages",
+                system: currentPrompt2.system,
+                messages: currentPrompt2.messages
+              }
            });
            self.warnings = result.warnings;
            self.rawResponse = result.rawResponse;
@@ -4328,7 +4294,7 @@ var DefaultStreamTextResult = class {
            startTimestamp: startTimestamp2,
            doStreamSpan: doStreamSpan3,
            currentStep: currentStep + 1,
-            promptMessages: promptMessages2,
+            currentPrompt: currentPrompt2,
            usage: combinedUsage,
            stepType: nextStepType,
            previousStepText: fullStepText
@@ -4435,7 +4401,7 @@ var DefaultStreamTextResult = class {
      startTimestamp: startTimestampMs,
      doStreamSpan,
      currentStep: 0,
-      promptMessages,
+      currentPrompt,
      usage: void 0,
      stepType: "initial"
    });
@@ -4809,7 +4775,7 @@ function convertToCoreMessages(messages) {
        role: "assistant",
        content: [
          { type: "text", text: content },
-          ...toolInvocations.map(({ toolCallId, toolName, args }) => ({
+          ...toolInvocations.filter((invocation) => invocation.state !== "partial-call").map(({ toolCallId, toolName, args }) => ({
            type: "tool-call",
            toolCallId,
            toolName,
@@ -4817,25 +4783,19 @@ function convertToCoreMessages(messages) {
          }))
        ]
      });
-      coreMessages.push({
-        role: "tool",
-        content: toolInvocations.map((ToolInvocation) => {
-          if (!("result" in ToolInvocation)) {
-            throw new MessageConversionError({
-              originalMessage: message,
-              message: "ToolInvocation must have a result: " + JSON.stringify(ToolInvocation)
-            });
-          }
-          const { toolCallId, toolName, args, result } = ToolInvocation;
-          return {
-            type: "tool-result",
-            toolCallId,
-            toolName,
-            args,
-            result
-          };
-        })
-      });
+      const toolResults = toolInvocations.filter((invocation) => invocation.state === "result").map(({ toolCallId, toolName, args, result }) => ({
+        type: "tool-result",
+        toolCallId,
+        toolName,
+        args,
+        result
+      }));
+      if (toolResults.length > 0) {
+        coreMessages.push({
+          role: "tool",
+          content: toolResults
+        });
+      }
      break;
    }
    case "function":