ai 3.2.35 → 3.2.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -324,32 +324,112 @@ async function embedMany({
   values,
   maxRetries,
   abortSignal,
-  headers
+  headers,
+  experimental_telemetry: telemetry
 }) {
-  var _a, _b, _c;
-  const retry = retryWithExponentialBackoff({ maxRetries });
-  const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
-  if (maxEmbeddingsPerCall == null) {
-    const modelResponse = await retry(
-      () => model.doEmbed({ values, abortSignal, headers })
-    );
-    return new DefaultEmbedManyResult({
-      values,
-      embeddings: modelResponse.embeddings,
-      usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
-    });
-  }
-  const valueChunks = splitArray(values, maxEmbeddingsPerCall);
-  const embeddings = [];
-  let tokens = 0;
-  for (const chunk of valueChunks) {
-    const modelResponse = await retry(
-      () => model.doEmbed({ values: chunk, abortSignal, headers })
-    );
-    embeddings.push(...modelResponse.embeddings);
-    tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
-  }
-  return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.embedMany",
+    model,
+    telemetry,
+    headers,
+    settings: { maxRetries }
+  });
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.embedMany",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.values": values.map((value) => JSON.stringify(value))
+    },
+    tracer,
+    fn: async (span) => {
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
+      if (maxEmbeddingsPerCall == null) {
+        const { embeddings: embeddings2, usage } = await retry(() => {
+          return recordSpan({
+            name: "ai.embedMany.doEmbed",
+            attributes: {
+              ...baseTelemetryAttributes,
+              // specific settings that only make sense on the outer level:
+              "ai.values": values.map((value) => JSON.stringify(value))
+            },
+            tracer,
+            fn: async (doEmbedSpan) => {
+              var _a2;
+              const modelResponse = await model.doEmbed({
+                values,
+                abortSignal,
+                headers
+              });
+              const embeddings3 = modelResponse.embeddings;
+              const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+              doEmbedSpan.setAttributes({
+                "ai.embeddings": embeddings3.map(
+                  (embedding) => JSON.stringify(embedding)
+                ),
+                "ai.usage.tokens": usage2.tokens
+              });
+              return { embeddings: embeddings3, usage: usage2 };
+            }
+          });
+        });
+        span.setAttributes({
+          "ai.embeddings": embeddings2.map(
+            (embedding) => JSON.stringify(embedding)
+          ),
+          "ai.usage.tokens": usage.tokens
+        });
+        return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+      }
+      const valueChunks = splitArray(values, maxEmbeddingsPerCall);
+      const embeddings = [];
+      let tokens = 0;
+      for (const chunk of valueChunks) {
+        const { embeddings: responseEmbeddings, usage } = await retry(() => {
+          return recordSpan({
+            name: "ai.embedMany.doEmbed",
+            attributes: {
+              ...baseTelemetryAttributes,
+              // specific settings that only make sense on the outer level:
+              "ai.values": chunk.map((value) => JSON.stringify(value))
+            },
+            tracer,
+            fn: async (doEmbedSpan) => {
+              var _a2;
+              const modelResponse = await model.doEmbed({
+                values: chunk,
+                abortSignal,
+                headers
+              });
+              const embeddings2 = modelResponse.embeddings;
+              const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+              doEmbedSpan.setAttributes({
+                "ai.embeddings": embeddings2.map(
+                  (embedding) => JSON.stringify(embedding)
+                ),
+                "ai.usage.tokens": usage2.tokens
+              });
+              return { embeddings: embeddings2, usage: usage2 };
+            }
+          });
+        });
+        embeddings.push(...responseEmbeddings);
+        tokens += usage.tokens;
+      }
+      span.setAttributes({
+        "ai.embeddings": embeddings.map((embedding) => JSON.stringify(embedding)),
+        "ai.usage.tokens": tokens
+      });
+      return new DefaultEmbedManyResult({
+        values,
+        embeddings,
+        usage: { tokens }
+      });
+    }
+  });
 }
 var DefaultEmbedManyResult = class {
   constructor(options) {
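The main change in this hunk: `embedMany` now accepts an `experimental_telemetry` option and wraps both the outer call and each provider `doEmbed` request in OpenTelemetry spans (`ai.embedMany` and `ai.embedMany.doEmbed`); in the chunked path, `usage.tokens` is summed across chunks and reported on the outer span. A minimal sketch of opting in, assuming an OpenTelemetry tracer provider is registered globally and using `@ai-sdk/openai` as an example provider:

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embeddings, usage } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: ['sunny day at the beach', 'rainy night in the city'],
  // new in 3.2.36: telemetry is off by default and opt-in per call
  experimental_telemetry: { isEnabled: true },
});
```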
@@ -363,6 +443,9 @@ var DefaultEmbedManyResult = class {
 import { NoObjectGeneratedError } from "@ai-sdk/provider";
 import { safeParseJSON } from "@ai-sdk/provider-utils";
 
+// core/prompt/convert-to-language-model-prompt.ts
+import { getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider-utils";
+
 // core/util/detect-image-mimetype.ts
 var mimeTypeSignatures = [
   { mimeType: "image/gif", bytes: [71, 73, 70] },
@@ -379,6 +462,35 @@ function detectImageMimeType(image) {
   return void 0;
 }
 
+// core/util/download.ts
+import { DownloadError } from "@ai-sdk/provider";
+async function download({
+  url,
+  fetchImplementation = fetch
+}) {
+  var _a;
+  const urlText = url.toString();
+  try {
+    const response = await fetchImplementation(urlText);
+    if (!response.ok) {
+      throw new DownloadError({
+        url: urlText,
+        statusCode: response.status,
+        statusText: response.statusText
+      });
+    }
+    return {
+      data: new Uint8Array(await response.arrayBuffer()),
+      mimeType: (_a = response.headers.get("content-type")) != null ? _a : void 0
+    };
+  } catch (error) {
+    if (DownloadError.isDownloadError(error)) {
+      throw error;
+    }
+    throw new DownloadError({ url: urlText, cause: error });
+  }
+}
+
 // core/prompt/data-content.ts
 import { InvalidDataContentError } from "@ai-sdk/provider";
 import {
@@ -446,12 +558,16 @@ var InvalidMessageRoleError = class extends Error {
 };
 
 // core/prompt/convert-to-language-model-prompt.ts
-import { getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider-utils";
-function convertToLanguageModelPrompt(prompt) {
+async function convertToLanguageModelPrompt({
+  prompt,
+  modelSupportsImageUrls = true,
+  downloadImplementation = download
+}) {
   const languageModelMessages = [];
   if (prompt.system != null) {
     languageModelMessages.push({ role: "system", content: prompt.system });
   }
+  const downloadedImages = modelSupportsImageUrls || prompt.messages == null ? null : await downloadImages(prompt.messages, downloadImplementation);
   const promptType = prompt.type;
   switch (promptType) {
     case "prompt": {
@@ -463,7 +579,9 @@ function convertToLanguageModelPrompt(prompt) {
     }
     case "messages": {
       languageModelMessages.push(
-        ...prompt.messages.map(convertToLanguageModelMessage)
+        ...prompt.messages.map(
+          (message) => convertToLanguageModelMessage(message, downloadedImages)
+        )
       );
       break;
     }
@@ -474,7 +592,7 @@ function convertToLanguageModelPrompt(prompt) {
   }
   return languageModelMessages;
 }
-function convertToLanguageModelMessage(message) {
+function convertToLanguageModelMessage(message, downloadedImages) {
   const role = message.role;
   switch (role) {
     case "system": {
@@ -491,18 +609,27 @@ function convertToLanguageModelMessage(message) {
         role: "user",
         content: message.content.map(
           (part) => {
-            var _a;
+            var _a, _b, _c;
             switch (part.type) {
               case "text": {
                 return part;
               }
               case "image": {
                 if (part.image instanceof URL) {
-                  return {
-                    type: "image",
-                    image: part.image,
-                    mimeType: part.mimeType
-                  };
+                  if (downloadedImages == null) {
+                    return {
+                      type: "image",
+                      image: part.image,
+                      mimeType: part.mimeType
+                    };
+                  } else {
+                    const downloadedImage = downloadedImages[part.image.toString()];
+                    return {
+                      type: "image",
+                      image: downloadedImage.data,
+                      mimeType: (_a = part.mimeType) != null ? _a : downloadedImage.mimeType
+                    };
+                  }
                 }
                 if (typeof part.image === "string") {
                   try {
@@ -510,11 +637,20 @@ function convertToLanguageModelMessage(message) {
                     switch (url.protocol) {
                       case "http:":
                       case "https:": {
-                        return {
-                          type: "image",
-                          image: url,
-                          mimeType: part.mimeType
-                        };
+                        if (downloadedImages == null) {
+                          return {
+                            type: "image",
+                            image: url,
+                            mimeType: part.mimeType
+                          };
+                        } else {
+                          const downloadedImage = downloadedImages[part.image];
+                          return {
+                            type: "image",
+                            image: downloadedImage.data,
+                            mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType
+                          };
+                        }
                       }
                       case "data:": {
                         try {
@@ -549,7 +685,7 @@ function convertToLanguageModelMessage(message) {
                 return {
                   type: "image",
                   image: imageUint8,
-                  mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
+                  mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8)
                 };
               }
             }
@@ -581,6 +717,25 @@ function convertToLanguageModelMessage(message) {
     }
   }
 }
+async function downloadImages(messages, downloadImplementation) {
+  const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
+    (content) => Array.isArray(content)
+  ).flat().filter((part) => part.type === "image").map((part) => part.image).map(
+    (part) => (
+      // support string urls in image parts:
+      typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
+    )
+  ).filter((image) => image instanceof URL);
+  const downloadedImages = await Promise.all(
+    urls.map(async (url) => ({
+      url,
+      data: await downloadImplementation({ url })
+    }))
+  );
+  return Object.fromEntries(
+    downloadedImages.map(({ url, data }) => [url.toString(), data])
+  );
+}
 
 // core/prompt/get-validated-prompt.ts
 import { InvalidPromptError } from "@ai-sdk/provider";
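Taken together, the `convert-to-language-model-prompt.ts` hunks change how image URLs reach providers: `convertToLanguageModelPrompt` is now async, and when a model reports `supportsImageUrls === false`, `downloadImages` fetches every `http(s)` image in the user messages up front and the message conversion substitutes the downloaded bytes (plus the response mime type) for the URL. The public call shape is unchanged; a sketch, where `someVisionModel` is a hypothetical provider model that sets `supportsImageUrls` to `false`:

```ts
import { generateText } from 'ai';

const { text } = await generateText({
  model: someVisionModel, // hypothetical; reports supportsImageUrls === false
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Describe this image.' },
        // 3.2.35 forwarded this URL as-is; 3.2.36 downloads it and
        // passes a Uint8Array with the response content-type instead.
        { type: 'image', image: new URL('https://example.com/cat.png') },
      ],
    },
  ],
});
```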
@@ -825,7 +980,7 @@ async function generateObject({
       ...baseTelemetryAttributes,
       // specific settings that only make sense on the outer level:
       "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.settings.jsonSchema": JSON.stringify(schema.jsonSchema),
+      "ai.schema": JSON.stringify(schema.jsonSchema),
       "ai.settings.mode": mode
     },
     tracer,
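Note the telemetry attribute rename: the JSON schema recorded on the `generateObject` span moves from `ai.settings.jsonSchema` to `ai.schema` (the new `streamObject` span below uses `ai.schema` as well). Any span consumer keyed on the old attribute needs updating; a sketch of a span processor reading the new name, assuming `@opentelemetry/sdk-trace-base` and that the outer span is named `ai.generateObject`:

```ts
import { Context } from '@opentelemetry/api';
import { ReadableSpan, Span, SpanProcessor } from '@opentelemetry/sdk-trace-base';

class SchemaLogger implements SpanProcessor {
  onStart(_span: Span, _context: Context): void {}
  onEnd(span: ReadableSpan): void {
    if (span.name === 'ai.generateObject') {
      // was span.attributes['ai.settings.jsonSchema'] in 3.2.35:
      console.log(span.attributes['ai.schema']);
    }
  }
  forceFlush(): Promise<void> { return Promise.resolve(); }
  shutdown(): Promise<void> { return Promise.resolve(); }
}
```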
@@ -851,7 +1006,10 @@ async function generateObject({
           prompt,
           messages
         });
-        const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: validatedPrompt,
+          modelSupportsImageUrls: model.supportsImageUrls
+        });
         const inputFormat = validatedPrompt.type;
         const generateResult = await retry(
           () => recordSpan({
@@ -899,7 +1057,10 @@ async function generateObject({
           prompt,
           messages
         });
-        const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: validatedPrompt,
+          modelSupportsImageUrls: model.supportsImageUrls
+        });
         const inputFormat = validatedPrompt.type;
         const generateResult = await retry(
           () => recordSpan({
1076
1237
  maxRetries,
1077
1238
  abortSignal,
1078
1239
  headers,
1240
+ experimental_telemetry: telemetry,
1079
1241
  onFinish,
1080
1242
  ...settings
1081
1243
  }) {
1244
+ var _a;
1245
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
1246
+ operationName: "ai.streamObject",
1247
+ model,
1248
+ telemetry,
1249
+ headers,
1250
+ settings: { ...settings, maxRetries }
1251
+ });
1252
+ const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
1082
1253
  const retry = retryWithExponentialBackoff({ maxRetries });
1083
1254
  const schema = asSchema(inputSchema);
1084
- if (mode === "auto" || mode == null) {
1085
- mode = model.defaultObjectGenerationMode;
1086
- }
1087
- let callOptions;
1088
- let transformer;
1089
- switch (mode) {
1090
- case "json": {
1091
- const validatedPrompt = getValidatedPrompt({
1092
- system: injectJsonSchemaIntoSystem({
1093
- system,
1094
- schema: schema.jsonSchema
1095
- }),
1096
- prompt,
1097
- messages
1098
- });
1099
- callOptions = {
1100
- mode: { type: "object-json" },
1101
- ...prepareCallSettings(settings),
1102
- inputFormat: validatedPrompt.type,
1103
- prompt: convertToLanguageModelPrompt(validatedPrompt),
1104
- abortSignal,
1105
- headers
1106
- };
1107
- transformer = {
1108
- transform: (chunk, controller) => {
1109
- switch (chunk.type) {
1110
- case "text-delta":
1111
- controller.enqueue(chunk.textDelta);
1112
- break;
1113
- case "finish":
1114
- case "error":
1115
- controller.enqueue(chunk);
1116
- break;
1117
- }
1255
+ return recordSpan({
1256
+ name: "ai.streamObject",
1257
+ attributes: {
1258
+ ...baseTelemetryAttributes,
1259
+ // specific settings that only make sense on the outer level:
1260
+ "ai.prompt": JSON.stringify({ system, prompt, messages }),
1261
+ "ai.schema": JSON.stringify(schema.jsonSchema),
1262
+ "ai.settings.mode": mode
1263
+ },
1264
+ tracer,
1265
+ endWhenDone: false,
1266
+ fn: async (rootSpan) => {
1267
+ if (mode === "auto" || mode == null) {
1268
+ mode = model.defaultObjectGenerationMode;
1269
+ }
1270
+ let callOptions;
1271
+ let transformer;
1272
+ switch (mode) {
1273
+ case "json": {
1274
+ const validatedPrompt = getValidatedPrompt({
1275
+ system: injectJsonSchemaIntoSystem({
1276
+ system,
1277
+ schema: schema.jsonSchema
1278
+ }),
1279
+ prompt,
1280
+ messages
1281
+ });
1282
+ callOptions = {
1283
+ mode: { type: "object-json" },
1284
+ ...prepareCallSettings(settings),
1285
+ inputFormat: validatedPrompt.type,
1286
+ prompt: await convertToLanguageModelPrompt({
1287
+ prompt: validatedPrompt,
1288
+ modelSupportsImageUrls: model.supportsImageUrls
1289
+ }),
1290
+ abortSignal,
1291
+ headers
1292
+ };
1293
+ transformer = {
1294
+ transform: (chunk, controller) => {
1295
+ switch (chunk.type) {
1296
+ case "text-delta":
1297
+ controller.enqueue(chunk.textDelta);
1298
+ break;
1299
+ case "finish":
1300
+ case "error":
1301
+ controller.enqueue(chunk);
1302
+ break;
1303
+ }
1304
+ }
1305
+ };
1306
+ break;
1118
1307
  }
1119
- };
1120
- break;
1121
- }
1122
- case "tool": {
1123
- const validatedPrompt = getValidatedPrompt({
1124
- system,
1125
- prompt,
1126
- messages
1127
- });
1128
- callOptions = {
1129
- mode: {
1130
- type: "object-tool",
1131
- tool: {
1132
- type: "function",
1133
- name: "json",
1134
- description: "Respond with a JSON object.",
1135
- parameters: schema.jsonSchema
1136
- }
1137
- },
1138
- ...prepareCallSettings(settings),
1139
- inputFormat: validatedPrompt.type,
1140
- prompt: convertToLanguageModelPrompt(validatedPrompt),
1141
- abortSignal,
1142
- headers
1143
- };
1144
- transformer = {
1145
- transform(chunk, controller) {
1146
- switch (chunk.type) {
1147
- case "tool-call-delta":
1148
- controller.enqueue(chunk.argsTextDelta);
1149
- break;
1150
- case "finish":
1151
- case "error":
1152
- controller.enqueue(chunk);
1153
- break;
1154
- }
1308
+ case "tool": {
1309
+ const validatedPrompt = getValidatedPrompt({
1310
+ system,
1311
+ prompt,
1312
+ messages
1313
+ });
1314
+ callOptions = {
1315
+ mode: {
1316
+ type: "object-tool",
1317
+ tool: {
1318
+ type: "function",
1319
+ name: "json",
1320
+ description: "Respond with a JSON object.",
1321
+ parameters: schema.jsonSchema
1322
+ }
1323
+ },
1324
+ ...prepareCallSettings(settings),
1325
+ inputFormat: validatedPrompt.type,
1326
+ prompt: await convertToLanguageModelPrompt({
1327
+ prompt: validatedPrompt,
1328
+ modelSupportsImageUrls: model.supportsImageUrls
1329
+ }),
1330
+ abortSignal,
1331
+ headers
1332
+ };
1333
+ transformer = {
1334
+ transform(chunk, controller) {
1335
+ switch (chunk.type) {
1336
+ case "tool-call-delta":
1337
+ controller.enqueue(chunk.argsTextDelta);
1338
+ break;
1339
+ case "finish":
1340
+ case "error":
1341
+ controller.enqueue(chunk);
1342
+ break;
1343
+ }
1344
+ }
1345
+ };
1346
+ break;
1155
1347
  }
1156
- };
1157
- break;
1158
- }
1159
- case void 0: {
1160
- throw new Error("Model does not have a default object generation mode.");
1161
- }
1162
- default: {
1163
- const _exhaustiveCheck = mode;
1164
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
1348
+ case void 0: {
1349
+ throw new Error(
1350
+ "Model does not have a default object generation mode."
1351
+ );
1352
+ }
1353
+ default: {
1354
+ const _exhaustiveCheck = mode;
1355
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
1356
+ }
1357
+ }
1358
+ const {
1359
+ result: { stream, warnings, rawResponse },
1360
+ doStreamSpan
1361
+ } = await retry(
1362
+ () => recordSpan({
1363
+ name: "ai.streamObject.doStream",
1364
+ attributes: {
1365
+ ...baseTelemetryAttributes,
1366
+ "ai.prompt.format": callOptions.inputFormat,
1367
+ "ai.prompt.messages": JSON.stringify(callOptions.prompt),
1368
+ "ai.settings.mode": mode
1369
+ },
1370
+ tracer,
1371
+ endWhenDone: false,
1372
+ fn: async (doStreamSpan2) => {
1373
+ return {
1374
+ result: await model.doStream(callOptions),
1375
+ doStreamSpan: doStreamSpan2
1376
+ };
1377
+ }
1378
+ })
1379
+ );
1380
+ return new DefaultStreamObjectResult({
1381
+ stream: stream.pipeThrough(new TransformStream(transformer)),
1382
+ warnings,
1383
+ rawResponse,
1384
+ schema,
1385
+ onFinish,
1386
+ rootSpan,
1387
+ doStreamSpan
1388
+ });
1165
1389
  }
1166
- }
1167
- const result = await retry(() => model.doStream(callOptions));
1168
- return new DefaultStreamObjectResult({
1169
- stream: result.stream.pipeThrough(new TransformStream(transformer)),
1170
- warnings: result.warnings,
1171
- rawResponse: result.rawResponse,
1172
- schema,
1173
- onFinish
1174
1390
  });
1175
1391
  }
1176
1392
  var DefaultStreamObjectResult = class {
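`streamObject` gets the same treatment as `embedMany`: the whole call is wrapped in an `ai.streamObject` span, the provider call in an `ai.streamObject.doStream` span, and both are created with `endWhenDone: false` so they can outlive the function call and cover the stream's lifetime. A usage sketch, opt-in as before and assuming a registered tracer provider:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const { partialObjectStream } = await streamObject({
  model: openai('gpt-4o'),
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: 'Suggest a title and tags for a post about TypeScript.',
  experimental_telemetry: { isEnabled: true },
});

for await (const partialObject of partialObjectStream) {
  console.log(partialObject);
}
// per the hunks below: the doStream span ends in flush(), and the
// root span ends in the finally block once the stream closes
```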
@@ -1179,7 +1395,9 @@ var DefaultStreamObjectResult = class {
     warnings,
     rawResponse,
     schema,
-    onFinish
+    onFinish,
+    rootSpan,
+    doStreamSpan
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -1194,10 +1412,15 @@ var DefaultStreamObjectResult = class {
     let accumulatedText = "";
     let delta = "";
     let latestObject = void 0;
+    let firstChunk = true;
     const self = this;
     this.originalStream = stream.pipeThrough(
       new TransformStream({
         async transform(chunk, controller) {
+          if (firstChunk) {
+            firstChunk = false;
+            doStreamSpan.addEvent("ai.stream.firstChunk");
+          }
           if (typeof chunk === "string") {
             accumulatedText += chunk;
             delta += chunk;
@@ -1251,12 +1474,24 @@ var DefaultStreamObjectResult = class {
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
           try {
+            const finalUsage = usage != null ? usage : {
+              promptTokens: NaN,
+              completionTokens: NaN,
+              totalTokens: NaN
+            };
+            doStreamSpan.setAttributes({
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": JSON.stringify(object)
+            });
+            doStreamSpan.end();
+            rootSpan.setAttributes({
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": JSON.stringify(object)
+            });
             await (onFinish == null ? void 0 : onFinish({
-              usage: usage != null ? usage : {
-                promptTokens: NaN,
-                completionTokens: NaN,
-                totalTokens: NaN
-              },
+              usage: finalUsage,
               object,
               error,
               rawResponse,
@@ -1264,6 +1499,8 @@ var DefaultStreamObjectResult = class {
             }));
           } catch (error2) {
             controller.error(error2);
+          } finally {
+            rootSpan.end();
           }
         }
       })
@@ -1470,7 +1707,10 @@ async function generateText({
       ...prepareToolsAndToolChoice({ tools, toolChoice })
     };
     const callSettings = prepareCallSettings(settings);
-    const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+    const promptMessages = await convertToLanguageModelPrompt({
+      prompt: validatedPrompt,
+      modelSupportsImageUrls: model.supportsImageUrls
+    });
     let currentModelResponse;
     let currentToolCalls = [];
     let currentToolResults = [];
@@ -1543,7 +1783,9 @@ async function generateText({
       });
       responseMessages.push(...newResponseMessages);
       promptMessages.push(
-        ...newResponseMessages.map(convertToLanguageModelMessage)
+        ...newResponseMessages.map(
+          (message) => convertToLanguageModelMessage(message, null)
+        )
       );
     } while (
       // there are tool calls:
@@ -1953,7 +2195,10 @@ async function streamText({
     fn: async (rootSpan) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
       const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-      const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+      const promptMessages = await convertToLanguageModelPrompt({
+        prompt: validatedPrompt,
+        modelSupportsImageUrls: model.supportsImageUrls
+      });
       const {
         result: { stream, warnings, rawResponse },
         doStreamSpan
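All of the spans in this release are emitted through `getTracer`, which resolves the OpenTelemetry API's globally registered tracer, so nothing is exported unless the host application sets a provider up. A minimal Node setup sketch, assuming `@opentelemetry/sdk-node` is installed in the application (it is not a dependency of this package):

```ts
import { NodeSDK } from '@opentelemetry/sdk-node';
import { ConsoleSpanExporter } from '@opentelemetry/sdk-trace-node';

// register a global tracer provider before calling the AI SDK functions;
// spans such as ai.embedMany and ai.streamObject.doStream are then exported
const sdk = new NodeSDK({ traceExporter: new ConsoleSpanExporter() });
sdk.start();
```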