ai 3.2.35 → 3.2.37

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -31,7 +31,7 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  var streams_exports = {};
  __export(streams_exports, {
  AIStream: () => AIStream,
- APICallError: () => import_provider8.APICallError,
+ APICallError: () => import_provider9.APICallError,
  AWSBedrockAnthropicMessagesStream: () => AWSBedrockAnthropicMessagesStream,
  AWSBedrockAnthropicStream: () => AWSBedrockAnthropicStream,
  AWSBedrockCohereStream: () => AWSBedrockCohereStream,
@@ -40,35 +40,35 @@ __export(streams_exports, {
  AnthropicStream: () => AnthropicStream,
  AssistantResponse: () => AssistantResponse,
  CohereStream: () => CohereStream,
- EmptyResponseBodyError: () => import_provider8.EmptyResponseBodyError,
+ EmptyResponseBodyError: () => import_provider9.EmptyResponseBodyError,
  GoogleGenerativeAIStream: () => GoogleGenerativeAIStream,
  HuggingFaceStream: () => HuggingFaceStream,
  InkeepStream: () => InkeepStream,
- InvalidArgumentError: () => import_provider8.InvalidArgumentError,
- InvalidDataContentError: () => import_provider8.InvalidDataContentError,
+ InvalidArgumentError: () => import_provider9.InvalidArgumentError,
+ InvalidDataContentError: () => import_provider9.InvalidDataContentError,
  InvalidMessageRoleError: () => InvalidMessageRoleError,
  InvalidModelIdError: () => InvalidModelIdError,
- InvalidPromptError: () => import_provider8.InvalidPromptError,
- InvalidResponseDataError: () => import_provider8.InvalidResponseDataError,
- InvalidToolArgumentsError: () => import_provider8.InvalidToolArgumentsError,
- JSONParseError: () => import_provider8.JSONParseError,
+ InvalidPromptError: () => import_provider9.InvalidPromptError,
+ InvalidResponseDataError: () => import_provider9.InvalidResponseDataError,
+ InvalidToolArgumentsError: () => import_provider9.InvalidToolArgumentsError,
+ JSONParseError: () => import_provider9.JSONParseError,
  LangChainAdapter: () => langchain_adapter_exports,
  LangChainStream: () => LangChainStream,
- LoadAPIKeyError: () => import_provider8.LoadAPIKeyError,
+ LoadAPIKeyError: () => import_provider9.LoadAPIKeyError,
  MistralStream: () => MistralStream,
- NoObjectGeneratedError: () => import_provider8.NoObjectGeneratedError,
+ NoObjectGeneratedError: () => import_provider9.NoObjectGeneratedError,
  NoSuchModelError: () => NoSuchModelError,
  NoSuchProviderError: () => NoSuchProviderError,
- NoSuchToolError: () => import_provider8.NoSuchToolError,
+ NoSuchToolError: () => import_provider9.NoSuchToolError,
  OpenAIStream: () => OpenAIStream,
  ReplicateStream: () => ReplicateStream,
- RetryError: () => import_provider8.RetryError,
+ RetryError: () => import_provider9.RetryError,
  StreamData: () => StreamData2,
  StreamingTextResponse: () => StreamingTextResponse,
- ToolCallParseError: () => import_provider8.ToolCallParseError,
- TypeValidationError: () => import_provider8.TypeValidationError,
- UnsupportedFunctionalityError: () => import_provider8.UnsupportedFunctionalityError,
- UnsupportedJSONSchemaError: () => import_provider8.UnsupportedJSONSchemaError,
+ ToolCallParseError: () => import_provider9.ToolCallParseError,
+ TypeValidationError: () => import_provider9.TypeValidationError,
+ UnsupportedFunctionalityError: () => import_provider9.UnsupportedFunctionalityError,
+ UnsupportedJSONSchemaError: () => import_provider9.UnsupportedJSONSchemaError,
  convertDataContentToBase64String: () => convertDataContentToBase64String,
  convertDataContentToUint8Array: () => convertDataContentToUint8Array,
  convertToCoreMessages: () => convertToCoreMessages,
@@ -418,32 +418,112 @@ async function embedMany({
  values,
  maxRetries,
  abortSignal,
- headers
+ headers,
+ experimental_telemetry: telemetry
  }) {
- var _a, _b, _c;
- const retry = retryWithExponentialBackoff({ maxRetries });
- const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
- if (maxEmbeddingsPerCall == null) {
- const modelResponse = await retry(
- () => model.doEmbed({ values, abortSignal, headers })
- );
- return new DefaultEmbedManyResult({
- values,
- embeddings: modelResponse.embeddings,
- usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
- });
- }
- const valueChunks = splitArray(values, maxEmbeddingsPerCall);
- const embeddings = [];
- let tokens = 0;
- for (const chunk of valueChunks) {
- const modelResponse = await retry(
- () => model.doEmbed({ values: chunk, abortSignal, headers })
- );
- embeddings.push(...modelResponse.embeddings);
- tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
- }
- return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
+ var _a;
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ operationName: "ai.embedMany",
+ model,
+ telemetry,
+ headers,
+ settings: { maxRetries }
+ });
+ const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+ return recordSpan({
+ name: "ai.embedMany",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.values": values.map((value) => JSON.stringify(value))
+ },
+ tracer,
+ fn: async (span) => {
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
+ if (maxEmbeddingsPerCall == null) {
+ const { embeddings: embeddings2, usage } = await retry(() => {
+ return recordSpan({
+ name: "ai.embedMany.doEmbed",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.values": values.map((value) => JSON.stringify(value))
+ },
+ tracer,
+ fn: async (doEmbedSpan) => {
+ var _a2;
+ const modelResponse = await model.doEmbed({
+ values,
+ abortSignal,
+ headers
+ });
+ const embeddings3 = modelResponse.embeddings;
+ const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+ doEmbedSpan.setAttributes({
+ "ai.embeddings": embeddings3.map(
+ (embedding) => JSON.stringify(embedding)
+ ),
+ "ai.usage.tokens": usage2.tokens
+ });
+ return { embeddings: embeddings3, usage: usage2 };
+ }
+ });
+ });
+ span.setAttributes({
+ "ai.embeddings": embeddings2.map(
+ (embedding) => JSON.stringify(embedding)
+ ),
+ "ai.usage.tokens": usage.tokens
+ });
+ return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+ }
+ const valueChunks = splitArray(values, maxEmbeddingsPerCall);
+ const embeddings = [];
+ let tokens = 0;
+ for (const chunk of valueChunks) {
+ const { embeddings: responseEmbeddings, usage } = await retry(() => {
+ return recordSpan({
+ name: "ai.embedMany.doEmbed",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.values": chunk.map((value) => JSON.stringify(value))
+ },
+ tracer,
+ fn: async (doEmbedSpan) => {
+ var _a2;
+ const modelResponse = await model.doEmbed({
+ values: chunk,
+ abortSignal,
+ headers
+ });
+ const embeddings2 = modelResponse.embeddings;
+ const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+ doEmbedSpan.setAttributes({
+ "ai.embeddings": embeddings2.map(
+ (embedding) => JSON.stringify(embedding)
+ ),
+ "ai.usage.tokens": usage2.tokens
+ });
+ return { embeddings: embeddings2, usage: usage2 };
+ }
+ });
+ });
+ embeddings.push(...responseEmbeddings);
+ tokens += usage.tokens;
+ }
+ span.setAttributes({
+ "ai.embeddings": embeddings.map((embedding) => JSON.stringify(embedding)),
+ "ai.usage.tokens": tokens
+ });
+ return new DefaultEmbedManyResult({
+ values,
+ embeddings,
+ usage: { tokens }
+ });
+ }
+ });
  }
  var DefaultEmbedManyResult = class {
  constructor(options) {
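The `embedMany` rewrite above threads a new `experimental_telemetry` option into OpenTelemetry spans: an `ai.embedMany` root span plus one `ai.embedMany.doEmbed` span per provider call. A minimal usage sketch, assuming the option shape shown in the diff (the model and input values are illustrative, not from the source):

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { embeddings, usage } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
  // Opt-in tracing added in this release range; when omitted, the code
  // above falls back to isEnabled: false and records no spans.
  experimental_telemetry: { isEnabled: true },
});

console.log(embeddings.length, usage.tokens);
```

When `maxEmbeddingsPerCall` forces chunking, each chunk gets its own `ai.embedMany.doEmbed` span and token usage is summed onto the root span.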
@@ -454,9 +534,12 @@ var DefaultEmbedManyResult = class {
  };

  // core/generate-object/generate-object.ts
- var import_provider5 = require("@ai-sdk/provider");
+ var import_provider6 = require("@ai-sdk/provider");
  var import_provider_utils5 = require("@ai-sdk/provider-utils");

+ // core/prompt/convert-to-language-model-prompt.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+
  // core/util/detect-image-mimetype.ts
  var mimeTypeSignatures = [
  { mimeType: "image/gif", bytes: [71, 73, 70] },
@@ -473,8 +556,37 @@ function detectImageMimeType(image) {
  return void 0;
  }

- // core/prompt/data-content.ts
+ // core/util/download.ts
  var import_provider2 = require("@ai-sdk/provider");
+ async function download({
+ url,
+ fetchImplementation = fetch
+ }) {
+ var _a;
+ const urlText = url.toString();
+ try {
+ const response = await fetchImplementation(urlText);
+ if (!response.ok) {
+ throw new import_provider2.DownloadError({
+ url: urlText,
+ statusCode: response.status,
+ statusText: response.statusText
+ });
+ }
+ return {
+ data: new Uint8Array(await response.arrayBuffer()),
+ mimeType: (_a = response.headers.get("content-type")) != null ? _a : void 0
+ };
+ } catch (error) {
+ if (import_provider2.DownloadError.isDownloadError(error)) {
+ throw error;
+ }
+ throw new import_provider2.DownloadError({ url: urlText, cause: error });
+ }
+ }
+
+ // core/prompt/data-content.ts
+ var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
  function convertDataContentToBase64String(content) {
  if (typeof content === "string") {
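The new `core/util/download.ts` helper above is internal to the bundle, but its contract is visible in the diff: it resolves to the raw bytes and content type of a URL, and wraps both HTTP and network failures in `DownloadError`. An illustrative sketch of that contract (calling it directly is not a public API; the URL is a placeholder):

```ts
// Behavior as read from the diff; not an exported function of the ai package.
const { data, mimeType } = await download({
  url: new URL('https://example.com/cat.png'),
  // fetchImplementation defaults to the global fetch and is injectable,
  // which makes the helper straightforward to stub in tests.
  fetchImplementation: fetch,
});
// data: Uint8Array of the response body
// mimeType: the content-type header value, or undefined if absent
// Non-ok responses throw DownloadError with statusCode/statusText;
// any other error is re-wrapped as DownloadError with a cause.
```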
@@ -493,7 +605,7 @@ function convertDataContentToUint8Array(content) {
  try {
  return (0, import_provider_utils2.convertBase64ToUint8Array)(content);
  } catch (error) {
- throw new import_provider2.InvalidDataContentError({
+ throw new import_provider3.InvalidDataContentError({
  message: "Invalid data content. Content string is not a base64-encoded media.",
  content,
  cause: error
@@ -503,7 +615,7 @@ function convertDataContentToUint8Array(content) {
  if (content instanceof ArrayBuffer) {
  return new Uint8Array(content);
  }
- throw new import_provider2.InvalidDataContentError({ content });
+ throw new import_provider3.InvalidDataContentError({ content });
  }
  function convertUint8ArrayToText(uint8Array) {
  try {
@@ -537,12 +649,16 @@ var InvalidMessageRoleError = class extends Error {
  };

  // core/prompt/convert-to-language-model-prompt.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- function convertToLanguageModelPrompt(prompt) {
+ async function convertToLanguageModelPrompt({
+ prompt,
+ modelSupportsImageUrls = true,
+ downloadImplementation = download
+ }) {
  const languageModelMessages = [];
  if (prompt.system != null) {
  languageModelMessages.push({ role: "system", content: prompt.system });
  }
+ const downloadedImages = modelSupportsImageUrls || prompt.messages == null ? null : await downloadImages(prompt.messages, downloadImplementation);
  const promptType = prompt.type;
  switch (promptType) {
  case "prompt": {
@@ -554,7 +670,9 @@ function convertToLanguageModelPrompt(prompt) {
  }
  case "messages": {
  languageModelMessages.push(
- ...prompt.messages.map(convertToLanguageModelMessage)
+ ...prompt.messages.map(
+ (message) => convertToLanguageModelMessage(message, downloadedImages)
+ )
  );
  break;
  }
@@ -565,7 +683,7 @@ function convertToLanguageModelPrompt(prompt) {
  }
  return languageModelMessages;
  }
- function convertToLanguageModelMessage(message) {
+ function convertToLanguageModelMessage(message, downloadedImages) {
  const role = message.role;
  switch (role) {
  case "system": {
@@ -582,18 +700,27 @@ function convertToLanguageModelMessage(message) {
  role: "user",
  content: message.content.map(
  (part) => {
- var _a;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return part;
  }
  case "image": {
  if (part.image instanceof URL) {
- return {
- type: "image",
- image: part.image,
- mimeType: part.mimeType
- };
+ if (downloadedImages == null) {
+ return {
+ type: "image",
+ image: part.image,
+ mimeType: part.mimeType
+ };
+ } else {
+ const downloadedImage = downloadedImages[part.image.toString()];
+ return {
+ type: "image",
+ image: downloadedImage.data,
+ mimeType: (_a = part.mimeType) != null ? _a : downloadedImage.mimeType
+ };
+ }
  }
  if (typeof part.image === "string") {
  try {
@@ -601,11 +728,20 @@ function convertToLanguageModelMessage(message) {
  switch (url.protocol) {
  case "http:":
  case "https:": {
- return {
- type: "image",
- image: url,
- mimeType: part.mimeType
- };
+ if (downloadedImages == null) {
+ return {
+ type: "image",
+ image: url,
+ mimeType: part.mimeType
+ };
+ } else {
+ const downloadedImage = downloadedImages[part.image];
+ return {
+ type: "image",
+ image: downloadedImage.data,
+ mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType
+ };
+ }
  }
  case "data:": {
  try {
@@ -640,7 +776,7 @@ function convertToLanguageModelMessage(message) {
  return {
  type: "image",
  image: imageUint8,
- mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
+ mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8)
  };
  }
  }
@@ -672,18 +808,37 @@ function convertToLanguageModelMessage(message) {
  }
  }
  }
+ async function downloadImages(messages, downloadImplementation) {
+ const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
+ (content) => Array.isArray(content)
+ ).flat().filter((part) => part.type === "image").map((part) => part.image).map(
+ (part) => (
+ // support string urls in image parts:
+ typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
+ )
+ ).filter((image) => image instanceof URL);
+ const downloadedImages = await Promise.all(
+ urls.map(async (url) => ({
+ url,
+ data: await downloadImplementation({ url })
+ }))
+ );
+ return Object.fromEntries(
+ downloadedImages.map(({ url, data }) => [url.toString(), data])
+ );
+ }

  // core/prompt/get-validated-prompt.ts
- var import_provider3 = require("@ai-sdk/provider");
+ var import_provider4 = require("@ai-sdk/provider");
  function getValidatedPrompt(prompt) {
  if (prompt.prompt == null && prompt.messages == null) {
- throw new import_provider3.InvalidPromptError({
+ throw new import_provider4.InvalidPromptError({
  prompt,
  message: "prompt or messages must be defined"
  });
  }
  if (prompt.prompt != null && prompt.messages != null) {
- throw new import_provider3.InvalidPromptError({
+ throw new import_provider4.InvalidPromptError({
  prompt,
  message: "prompt and messages cannot be defined at the same time"
  });
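Taken together, the `convertToLanguageModelPrompt`, `convertToLanguageModelMessage`, and `downloadImages` changes above mean that when a model reports `supportsImageUrls === false`, image parts given as URLs (or http/https strings) in user messages are now downloaded eagerly and passed to the provider as binary data with a caller-supplied or detected mime type. A sketch of the observable effect (the model object and URL are assumptions, not from the source):

```ts
import { generateText } from 'ai';

const result = await generateText({
  model: modelWithoutImageUrlSupport, // hypothetical: supportsImageUrls === false
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'What is in this picture?' },
        // Previously forwarded to the provider as a URL; after this change
        // the SDK fetches it and forwards { image: Uint8Array, mimeType }.
        { type: 'image', image: new URL('https://example.com/photo.jpg') },
      ],
    },
  ],
});
```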
@@ -691,7 +846,7 @@ function getValidatedPrompt(prompt) {
  if (prompt.messages != null) {
  for (const message of prompt.messages) {
  if (message.role === "system" && typeof message.content !== "string") {
- throw new import_provider3.InvalidPromptError({
+ throw new import_provider4.InvalidPromptError({
  prompt,
  message: "system message content must be a string"
  });
@@ -713,7 +868,7 @@ function getValidatedPrompt(prompt) {
  }

  // core/prompt/prepare-call-settings.ts
- var import_provider4 = require("@ai-sdk/provider");
+ var import_provider5 = require("@ai-sdk/provider");
  function prepareCallSettings({
  maxTokens,
  temperature,
@@ -726,14 +881,14 @@ function prepareCallSettings({
  }) {
  if (maxTokens != null) {
  if (!Number.isInteger(maxTokens)) {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "maxTokens",
  value: maxTokens,
  message: "maxTokens must be an integer"
  });
  }
  if (maxTokens < 1) {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "maxTokens",
  value: maxTokens,
  message: "maxTokens must be >= 1"
@@ -742,7 +897,7 @@ function prepareCallSettings({
  }
  if (temperature != null) {
  if (typeof temperature !== "number") {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "temperature",
  value: temperature,
  message: "temperature must be a number"
@@ -751,7 +906,7 @@ function prepareCallSettings({
  }
  if (topP != null) {
  if (typeof topP !== "number") {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "topP",
  value: topP,
  message: "topP must be a number"
@@ -760,7 +915,7 @@ function prepareCallSettings({
  }
  if (presencePenalty != null) {
  if (typeof presencePenalty !== "number") {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "presencePenalty",
  value: presencePenalty,
  message: "presencePenalty must be a number"
@@ -769,7 +924,7 @@ function prepareCallSettings({
  }
  if (frequencyPenalty != null) {
  if (typeof frequencyPenalty !== "number") {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "frequencyPenalty",
  value: frequencyPenalty,
  message: "frequencyPenalty must be a number"
@@ -778,7 +933,7 @@ function prepareCallSettings({
  }
  if (seed != null) {
  if (!Number.isInteger(seed)) {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "seed",
  value: seed,
  message: "seed must be an integer"
@@ -787,14 +942,14 @@ function prepareCallSettings({
  }
  if (maxRetries != null) {
  if (!Number.isInteger(maxRetries)) {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "maxRetries",
  value: maxRetries,
  message: "maxRetries must be an integer"
  });
  }
  if (maxRetries < 0) {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "maxRetries",
  value: maxRetries,
  message: "maxRetries must be >= 0"
@@ -916,7 +1071,7 @@ async function generateObject({
  ...baseTelemetryAttributes,
  // specific settings that only make sense on the outer level:
  "ai.prompt": JSON.stringify({ system, prompt, messages }),
- "ai.settings.jsonSchema": JSON.stringify(schema.jsonSchema),
+ "ai.schema": JSON.stringify(schema.jsonSchema),
  "ai.settings.mode": mode
  },
  tracer,
@@ -942,7 +1097,10 @@ async function generateObject({
  prompt,
  messages
  });
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
  () => recordSpan({
@@ -974,7 +1132,7 @@ async function generateObject({
  })
  );
  if (generateResult.text === void 0) {
- throw new import_provider5.NoObjectGeneratedError();
+ throw new import_provider6.NoObjectGeneratedError();
  }
  result = generateResult.text;
  finishReason = generateResult.finishReason;
@@ -990,7 +1148,10 @@ async function generateObject({
  prompt,
  messages
  });
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
  () => recordSpan({
@@ -1032,7 +1193,7 @@ async function generateObject({
  );
  const functionArgs = (_b = (_a2 = generateResult.toolCalls) == null ? void 0 : _a2[0]) == null ? void 0 : _b.args;
  if (functionArgs === void 0) {
- throw new import_provider5.NoObjectGeneratedError();
+ throw new import_provider6.NoObjectGeneratedError();
  }
  result = functionArgs;
  finishReason = generateResult.finishReason;
@@ -1164,101 +1325,156 @@ async function streamObject({
  maxRetries,
  abortSignal,
  headers,
+ experimental_telemetry: telemetry,
  onFinish,
  ...settings
  }) {
+ var _a;
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ operationName: "ai.streamObject",
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
  const retry = retryWithExponentialBackoff({ maxRetries });
  const schema = asSchema(inputSchema);
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let callOptions;
- let transformer;
- switch (mode) {
- case "json": {
- const validatedPrompt = getValidatedPrompt({
- system: injectJsonSchemaIntoSystem({
- system,
- schema: schema.jsonSchema
- }),
- prompt,
- messages
- });
- callOptions = {
- mode: { type: "object-json" },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
- break;
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
+ return recordSpan({
+ name: "ai.streamObject",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": JSON.stringify({ system, prompt, messages }),
+ "ai.schema": JSON.stringify(schema.jsonSchema),
+ "ai.settings.mode": mode
+ },
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ if (mode === "auto" || mode == null) {
+ mode = model.defaultObjectGenerationMode;
+ }
+ let callOptions;
+ let transformer;
+ switch (mode) {
+ case "json": {
+ const validatedPrompt = getValidatedPrompt({
+ system: injectJsonSchemaIntoSystem({
+ system,
+ schema: schema.jsonSchema
+ }),
+ prompt,
+ messages
+ });
+ callOptions = {
+ mode: { type: "object-json" },
+ ...prepareCallSettings(settings),
+ inputFormat: validatedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ }),
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform: (chunk, controller) => {
+ switch (chunk.type) {
+ case "text-delta":
+ controller.enqueue(chunk.textDelta);
+ break;
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
  }
- };
- break;
- }
- case "tool": {
- const validatedPrompt = getValidatedPrompt({
- system,
- prompt,
- messages
- });
- callOptions = {
- mode: {
- type: "object-tool",
- tool: {
- type: "function",
- name: "json",
- description: "Respond with a JSON object.",
- parameters: schema.jsonSchema
- }
- },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- };
- transformer = {
- transform(chunk, controller) {
- switch (chunk.type) {
- case "tool-call-delta":
- controller.enqueue(chunk.argsTextDelta);
- break;
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
+ case "tool": {
+ const validatedPrompt = getValidatedPrompt({
+ system,
+ prompt,
+ messages
+ });
+ callOptions = {
+ mode: {
+ type: "object-tool",
+ tool: {
+ type: "function",
+ name: "json",
+ description: "Respond with a JSON object.",
+ parameters: schema.jsonSchema
+ }
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: validatedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ }),
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "tool-call-delta":
+ controller.enqueue(chunk.argsTextDelta);
+ break;
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
  }
- };
- break;
- }
- case void 0: {
- throw new Error("Model does not have a default object generation mode.");
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ case void 0: {
+ throw new Error(
+ "Model does not have a default object generation mode."
+ );
+ }
+ default: {
+ const _exhaustiveCheck = mode;
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ }
+ }
+ const {
+ result: { stream, warnings, rawResponse },
+ doStreamSpan
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamObject.doStream",
+ attributes: {
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": callOptions.inputFormat,
+ "ai.prompt.messages": JSON.stringify(callOptions.prompt),
+ "ai.settings.mode": mode
+ },
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => {
+ return {
+ result: await model.doStream(callOptions),
+ doStreamSpan: doStreamSpan2
+ };
+ }
+ })
+ );
+ return new DefaultStreamObjectResult({
+ stream: stream.pipeThrough(new TransformStream(transformer)),
+ warnings,
+ rawResponse,
+ schema,
+ onFinish,
+ rootSpan,
+ doStreamSpan
+ });
  }
- }
- const result = await retry(() => model.doStream(callOptions));
- return new DefaultStreamObjectResult({
- stream: result.stream.pipeThrough(new TransformStream(transformer)),
- warnings: result.warnings,
- rawResponse: result.rawResponse,
- schema,
- onFinish
  });
  }
  var DefaultStreamObjectResult = class {
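`streamObject` gains the same `experimental_telemetry` plumbing: an `ai.streamObject` root span that is deliberately kept open (`endWhenDone: false`) until the stream flushes, plus an `ai.streamObject.doStream` span per attempt. A hedged usage sketch (model, schema, and prompt are illustrative, not from the source):

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const result = await streamObject({
  model: openai('gpt-4o'),
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: 'Propose a title and tags for a post about package diffs.',
  // Emits ai.streamObject and ai.streamObject.doStream spans when enabled.
  experimental_telemetry: { isEnabled: true },
});

for await (const partialObject of result.partialObjectStream) {
  console.log(partialObject); // progressively more complete objects
}
```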
@@ -1267,7 +1483,9 @@ var DefaultStreamObjectResult = class {
  warnings,
  rawResponse,
  schema,
- onFinish
+ onFinish,
+ rootSpan,
+ doStreamSpan
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -1282,10 +1500,15 @@ var DefaultStreamObjectResult = class {
  let accumulatedText = "";
  let delta = "";
  let latestObject = void 0;
+ let firstChunk = true;
  const self = this;
  this.originalStream = stream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
+ if (firstChunk) {
+ firstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk");
+ }
  if (typeof chunk === "string") {
  accumulatedText += chunk;
  delta += chunk;
@@ -1339,12 +1562,24 @@ var DefaultStreamObjectResult = class {
  // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
  async flush(controller) {
  try {
+ const finalUsage = usage != null ? usage : {
+ promptTokens: NaN,
+ completionTokens: NaN,
+ totalTokens: NaN
+ };
+ doStreamSpan.setAttributes({
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.result.object": JSON.stringify(object)
+ });
+ doStreamSpan.end();
+ rootSpan.setAttributes({
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.result.object": JSON.stringify(object)
+ });
  await (onFinish == null ? void 0 : onFinish({
- usage: usage != null ? usage : {
- promptTokens: NaN,
- completionTokens: NaN,
- totalTokens: NaN
- },
+ usage: finalUsage,
  object,
  error,
  rawResponse,
@@ -1352,6 +1587,8 @@ var DefaultStreamObjectResult = class {
  }));
  } catch (error2) {
  controller.error(error2);
+ } finally {
+ rootSpan.end();
  }
  }
  })
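The flush handler now ends the `doStream` span, mirrors the final usage and result object onto the root span, and guarantees `rootSpan.end()` via `finally` even when `onFinish` throws. These spans only become visible if the host application registers an OpenTelemetry tracer provider; a minimal Node setup sketch (standard OTel packages, not part of this diff, and exact APIs may differ across OTel versions):

```ts
import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node';
import {
  SimpleSpanProcessor,
  ConsoleSpanExporter,
} from '@opentelemetry/sdk-trace-base';

// Print spans (ai.streamObject, ai.streamObject.doStream, ...) to stdout,
// including the ai.usage.* attributes and the ai.stream.firstChunk event.
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(new ConsoleSpanExporter()));
provider.register();
```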
@@ -1472,7 +1709,7 @@ function prepareToolsAndToolChoice({
  }

  // core/generate-text/tool-call.ts
- var import_provider6 = require("@ai-sdk/provider");
+ var import_provider7 = require("@ai-sdk/provider");
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
  function parseToolCall({
  toolCall,
@@ -1480,11 +1717,11 @@ function parseToolCall({
  }) {
  const toolName = toolCall.toolName;
  if (tools == null) {
- throw new import_provider6.NoSuchToolError({ toolName: toolCall.toolName });
+ throw new import_provider7.NoSuchToolError({ toolName: toolCall.toolName });
  }
  const tool2 = tools[toolName];
  if (tool2 == null) {
- throw new import_provider6.NoSuchToolError({
+ throw new import_provider7.NoSuchToolError({
  toolName: toolCall.toolName,
  availableTools: Object.keys(tools)
  });
@@ -1494,7 +1731,7 @@ function parseToolCall({
  schema: asSchema(tool2.parameters)
  });
  if (parseResult.success === false) {
- throw new import_provider6.InvalidToolArgumentsError({
+ throw new import_provider7.InvalidToolArgumentsError({
  toolName,
  toolArgs: toolCall.args,
  cause: parseResult.error
@@ -1555,7 +1792,10 @@ async function generateText({
  ...prepareToolsAndToolChoice({ tools, toolChoice })
  };
  const callSettings = prepareCallSettings(settings);
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
@@ -1628,7 +1868,9 @@ async function generateText({
  });
  responseMessages.push(...newResponseMessages);
  promptMessages.push(
- ...newResponseMessages.map(convertToLanguageModelMessage)
+ ...newResponseMessages.map(
+ (message) => convertToLanguageModelMessage(message, null)
+ )
  );
  } while (
  // there are tool calls:
@@ -1831,7 +2073,7 @@ function mergeStreams(stream1, stream2) {
  }

  // core/generate-text/run-tools-transformation.ts
- var import_provider7 = require("@ai-sdk/provider");
+ var import_provider8 = require("@ai-sdk/provider");
  var import_ui_utils2 = require("@ai-sdk/ui-utils");
  function runToolsTransformation({
  tools,
@@ -1881,7 +2123,7 @@ function runToolsTransformation({
  if (tools == null) {
  toolResultsStreamController.enqueue({
  type: "error",
- error: new import_provider7.NoSuchToolError({ toolName: chunk.toolName })
+ error: new import_provider8.NoSuchToolError({ toolName: chunk.toolName })
  });
  break;
  }
@@ -1889,7 +2131,7 @@ function runToolsTransformation({
  if (tool2 == null) {
  toolResultsStreamController.enqueue({
  type: "error",
- error: new import_provider7.NoSuchToolError({
+ error: new import_provider8.NoSuchToolError({
  toolName: chunk.toolName,
  availableTools: Object.keys(tools)
  })
@@ -2038,7 +2280,10 @@ async function streamText({
  fn: async (rootSpan) => {
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const {
  result: { stream, warnings, rawResponse },
  doStreamSpan
@@ -2667,7 +2912,7 @@ function tool(tool2) {
  }

  // core/types/errors.ts
- var import_provider8 = require("@ai-sdk/provider");
+ var import_provider9 = require("@ai-sdk/provider");

  // core/util/cosine-similarity.ts
  function cosineSimilarity(vector1, vector2) {