ai 3.2.34 → 3.2.36

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -31,7 +31,7 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  var streams_exports = {};
  __export(streams_exports, {
  AIStream: () => AIStream,
- APICallError: () => import_provider8.APICallError,
+ APICallError: () => import_provider9.APICallError,
  AWSBedrockAnthropicMessagesStream: () => AWSBedrockAnthropicMessagesStream,
  AWSBedrockAnthropicStream: () => AWSBedrockAnthropicStream,
  AWSBedrockCohereStream: () => AWSBedrockCohereStream,
@@ -40,35 +40,35 @@ __export(streams_exports, {
  AnthropicStream: () => AnthropicStream,
  AssistantResponse: () => AssistantResponse,
  CohereStream: () => CohereStream,
- EmptyResponseBodyError: () => import_provider8.EmptyResponseBodyError,
+ EmptyResponseBodyError: () => import_provider9.EmptyResponseBodyError,
  GoogleGenerativeAIStream: () => GoogleGenerativeAIStream,
  HuggingFaceStream: () => HuggingFaceStream,
  InkeepStream: () => InkeepStream,
- InvalidArgumentError: () => import_provider8.InvalidArgumentError,
- InvalidDataContentError: () => import_provider8.InvalidDataContentError,
+ InvalidArgumentError: () => import_provider9.InvalidArgumentError,
+ InvalidDataContentError: () => import_provider9.InvalidDataContentError,
  InvalidMessageRoleError: () => InvalidMessageRoleError,
  InvalidModelIdError: () => InvalidModelIdError,
- InvalidPromptError: () => import_provider8.InvalidPromptError,
- InvalidResponseDataError: () => import_provider8.InvalidResponseDataError,
- InvalidToolArgumentsError: () => import_provider8.InvalidToolArgumentsError,
- JSONParseError: () => import_provider8.JSONParseError,
+ InvalidPromptError: () => import_provider9.InvalidPromptError,
+ InvalidResponseDataError: () => import_provider9.InvalidResponseDataError,
+ InvalidToolArgumentsError: () => import_provider9.InvalidToolArgumentsError,
+ JSONParseError: () => import_provider9.JSONParseError,
  LangChainAdapter: () => langchain_adapter_exports,
  LangChainStream: () => LangChainStream,
- LoadAPIKeyError: () => import_provider8.LoadAPIKeyError,
+ LoadAPIKeyError: () => import_provider9.LoadAPIKeyError,
  MistralStream: () => MistralStream,
- NoObjectGeneratedError: () => import_provider8.NoObjectGeneratedError,
+ NoObjectGeneratedError: () => import_provider9.NoObjectGeneratedError,
  NoSuchModelError: () => NoSuchModelError,
  NoSuchProviderError: () => NoSuchProviderError,
- NoSuchToolError: () => import_provider8.NoSuchToolError,
+ NoSuchToolError: () => import_provider9.NoSuchToolError,
  OpenAIStream: () => OpenAIStream,
  ReplicateStream: () => ReplicateStream,
- RetryError: () => import_provider8.RetryError,
+ RetryError: () => import_provider9.RetryError,
  StreamData: () => StreamData2,
  StreamingTextResponse: () => StreamingTextResponse,
- ToolCallParseError: () => import_provider8.ToolCallParseError,
- TypeValidationError: () => import_provider8.TypeValidationError,
- UnsupportedFunctionalityError: () => import_provider8.UnsupportedFunctionalityError,
- UnsupportedJSONSchemaError: () => import_provider8.UnsupportedJSONSchemaError,
+ ToolCallParseError: () => import_provider9.ToolCallParseError,
+ TypeValidationError: () => import_provider9.TypeValidationError,
+ UnsupportedFunctionalityError: () => import_provider9.UnsupportedFunctionalityError,
+ UnsupportedJSONSchemaError: () => import_provider9.UnsupportedJSONSchemaError,
  convertDataContentToBase64String: () => convertDataContentToBase64String,
  convertDataContentToUint8Array: () => convertDataContentToUint8Array,
  convertToCoreMessages: () => convertToCoreMessages,
@@ -91,6 +91,7 @@ __export(streams_exports, {
  generateId: () => generateId2,
  generateObject: () => generateObject,
  generateText: () => generateText,
+ jsonSchema: () => jsonSchema,
  nanoid: () => nanoid,
  parseComplexResponse: () => import_ui_utils6.parseComplexResponse,
  parseStreamPart: () => import_ui_utils6.parseStreamPart,
@@ -104,7 +105,158 @@ __export(streams_exports, {
  });
  module.exports = __toCommonJS(streams_exports);
  var import_ui_utils6 = require("@ai-sdk/ui-utils");
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+
+ // core/telemetry/get-base-telemetry-attributes.ts
+ function getBaseTelemetryAttributes({
+ operationName,
+ model,
+ settings,
+ telemetry,
+ headers
+ }) {
+ var _a;
+ return {
+ "ai.model.provider": model.provider,
+ "ai.model.id": model.modelId,
+ // settings:
+ ...Object.entries(settings).reduce((attributes, [key, value]) => {
+ attributes[`ai.settings.${key}`] = value;
+ return attributes;
+ }, {}),
+ // special telemetry information
+ "operation.name": operationName,
+ "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+ "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+ // add metadata as attributes:
+ ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+ (attributes, [key, value]) => {
+ attributes[`ai.telemetry.metadata.${key}`] = value;
+ return attributes;
+ },
+ {}
+ ),
+ // request headers
+ ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+ if (value !== void 0) {
+ attributes[`ai.request.headers.${key}`] = value;
+ }
+ return attributes;
+ }, {})
+ };
+ }
+
+ // core/telemetry/get-tracer.ts
+ var import_api = require("@opentelemetry/api");
+
+ // core/telemetry/noop-tracer.ts
+ var noopTracer = {
+ startSpan() {
+ return noopSpan;
+ },
+ startActiveSpan(name, arg1, arg2, arg3) {
+ if (typeof arg1 === "function") {
+ return arg1(noopSpan);
+ }
+ if (typeof arg2 === "function") {
+ return arg2(noopSpan);
+ }
+ if (typeof arg3 === "function") {
+ return arg3(noopSpan);
+ }
+ }
+ };
+ var noopSpan = {
+ spanContext() {
+ return noopSpanContext;
+ },
+ setAttribute() {
+ return this;
+ },
+ setAttributes() {
+ return this;
+ },
+ addEvent() {
+ return this;
+ },
+ addLink() {
+ return this;
+ },
+ addLinks() {
+ return this;
+ },
+ setStatus() {
+ return this;
+ },
+ updateName() {
+ return this;
+ },
+ end() {
+ return this;
+ },
+ isRecording() {
+ return false;
+ },
+ recordException() {
+ return this;
+ }
+ };
+ var noopSpanContext = {
+ traceId: "",
+ spanId: "",
+ traceFlags: 0
+ };
+
+ // core/telemetry/get-tracer.ts
+ var testTracer = void 0;
+ function getTracer({ isEnabled }) {
+ if (!isEnabled) {
+ return noopTracer;
+ }
+ if (testTracer) {
+ return testTracer;
+ }
+ return import_api.trace.getTracer("ai");
+ }
+
+ // core/telemetry/record-span.ts
+ var import_api2 = require("@opentelemetry/api");
+ function recordSpan({
+ name,
+ tracer,
+ attributes,
+ fn,
+ endWhenDone = true
+ }) {
+ return tracer.startActiveSpan(name, { attributes }, async (span) => {
+ try {
+ const result = await fn(span);
+ if (endWhenDone) {
+ span.end();
+ }
+ return result;
+ } catch (error) {
+ try {
+ if (error instanceof Error) {
+ span.recordException({
+ name: error.name,
+ message: error.message,
+ stack: error.stack
+ });
+ span.setStatus({
+ code: import_api2.SpanStatusCode.ERROR,
+ message: error.message
+ });
+ } else {
+ span.setStatus({ code: import_api2.SpanStatusCode.ERROR });
+ }
+ } finally {
+ span.end();
+ }
+ throw error;
+ }
+ });
+ }

  // core/util/retry-with-exponential-backoff.ts
  var import_provider = require("@ai-sdk/provider");
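
Note on the telemetry block added above: getTracer returns the no-op tracer unless telemetry is explicitly enabled, and recordSpan wraps an async function in a span, recording exceptions and ending the span on both success and failure. A minimal sketch of the pattern, using only the helpers defined above (the span name, attribute, and return value are illustrative, and this assumes an async context):

    // sketch: exercising the bundle-internal helpers defined above
    const tracer = getTracer({ isEnabled: true }); // false would yield noopTracer
    const answer = await recordSpan({
      name: "example.operation", // illustrative span name
      tracer,
      attributes: { "example.input": "42" }, // illustrative attribute
      fn: async (span) => {
        span.addEvent("example.checkpoint"); // spans also take events, see firstChunk later
        return 42; // recordSpan resolves to fn's return value and ends the span
      },
    });
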
@@ -174,18 +326,69 @@ async function embed({
  value,
  maxRetries,
  abortSignal,
- headers
+ headers,
+ experimental_telemetry: telemetry
  }) {
  var _a;
- const retry = retryWithExponentialBackoff({ maxRetries });
- const modelResponse = await retry(
- () => model.doEmbed({ values: [value], abortSignal, headers })
- );
- return new DefaultEmbedResult({
- value,
- embedding: modelResponse.embeddings[0],
- usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN },
- rawResponse: modelResponse.rawResponse
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ operationName: "ai.embed",
+ model,
+ telemetry,
+ headers,
+ settings: { maxRetries }
+ });
+ const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+ return recordSpan({
+ name: "ai.embed",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.value": JSON.stringify(value)
+ },
+ tracer,
+ fn: async (span) => {
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ const { embedding, usage, rawResponse } = await retry(
+ () => (
+ // nested spans to align with the embedMany telemetry data:
+ recordSpan({
+ name: "ai.embed.doEmbed",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.values": [JSON.stringify(value)]
+ },
+ tracer,
+ fn: async (doEmbedSpan) => {
+ var _a2;
+ const modelResponse = await model.doEmbed({
+ values: [value],
+ abortSignal,
+ headers
+ });
+ const embedding2 = modelResponse.embeddings[0];
+ const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+ doEmbedSpan.setAttributes({
+ "ai.embeddings": modelResponse.embeddings.map(
+ (embedding3) => JSON.stringify(embedding3)
+ ),
+ "ai.usage.tokens": usage2.tokens
+ });
+ return {
+ embedding: embedding2,
+ usage: usage2,
+ rawResponse: modelResponse.rawResponse
+ };
+ }
+ })
+ )
+ );
+ span.setAttributes({
+ "ai.embedding": JSON.stringify(embedding),
+ "ai.usage.tokens": usage.tokens
+ });
+ return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+ }
  });
  }
  var DefaultEmbedResult = class {
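
embed now accepts an experimental_telemetry option (see the changed signature above) and emits ai.embed / ai.embed.doEmbed spans. A hedged usage sketch; the provider package and model id are assumptions for illustration, only the option shape comes from this diff:

    const { embed } = require("ai");
    const { openai } = require("@ai-sdk/openai"); // assumed provider package

    async function main() {
      const { embedding, usage } = await embed({
        model: openai.embedding("text-embedding-3-small"), // illustrative model id
        value: "sunny day at the beach",
        experimental_telemetry: {
          isEnabled: true, // telemetry is off by default, per the getTracer call above
          functionId: "embed-example", // becomes resource.name / ai.telemetry.functionId
          metadata: { env: "dev" }, // flattened into ai.telemetry.metadata.* attributes
        },
      });
      console.log(embedding.length, usage.tokens);
    }

    main();
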
@@ -215,32 +418,112 @@ async function embedMany({
  values,
  maxRetries,
  abortSignal,
- headers
+ headers,
+ experimental_telemetry: telemetry
  }) {
- var _a, _b, _c;
- const retry = retryWithExponentialBackoff({ maxRetries });
- const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
- if (maxEmbeddingsPerCall == null) {
- const modelResponse = await retry(
- () => model.doEmbed({ values, abortSignal, headers })
- );
- return new DefaultEmbedManyResult({
- values,
- embeddings: modelResponse.embeddings,
- usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
- });
- }
- const valueChunks = splitArray(values, maxEmbeddingsPerCall);
- const embeddings = [];
- let tokens = 0;
- for (const chunk of valueChunks) {
- const modelResponse = await retry(
- () => model.doEmbed({ values: chunk, abortSignal, headers })
- );
- embeddings.push(...modelResponse.embeddings);
- tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
- }
- return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
+ var _a;
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ operationName: "ai.embedMany",
+ model,
+ telemetry,
+ headers,
+ settings: { maxRetries }
+ });
+ const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+ return recordSpan({
+ name: "ai.embedMany",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.values": values.map((value) => JSON.stringify(value))
+ },
+ tracer,
+ fn: async (span) => {
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
+ if (maxEmbeddingsPerCall == null) {
+ const { embeddings: embeddings2, usage } = await retry(() => {
+ return recordSpan({
+ name: "ai.embedMany.doEmbed",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.values": values.map((value) => JSON.stringify(value))
+ },
+ tracer,
+ fn: async (doEmbedSpan) => {
+ var _a2;
+ const modelResponse = await model.doEmbed({
+ values,
+ abortSignal,
+ headers
+ });
+ const embeddings3 = modelResponse.embeddings;
+ const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+ doEmbedSpan.setAttributes({
+ "ai.embeddings": embeddings3.map(
+ (embedding) => JSON.stringify(embedding)
+ ),
+ "ai.usage.tokens": usage2.tokens
+ });
+ return { embeddings: embeddings3, usage: usage2 };
+ }
+ });
+ });
+ span.setAttributes({
+ "ai.embeddings": embeddings2.map(
+ (embedding) => JSON.stringify(embedding)
+ ),
+ "ai.usage.tokens": usage.tokens
+ });
+ return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+ }
+ const valueChunks = splitArray(values, maxEmbeddingsPerCall);
+ const embeddings = [];
+ let tokens = 0;
+ for (const chunk of valueChunks) {
+ const { embeddings: responseEmbeddings, usage } = await retry(() => {
+ return recordSpan({
+ name: "ai.embedMany.doEmbed",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.values": chunk.map((value) => JSON.stringify(value))
+ },
+ tracer,
+ fn: async (doEmbedSpan) => {
+ var _a2;
+ const modelResponse = await model.doEmbed({
+ values: chunk,
+ abortSignal,
+ headers
+ });
+ const embeddings2 = modelResponse.embeddings;
+ const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+ doEmbedSpan.setAttributes({
+ "ai.embeddings": embeddings2.map(
+ (embedding) => JSON.stringify(embedding)
+ ),
+ "ai.usage.tokens": usage2.tokens
+ });
+ return { embeddings: embeddings2, usage: usage2 };
+ }
+ });
+ });
+ embeddings.push(...responseEmbeddings);
+ tokens += usage.tokens;
+ }
+ span.setAttributes({
+ "ai.embeddings": embeddings.map((embedding) => JSON.stringify(embedding)),
+ "ai.usage.tokens": tokens
+ });
+ return new DefaultEmbedManyResult({
+ values,
+ embeddings,
+ usage: { tokens }
+ });
+ }
+ });
  }
  var DefaultEmbedManyResult = class {
  constructor(options) {
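
embedMany gets the same treatment: when the model reports maxEmbeddingsPerCall, the inputs are still split with splitArray, but each chunk now runs inside its own ai.embedMany.doEmbed span and token usage is summed across chunks. Sketch under the same assumptions as the embed example above:

    const { embedMany } = require("ai");
    const { openai } = require("@ai-sdk/openai"); // assumed provider package

    async function main() {
      const { embeddings, usage } = await embedMany({
        model: openai.embedding("text-embedding-3-small"), // illustrative model id
        values: ["first text", "second text", "third text"], // may span several chunks
        experimental_telemetry: { isEnabled: true, functionId: "embed-many-example" },
      });
      console.log(embeddings.length, usage.tokens); // one embedding per input value
    }

    main();
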
@@ -251,8 +534,11 @@ var DefaultEmbedManyResult = class {
  };

  // core/generate-object/generate-object.ts
- var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_provider6 = require("@ai-sdk/provider");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+
+ // core/prompt/convert-to-language-model-prompt.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");

  // core/util/detect-image-mimetype.ts
  var mimeTypeSignatures = [
@@ -270,8 +556,37 @@ function detectImageMimeType(image) {
  return void 0;
  }

- // core/prompt/data-content.ts
+ // core/util/download.ts
  var import_provider2 = require("@ai-sdk/provider");
+ async function download({
+ url,
+ fetchImplementation = fetch
+ }) {
+ var _a;
+ const urlText = url.toString();
+ try {
+ const response = await fetchImplementation(urlText);
+ if (!response.ok) {
+ throw new import_provider2.DownloadError({
+ url: urlText,
+ statusCode: response.status,
+ statusText: response.statusText
+ });
+ }
+ return {
+ data: new Uint8Array(await response.arrayBuffer()),
+ mimeType: (_a = response.headers.get("content-type")) != null ? _a : void 0
+ };
+ } catch (error) {
+ if (import_provider2.DownloadError.isDownloadError(error)) {
+ throw error;
+ }
+ throw new import_provider2.DownloadError({ url: urlText, cause: error });
+ }
+ }
+
+ // core/prompt/data-content.ts
+ var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
  function convertDataContentToBase64String(content) {
  if (typeof content === "string") {
@@ -290,7 +605,7 @@ function convertDataContentToUint8Array(content) {
  try {
  return (0, import_provider_utils2.convertBase64ToUint8Array)(content);
  } catch (error) {
- throw new import_provider2.InvalidDataContentError({
+ throw new import_provider3.InvalidDataContentError({
  message: "Invalid data content. Content string is not a base64-encoded media.",
  content,
  cause: error
@@ -300,7 +615,7 @@ function convertDataContentToUint8Array(content) {
  if (content instanceof ArrayBuffer) {
  return new Uint8Array(content);
  }
- throw new import_provider2.InvalidDataContentError({ content });
+ throw new import_provider3.InvalidDataContentError({ content });
  }
  function convertUint8ArrayToText(uint8Array) {
  try {
@@ -334,12 +649,16 @@ var InvalidMessageRoleError = class extends Error {
  };

  // core/prompt/convert-to-language-model-prompt.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- function convertToLanguageModelPrompt(prompt) {
+ async function convertToLanguageModelPrompt({
+ prompt,
+ modelSupportsImageUrls = true,
+ downloadImplementation = download
+ }) {
  const languageModelMessages = [];
  if (prompt.system != null) {
  languageModelMessages.push({ role: "system", content: prompt.system });
  }
+ const downloadedImages = modelSupportsImageUrls || prompt.messages == null ? null : await downloadImages(prompt.messages, downloadImplementation);
  const promptType = prompt.type;
  switch (promptType) {
  case "prompt": {
@@ -351,7 +670,9 @@ function convertToLanguageModelPrompt(prompt) {
  }
  case "messages": {
  languageModelMessages.push(
- ...prompt.messages.map(convertToLanguageModelMessage)
+ ...prompt.messages.map(
+ (message) => convertToLanguageModelMessage(message, downloadedImages)
+ )
  );
  break;
  }
@@ -362,7 +683,7 @@ function convertToLanguageModelPrompt(prompt) {
  }
  return languageModelMessages;
  }
- function convertToLanguageModelMessage(message) {
+ function convertToLanguageModelMessage(message, downloadedImages) {
  const role = message.role;
  switch (role) {
  case "system": {
@@ -379,18 +700,27 @@ function convertToLanguageModelMessage(message) {
  role: "user",
  content: message.content.map(
  (part) => {
- var _a;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return part;
  }
  case "image": {
  if (part.image instanceof URL) {
- return {
- type: "image",
- image: part.image,
- mimeType: part.mimeType
- };
+ if (downloadedImages == null) {
+ return {
+ type: "image",
+ image: part.image,
+ mimeType: part.mimeType
+ };
+ } else {
+ const downloadedImage = downloadedImages[part.image.toString()];
+ return {
+ type: "image",
+ image: downloadedImage.data,
+ mimeType: (_a = part.mimeType) != null ? _a : downloadedImage.mimeType
+ };
+ }
  }
  if (typeof part.image === "string") {
  try {
@@ -398,11 +728,20 @@ function convertToLanguageModelMessage(message) {
  switch (url.protocol) {
  case "http:":
  case "https:": {
- return {
- type: "image",
- image: url,
- mimeType: part.mimeType
- };
+ if (downloadedImages == null) {
+ return {
+ type: "image",
+ image: url,
+ mimeType: part.mimeType
+ };
+ } else {
+ const downloadedImage = downloadedImages[part.image];
+ return {
+ type: "image",
+ image: downloadedImage.data,
+ mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType
+ };
+ }
  }
  case "data:": {
  try {
@@ -437,7 +776,7 @@ function convertToLanguageModelMessage(message) {
  return {
  type: "image",
  image: imageUint8,
- mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
+ mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8)
  };
  }
  }
@@ -469,18 +808,37 @@ function convertToLanguageModelMessage(message) {
  }
  }
  }
+ async function downloadImages(messages, downloadImplementation) {
+ const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
+ (content) => Array.isArray(content)
+ ).flat().filter((part) => part.type === "image").map((part) => part.image).map(
+ (part) => (
+ // support string urls in image parts:
+ typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
+ )
+ ).filter((image) => image instanceof URL);
+ const downloadedImages = await Promise.all(
+ urls.map(async (url) => ({
+ url,
+ data: await downloadImplementation({ url })
+ }))
+ );
+ return Object.fromEntries(
+ downloadedImages.map(({ url, data }) => [url.toString(), data])
+ );
+ }

  // core/prompt/get-validated-prompt.ts
- var import_provider3 = require("@ai-sdk/provider");
+ var import_provider4 = require("@ai-sdk/provider");
  function getValidatedPrompt(prompt) {
  if (prompt.prompt == null && prompt.messages == null) {
- throw new import_provider3.InvalidPromptError({
+ throw new import_provider4.InvalidPromptError({
  prompt,
  message: "prompt or messages must be defined"
  });
  }
  if (prompt.prompt != null && prompt.messages != null) {
- throw new import_provider3.InvalidPromptError({
+ throw new import_provider4.InvalidPromptError({
  prompt,
  message: "prompt and messages cannot be defined at the same time"
  });
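
The convertToLanguageModelPrompt rework above means that when a model declares supportsImageUrls as false, image URLs in user messages are fetched via downloadImages and handed to the provider as bytes with a detected mime type. A hedged sketch of the message shape this affects; the model variable is an assumption (any model whose supportsImageUrls is false):

    const { generateText } = require("ai");

    async function main() {
      const { text } = await generateText({
        model: someVisionModel, // assumed: a model with supportsImageUrls === false
        messages: [
          {
            role: "user",
            content: [
              { type: "text", text: "Describe this image." },
              // downloaded to a Uint8Array before the provider call when URLs are unsupported:
              { type: "image", image: new URL("https://example.com/photo.jpg") },
            ],
          },
        ],
      });
      console.log(text);
    }
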
@@ -488,7 +846,7 @@ function getValidatedPrompt(prompt) {
  if (prompt.messages != null) {
  for (const message of prompt.messages) {
  if (message.role === "system" && typeof message.content !== "string") {
- throw new import_provider3.InvalidPromptError({
+ throw new import_provider4.InvalidPromptError({
  prompt,
  message: "system message content must be a string"
  });
@@ -510,7 +868,7 @@ function getValidatedPrompt(prompt) {
  }

  // core/prompt/prepare-call-settings.ts
- var import_provider4 = require("@ai-sdk/provider");
+ var import_provider5 = require("@ai-sdk/provider");
  function prepareCallSettings({
  maxTokens,
  temperature,
@@ -523,14 +881,14 @@ function prepareCallSettings({
  }) {
  if (maxTokens != null) {
  if (!Number.isInteger(maxTokens)) {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "maxTokens",
  value: maxTokens,
  message: "maxTokens must be an integer"
  });
  }
  if (maxTokens < 1) {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "maxTokens",
  value: maxTokens,
  message: "maxTokens must be >= 1"
@@ -539,7 +897,7 @@ function prepareCallSettings({
  }
  if (temperature != null) {
  if (typeof temperature !== "number") {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "temperature",
  value: temperature,
  message: "temperature must be a number"
@@ -548,7 +906,7 @@ function prepareCallSettings({
  }
  if (topP != null) {
  if (typeof topP !== "number") {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "topP",
  value: topP,
  message: "topP must be a number"
@@ -557,208 +915,57 @@ function prepareCallSettings({
  }
  if (presencePenalty != null) {
  if (typeof presencePenalty !== "number") {
- throw new import_provider4.InvalidArgumentError({
+ throw new import_provider5.InvalidArgumentError({
  parameter: "presencePenalty",
  value: presencePenalty,
  message: "presencePenalty must be a number"
  });
  }
  }
- if (frequencyPenalty != null) {
- if (typeof frequencyPenalty !== "number") {
- throw new import_provider4.InvalidArgumentError({
- parameter: "frequencyPenalty",
- value: frequencyPenalty,
- message: "frequencyPenalty must be a number"
- });
- }
- }
- if (seed != null) {
- if (!Number.isInteger(seed)) {
- throw new import_provider4.InvalidArgumentError({
- parameter: "seed",
- value: seed,
- message: "seed must be an integer"
- });
- }
- }
- if (maxRetries != null) {
- if (!Number.isInteger(maxRetries)) {
- throw new import_provider4.InvalidArgumentError({
- parameter: "maxRetries",
- value: maxRetries,
- message: "maxRetries must be an integer"
- });
- }
- if (maxRetries < 0) {
- throw new import_provider4.InvalidArgumentError({
- parameter: "maxRetries",
- value: maxRetries,
- message: "maxRetries must be >= 0"
- });
- }
- }
- return {
- maxTokens,
- temperature: temperature != null ? temperature : 0,
- topP,
- presencePenalty,
- frequencyPenalty,
- stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
- seed,
- maxRetries: maxRetries != null ? maxRetries : 2
- };
- }
-
- // core/telemetry/get-base-telemetry-attributes.ts
- function getBaseTelemetryAttributes({
- operationName,
- model,
- settings,
- telemetry,
- headers
- }) {
- var _a;
- return {
- "ai.model.provider": model.provider,
- "ai.model.id": model.modelId,
- // settings:
- ...Object.entries(settings).reduce((attributes, [key, value]) => {
- attributes[`ai.settings.${key}`] = value;
- return attributes;
- }, {}),
- // special telemetry information
- "operation.name": operationName,
- "resource.name": telemetry == null ? void 0 : telemetry.functionId,
- "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
- // add metadata as attributes:
- ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
- (attributes, [key, value]) => {
- attributes[`ai.telemetry.metadata.${key}`] = value;
- return attributes;
- },
- {}
- ),
- // request headers
- ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
- if (value !== void 0) {
- attributes[`ai.request.headers.${key}`] = value;
- }
- return attributes;
- }, {})
- };
- }
-
- // core/telemetry/get-tracer.ts
- var import_api = require("@opentelemetry/api");
-
- // core/telemetry/noop-tracer.ts
- var noopTracer = {
- startSpan() {
- return noopSpan;
- },
- startActiveSpan(name, arg1, arg2, arg3) {
- if (typeof arg1 === "function") {
- return arg1(noopSpan);
- }
- if (typeof arg2 === "function") {
- return arg2(noopSpan);
- }
- if (typeof arg3 === "function") {
- return arg3(noopSpan);
- }
- }
- };
- var noopSpan = {
- spanContext() {
- return noopSpanContext;
- },
- setAttribute() {
- return this;
- },
- setAttributes() {
- return this;
- },
- addEvent() {
- return this;
- },
- addLink() {
- return this;
- },
- addLinks() {
- return this;
- },
- setStatus() {
- return this;
- },
- updateName() {
- return this;
- },
- end() {
- return this;
- },
- isRecording() {
- return false;
- },
- recordException() {
- return this;
- }
- };
- var noopSpanContext = {
- traceId: "",
- spanId: "",
- traceFlags: 0
- };
-
- // core/telemetry/get-tracer.ts
- var testTracer = void 0;
- function getTracer({ isEnabled }) {
- if (!isEnabled) {
- return noopTracer;
- }
- if (testTracer) {
- return testTracer;
+ if (frequencyPenalty != null) {
+ if (typeof frequencyPenalty !== "number") {
+ throw new import_provider5.InvalidArgumentError({
+ parameter: "frequencyPenalty",
+ value: frequencyPenalty,
+ message: "frequencyPenalty must be a number"
+ });
+ }
  }
- return import_api.trace.getTracer("ai");
- }
-
- // core/telemetry/record-span.ts
- var import_api2 = require("@opentelemetry/api");
- function recordSpan({
- name,
- tracer,
- attributes,
- fn,
- endWhenDone = true
- }) {
- return tracer.startActiveSpan(name, { attributes }, async (span) => {
- try {
- const result = await fn(span);
- if (endWhenDone) {
- span.end();
- }
- return result;
- } catch (error) {
- try {
- if (error instanceof Error) {
- span.recordException({
- name: error.name,
- message: error.message,
- stack: error.stack
- });
- span.setStatus({
- code: import_api2.SpanStatusCode.ERROR,
- message: error.message
- });
- } else {
- span.setStatus({ code: import_api2.SpanStatusCode.ERROR });
- }
- } finally {
- span.end();
- }
- throw error;
+ if (seed != null) {
+ if (!Number.isInteger(seed)) {
+ throw new import_provider5.InvalidArgumentError({
+ parameter: "seed",
+ value: seed,
+ message: "seed must be an integer"
+ });
  }
- });
+ }
+ if (maxRetries != null) {
+ if (!Number.isInteger(maxRetries)) {
+ throw new import_provider5.InvalidArgumentError({
+ parameter: "maxRetries",
+ value: maxRetries,
+ message: "maxRetries must be an integer"
+ });
+ }
+ if (maxRetries < 0) {
+ throw new import_provider5.InvalidArgumentError({
+ parameter: "maxRetries",
+ value: maxRetries,
+ message: "maxRetries must be >= 0"
+ });
+ }
+ }
+ return {
+ maxTokens,
+ temperature: temperature != null ? temperature : 0,
+ topP,
+ presencePenalty,
+ frequencyPenalty,
+ stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
+ seed,
+ maxRetries: maxRetries != null ? maxRetries : 2
+ };
  }

  // core/types/token-usage.ts
@@ -770,12 +977,6 @@ function calculateCompletionTokenUsage(usage) {
  };
  }

- // core/util/convert-zod-to-json-schema.ts
- var import_zod_to_json_schema = __toESM(require("zod-to-json-schema"));
- function convertZodToJSONSchema(zodSchema) {
- return (0, import_zod_to_json_schema.default)(zodSchema);
- }
-
  // core/util/prepare-response-headers.ts
  function prepareResponseHeaders(init, { contentType }) {
  var _a;
@@ -786,6 +987,41 @@ function prepareResponseHeaders(init, { contentType }) {
  return headers;
  }

+ // core/util/schema.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod_to_json_schema = __toESM(require("zod-to-json-schema"));
+ var schemaSymbol = Symbol("vercel.ai.schema");
+ function jsonSchema(jsonSchema2, {
+ validate
+ } = {}) {
+ return {
+ [schemaSymbol]: true,
+ _type: void 0,
+ // should never be used directly
+ [import_provider_utils4.validatorSymbol]: true,
+ jsonSchema: jsonSchema2,
+ validate
+ };
+ }
+ function isSchema(value) {
+ return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
+ }
+ function asSchema(schema) {
+ return isSchema(schema) ? schema : zodSchema(schema);
+ }
+ function zodSchema(zodSchema2) {
+ return jsonSchema(
+ // we assume that zodToJsonSchema will return a valid JSONSchema7:
+ (0, import_zod_to_json_schema.default)(zodSchema2),
+ {
+ validate: (value) => {
+ const result = zodSchema2.safeParse(value);
+ return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+ }
+ }
+ );
+ }
+
  // core/generate-object/inject-json-schema-into-system.ts
  var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
  var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
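
The new core/util/schema.ts module backs the jsonSchema export registered in streams_exports earlier in this diff; asSchema accepts either one of these schema objects or a zod schema (converted through zod-to-json-schema). A sketch of the new call shape, with illustrative schema contents:

    const { jsonSchema } = require("ai");

    // A raw JSON schema can now stand in for a zod schema. The optional validate
    // callback supplies runtime validation and returns the same { success, value }
    // or { success, error } shape that zodSchema produces above.
    const recipeSchema = jsonSchema(
      { type: "object", properties: { name: { type: "string" } }, required: ["name"] },
      {
        validate: (value) =>
          value !== null && typeof value === "object" && typeof value.name === "string"
            ? { success: true, value }
            : { success: false, error: new Error("invalid recipe") }, // illustrative check
      }
    );
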
@@ -808,7 +1044,7 @@ function injectJsonSchemaIntoSystem({
  // core/generate-object/generate-object.ts
  async function generateObject({
  model,
- schema,
+ schema: inputSchema,
  mode,
  system,
  prompt,
@@ -827,7 +1063,7 @@ async function generateObject({
  headers,
  settings: { ...settings, maxRetries }
  });
- const jsonSchema = convertZodToJSONSchema(schema);
+ const schema = asSchema(inputSchema);
  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
  return recordSpan({
  name: "ai.generateObject",
@@ -835,7 +1071,7 @@ async function generateObject({
  ...baseTelemetryAttributes,
  // specific settings that only make sense on the outer level:
  "ai.prompt": JSON.stringify({ system, prompt, messages }),
- "ai.settings.jsonSchema": JSON.stringify(jsonSchema),
+ "ai.schema": JSON.stringify(schema.jsonSchema),
  "ai.settings.mode": mode
  },
  tracer,
@@ -854,11 +1090,17 @@ async function generateObject({
  switch (mode) {
  case "json": {
  const validatedPrompt = getValidatedPrompt({
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+ system: injectJsonSchemaIntoSystem({
+ system,
+ schema: schema.jsonSchema
+ }),
  prompt,
  messages
  });
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
  () => recordSpan({
@@ -890,7 +1132,7 @@ async function generateObject({
  })
  );
  if (generateResult.text === void 0) {
- throw new import_provider5.NoObjectGeneratedError();
+ throw new import_provider6.NoObjectGeneratedError();
  }
  result = generateResult.text;
  finishReason = generateResult.finishReason;
@@ -906,7 +1148,10 @@ async function generateObject({
  prompt,
  messages
  });
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
  () => recordSpan({
@@ -926,7 +1171,7 @@ async function generateObject({
  type: "function",
  name: "json",
  description: "Respond with a JSON object.",
- parameters: jsonSchema
+ parameters: schema.jsonSchema
  }
  },
  ...prepareCallSettings(settings),
@@ -948,7 +1193,7 @@ async function generateObject({
  );
  const functionArgs = (_b = (_a2 = generateResult.toolCalls) == null ? void 0 : _a2[0]) == null ? void 0 : _b.args;
  if (functionArgs === void 0) {
- throw new import_provider5.NoObjectGeneratedError();
+ throw new import_provider6.NoObjectGeneratedError();
  }
  result = functionArgs;
  finishReason = generateResult.finishReason;
@@ -968,7 +1213,7 @@ async function generateObject({
  throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
  }
  }
- const parseResult = (0, import_provider_utils4.safeParseJSON)({ text: result, schema });
+ const parseResult = (0, import_provider_utils5.safeParseJSON)({ text: result, schema });
  if (!parseResult.success) {
  throw parseResult.error;
  }
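
generateObject therefore accepts both schema flavors (note the schema: inputSchema rename and asSchema call above), and the telemetry attribute is now ai.schema rather than ai.settings.jsonSchema. Hedged usage sketch; the provider package and model id are illustrative assumptions:

    const { generateObject } = require("ai");
    const { z } = require("zod");
    const { openai } = require("@ai-sdk/openai"); // assumed provider package

    async function main() {
      const { object } = await generateObject({
        model: openai("gpt-4o"), // illustrative model id
        schema: z.object({ name: z.string(), ingredients: z.array(z.string()) }),
        prompt: "Generate a lasagna recipe.",
      });
      console.log(object.name); // parsed and validated against the schema
    }

    main();
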
@@ -1011,7 +1256,7 @@ var DefaultGenerateObjectResult = class {
  var experimental_generateObject = generateObject;

  // core/generate-object/stream-object.ts
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
  var import_ui_utils = require("@ai-sdk/ui-utils");

  // core/util/async-iterable-stream.ts
@@ -1072,7 +1317,7 @@ var DelayedPromise = class {
  // core/generate-object/stream-object.ts
  async function streamObject({
  model,
- schema,
+ schema: inputSchema,
  mode,
  system,
  prompt,
@@ -1080,98 +1325,156 @@ async function streamObject({
  maxRetries,
  abortSignal,
  headers,
+ experimental_telemetry: telemetry,
  onFinish,
  ...settings
  }) {
+ var _a;
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ operationName: "ai.streamObject",
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
  const retry = retryWithExponentialBackoff({ maxRetries });
- const jsonSchema = convertZodToJSONSchema(schema);
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let callOptions;
- let transformer;
- switch (mode) {
- case "json": {
- const validatedPrompt = getValidatedPrompt({
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
- prompt,
- messages
- });
- callOptions = {
- mode: { type: "object-json" },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
- break;
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
+ const schema = asSchema(inputSchema);
+ return recordSpan({
+ name: "ai.streamObject",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": JSON.stringify({ system, prompt, messages }),
+ "ai.schema": JSON.stringify(schema.jsonSchema),
+ "ai.settings.mode": mode
+ },
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ if (mode === "auto" || mode == null) {
+ mode = model.defaultObjectGenerationMode;
+ }
+ let callOptions;
+ let transformer;
+ switch (mode) {
+ case "json": {
+ const validatedPrompt = getValidatedPrompt({
+ system: injectJsonSchemaIntoSystem({
+ system,
+ schema: schema.jsonSchema
+ }),
+ prompt,
+ messages
+ });
+ callOptions = {
+ mode: { type: "object-json" },
+ ...prepareCallSettings(settings),
+ inputFormat: validatedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ }),
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform: (chunk, controller) => {
+ switch (chunk.type) {
+ case "text-delta":
+ controller.enqueue(chunk.textDelta);
+ break;
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
  }
- };
- break;
- }
- case "tool": {
- const validatedPrompt = getValidatedPrompt({
- system,
- prompt,
- messages
- });
- callOptions = {
- mode: {
- type: "object-tool",
- tool: {
- type: "function",
- name: "json",
- description: "Respond with a JSON object.",
- parameters: jsonSchema
- }
- },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- };
- transformer = {
- transform(chunk, controller) {
- switch (chunk.type) {
- case "tool-call-delta":
- controller.enqueue(chunk.argsTextDelta);
- break;
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
+ case "tool": {
+ const validatedPrompt = getValidatedPrompt({
+ system,
+ prompt,
+ messages
+ });
+ callOptions = {
+ mode: {
+ type: "object-tool",
+ tool: {
+ type: "function",
+ name: "json",
+ description: "Respond with a JSON object.",
+ parameters: schema.jsonSchema
+ }
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: validatedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ }),
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "tool-call-delta":
+ controller.enqueue(chunk.argsTextDelta);
+ break;
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
  }
- };
- break;
- }
- case void 0: {
- throw new Error("Model does not have a default object generation mode.");
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ case void 0: {
+ throw new Error(
+ "Model does not have a default object generation mode."
+ );
+ }
+ default: {
+ const _exhaustiveCheck = mode;
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ }
+ }
+ const {
+ result: { stream, warnings, rawResponse },
+ doStreamSpan
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamObject.doStream",
+ attributes: {
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": callOptions.inputFormat,
+ "ai.prompt.messages": JSON.stringify(callOptions.prompt),
+ "ai.settings.mode": mode
+ },
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => {
+ return {
+ result: await model.doStream(callOptions),
+ doStreamSpan: doStreamSpan2
+ };
+ }
+ })
+ );
+ return new DefaultStreamObjectResult({
+ stream: stream.pipeThrough(new TransformStream(transformer)),
+ warnings,
+ rawResponse,
+ schema,
+ onFinish,
+ rootSpan,
+ doStreamSpan
+ });
  }
- }
- const result = await retry(() => model.doStream(callOptions));
- return new DefaultStreamObjectResult({
- stream: result.stream.pipeThrough(new TransformStream(transformer)),
- warnings: result.warnings,
- rawResponse: result.rawResponse,
- schema,
- onFinish
  });
  }
  var DefaultStreamObjectResult = class {
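
streamObject is now wrapped in ai.streamObject / ai.streamObject.doStream spans created with endWhenDone: false, since both spans must stay open until the stream flushes (see the rootSpan/doStreamSpan handling below). Call-site sketch under the same provider assumptions as the earlier examples:

    const { streamObject } = require("ai");
    const { z } = require("zod");
    const { openai } = require("@ai-sdk/openai"); // assumed provider package

    async function main() {
      const result = await streamObject({
        model: openai("gpt-4o"), // illustrative model id
        schema: z.object({ headline: z.string() }),
        prompt: "Invent a headline.",
        experimental_telemetry: { isEnabled: true, functionId: "stream-object-example" },
      });
      for await (const partial of result.partialObjectStream) {
        console.log(partial); // grows as text/tool-call deltas accumulate
      }
    }

    main();
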
@@ -1180,7 +1483,9 @@ var DefaultStreamObjectResult = class {
  warnings,
  rawResponse,
  schema,
- onFinish
+ onFinish,
+ rootSpan,
+ doStreamSpan
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -1195,10 +1500,15 @@ var DefaultStreamObjectResult = class {
  let accumulatedText = "";
  let delta = "";
  let latestObject = void 0;
+ let firstChunk = true;
  const self = this;
  this.originalStream = stream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
+ if (firstChunk) {
+ firstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk");
+ }
  if (typeof chunk === "string") {
  accumulatedText += chunk;
  delta += chunk;
@@ -1230,7 +1540,7 @@ var DefaultStreamObjectResult = class {
  usage = calculateCompletionTokenUsage(chunk.usage);
  controller.enqueue({ ...chunk, usage });
  resolveUsage(usage);
- const validationResult = (0, import_provider_utils5.safeValidateTypes)({
+ const validationResult = (0, import_provider_utils6.safeValidateTypes)({
  value: latestObject,
  schema
  });
@@ -1252,12 +1562,24 @@ var DefaultStreamObjectResult = class {
  // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
  async flush(controller) {
  try {
+ const finalUsage = usage != null ? usage : {
+ promptTokens: NaN,
+ completionTokens: NaN,
+ totalTokens: NaN
+ };
+ doStreamSpan.setAttributes({
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.result.object": JSON.stringify(object)
+ });
+ doStreamSpan.end();
+ rootSpan.setAttributes({
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.result.object": JSON.stringify(object)
+ });
  await (onFinish == null ? void 0 : onFinish({
- usage: usage != null ? usage : {
- promptTokens: NaN,
- completionTokens: NaN,
- totalTokens: NaN
- },
+ usage: finalUsage,
  object,
  error,
  rawResponse,
@@ -1265,6 +1587,8 @@ var DefaultStreamObjectResult = class {
  }));
  } catch (error2) {
  controller.error(error2);
+ } finally {
+ rootSpan.end();
  }
  }
  })
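
The flush handler above now computes finalUsage once, stamps it on both spans, ends doStreamSpan, and ends rootSpan in a finally block; onFinish receives the same finalUsage. A sketch of an onFinish callback consistent with the fields passed above (usage, object, error, rawResponse):

    // Passed as streamObject({ ..., onFinish }); runs when the stream is about to close.
    function onFinish({ usage, object, error, rawResponse }) {
      if (object === undefined) {
        console.error("no valid object generated:", error);
      } else {
        console.log("total tokens:", usage.totalTokens); // NaN when the provider sent no usage
      }
    }
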
@@ -1378,36 +1702,36 @@ function prepareToolsAndToolChoice({
  type: "function",
  name,
  description: tool2.description,
- parameters: convertZodToJSONSchema(tool2.parameters)
+ parameters: asSchema(tool2.parameters).jsonSchema
  })),
  toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
  };
  }

  // core/generate-text/tool-call.ts
- var import_provider6 = require("@ai-sdk/provider");
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_provider7 = require("@ai-sdk/provider");
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
  function parseToolCall({
  toolCall,
  tools
  }) {
  const toolName = toolCall.toolName;
  if (tools == null) {
- throw new import_provider6.NoSuchToolError({ toolName: toolCall.toolName });
+ throw new import_provider7.NoSuchToolError({ toolName: toolCall.toolName });
  }
  const tool2 = tools[toolName];
  if (tool2 == null) {
- throw new import_provider6.NoSuchToolError({
+ throw new import_provider7.NoSuchToolError({
  toolName: toolCall.toolName,
  availableTools: Object.keys(tools)
  });
  }
- const parseResult = (0, import_provider_utils6.safeParseJSON)({
+ const parseResult = (0, import_provider_utils7.safeParseJSON)({
  text: toolCall.args,
- schema: tool2.parameters
+ schema: asSchema(tool2.parameters)
  });
  if (parseResult.success === false) {
- throw new import_provider6.InvalidToolArgumentsError({
+ throw new import_provider7.InvalidToolArgumentsError({
  toolName,
  toolArgs: toolCall.args,
  cause: parseResult.error
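
parseToolCall and prepareToolsAndToolChoice above now route tool parameters through asSchema too, so a tool's parameters may be a zod schema or a jsonSchema value. Illustrative sketch (the tool itself is invented for the example):

    const { tool, jsonSchema } = require("ai");

    // Assumption: this mirrors the existing zod-based tool declaration; only the
    // schema plumbing is shown in the diff.
    const weatherTool = tool({
      description: "Get the weather for a city.",
      parameters: jsonSchema({
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      }),
      execute: async ({ city }) => `It is sunny in ${city}.`, // illustrative behavior
    });
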
@@ -1468,7 +1792,10 @@ async function generateText({
  ...prepareToolsAndToolChoice({ tools, toolChoice })
  };
  const callSettings = prepareCallSettings(settings);
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
@@ -1541,7 +1868,9 @@ async function generateText({
  });
  responseMessages.push(...newResponseMessages);
  promptMessages.push(
- ...newResponseMessages.map(convertToLanguageModelMessage)
+ ...newResponseMessages.map(
+ (message) => convertToLanguageModelMessage(message, null)
+ )
  );
  } while (
  // there are tool calls:
@@ -1744,7 +2073,7 @@ function mergeStreams(stream1, stream2) {
  }

  // core/generate-text/run-tools-transformation.ts
- var import_provider7 = require("@ai-sdk/provider");
+ var import_provider8 = require("@ai-sdk/provider");
  var import_ui_utils2 = require("@ai-sdk/ui-utils");
  function runToolsTransformation({
  tools,
@@ -1794,7 +2123,7 @@ function runToolsTransformation({
  if (tools == null) {
  toolResultsStreamController.enqueue({
  type: "error",
- error: new import_provider7.NoSuchToolError({ toolName: chunk.toolName })
+ error: new import_provider8.NoSuchToolError({ toolName: chunk.toolName })
  });
  break;
  }
@@ -1802,7 +2131,7 @@ function runToolsTransformation({
  if (tool2 == null) {
  toolResultsStreamController.enqueue({
  type: "error",
- error: new import_provider7.NoSuchToolError({
+ error: new import_provider8.NoSuchToolError({
  toolName: chunk.toolName,
  availableTools: Object.keys(tools)
  })
@@ -1951,7 +2280,10 @@ async function streamText({
  fn: async (rootSpan) => {
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
- const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: validatedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls
+ });
  const {
  result: { stream, warnings, rawResponse },
  doStreamSpan
@@ -2580,7 +2912,7 @@ function tool(tool2) {
  }

  // core/types/errors.ts
- var import_provider8 = require("@ai-sdk/provider");
+ var import_provider9 = require("@ai-sdk/provider");

  // core/util/cosine-similarity.ts
  function cosineSimilarity(vector1, vector2) {
@@ -3619,8 +3951,8 @@ var StreamingTextResponse = class extends Response {
  };

  // streams/index.ts
- var generateId2 = import_provider_utils7.generateId;
- var nanoid = import_provider_utils7.generateId;
+ var generateId2 = import_provider_utils8.generateId;
+ var nanoid = import_provider_utils8.generateId;
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  AIStream,
@@ -3684,6 +4016,7 @@ var nanoid = import_provider_utils7.generateId;
  generateId,
  generateObject,
  generateText,
+ jsonSchema,
  nanoid,
  parseComplexResponse,
  parseStreamPart,