ai 3.2.34 → 3.2.36

This diff shows the published contents of the two package versions as they appear in their respective public registries. It is provided for informational purposes only.
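In summary, the changes below (a) wire OpenTelemetry spans into embed, embedMany, and streamObject behind the existing experimental_telemetry option, (b) download image URLs for models that report supportsImageUrls === false, and (c) replace the internal convertZodToJSONSchema helper with a Schema abstraction (asSchema) plus a newly exported jsonSchema function. A minimal sketch of the new user-facing surface follows; the provider import and model ids are illustrative assumptions, not part of this diff:

import { embed, generateObject, jsonSchema } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider; any embedding/language model works

// embed() and embedMany() now record "ai.embed(.doEmbed)" / "ai.embedMany(.doEmbed)"
// spans when telemetry is opted in (it stays off by default):
const { embedding, usage } = await embed({
  model: openai.embedding("text-embedding-3-small"),
  value: "sunny day at the beach",
  experimental_telemetry: { isEnabled: true, functionId: "embed-demo" },
});

// schema options now accept raw JSON Schema via the exported jsonSchema()
// helper; zod schemas keep working because asSchema() wraps either form:
const { object } = await generateObject({
  model: openai("gpt-4o"),
  schema: jsonSchema({
    type: "object",
    properties: { city: { type: "string" } },
    required: ["city"],
  }),
  prompt: "Name a city in Italy.",
});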
package/dist/index.mjs CHANGED
@@ -13,6 +13,157 @@ import {
  } from "@ai-sdk/ui-utils";
  import { generateId as generateIdImpl } from "@ai-sdk/provider-utils";

+ // core/telemetry/get-base-telemetry-attributes.ts
+ function getBaseTelemetryAttributes({
+   operationName,
+   model,
+   settings,
+   telemetry,
+   headers
+ }) {
+   var _a;
+   return {
+     "ai.model.provider": model.provider,
+     "ai.model.id": model.modelId,
+     // settings:
+     ...Object.entries(settings).reduce((attributes, [key, value]) => {
+       attributes[`ai.settings.${key}`] = value;
+       return attributes;
+     }, {}),
+     // special telemetry information
+     "operation.name": operationName,
+     "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+     "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+     // add metadata as attributes:
+     ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+       (attributes, [key, value]) => {
+         attributes[`ai.telemetry.metadata.${key}`] = value;
+         return attributes;
+       },
+       {}
+     ),
+     // request headers
+     ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+       if (value !== void 0) {
+         attributes[`ai.request.headers.${key}`] = value;
+       }
+       return attributes;
+     }, {})
+   };
+ }
+
+ // core/telemetry/get-tracer.ts
+ import { trace } from "@opentelemetry/api";
+
+ // core/telemetry/noop-tracer.ts
+ var noopTracer = {
+   startSpan() {
+     return noopSpan;
+   },
+   startActiveSpan(name, arg1, arg2, arg3) {
+     if (typeof arg1 === "function") {
+       return arg1(noopSpan);
+     }
+     if (typeof arg2 === "function") {
+       return arg2(noopSpan);
+     }
+     if (typeof arg3 === "function") {
+       return arg3(noopSpan);
+     }
+   }
+ };
+ var noopSpan = {
+   spanContext() {
+     return noopSpanContext;
+   },
+   setAttribute() {
+     return this;
+   },
+   setAttributes() {
+     return this;
+   },
+   addEvent() {
+     return this;
+   },
+   addLink() {
+     return this;
+   },
+   addLinks() {
+     return this;
+   },
+   setStatus() {
+     return this;
+   },
+   updateName() {
+     return this;
+   },
+   end() {
+     return this;
+   },
+   isRecording() {
+     return false;
+   },
+   recordException() {
+     return this;
+   }
+ };
+ var noopSpanContext = {
+   traceId: "",
+   spanId: "",
+   traceFlags: 0
+ };
+
+ // core/telemetry/get-tracer.ts
+ var testTracer = void 0;
+ function getTracer({ isEnabled }) {
+   if (!isEnabled) {
+     return noopTracer;
+   }
+   if (testTracer) {
+     return testTracer;
+   }
+   return trace.getTracer("ai");
+ }
+
+ // core/telemetry/record-span.ts
+ import { SpanStatusCode } from "@opentelemetry/api";
+ function recordSpan({
+   name,
+   tracer,
+   attributes,
+   fn,
+   endWhenDone = true
+ }) {
+   return tracer.startActiveSpan(name, { attributes }, async (span) => {
+     try {
+       const result = await fn(span);
+       if (endWhenDone) {
+         span.end();
+       }
+       return result;
+     } catch (error) {
+       try {
+         if (error instanceof Error) {
+           span.recordException({
+             name: error.name,
+             message: error.message,
+             stack: error.stack
+           });
+           span.setStatus({
+             code: SpanStatusCode.ERROR,
+             message: error.message
+           });
+         } else {
+           span.setStatus({ code: SpanStatusCode.ERROR });
+         }
+       } finally {
+         span.end();
+       }
+       throw error;
+     }
+   });
+ }
+
  // core/util/retry-with-exponential-backoff.ts
  import { APICallError, RetryError } from "@ai-sdk/provider";
  import { getErrorMessage, isAbortError } from "@ai-sdk/provider-utils";
@@ -81,18 +232,69 @@ async function embed({
    value,
    maxRetries,
    abortSignal,
-   headers
+   headers,
+   experimental_telemetry: telemetry
  }) {
    var _a;
-   const retry = retryWithExponentialBackoff({ maxRetries });
-   const modelResponse = await retry(
-     () => model.doEmbed({ values: [value], abortSignal, headers })
-   );
-   return new DefaultEmbedResult({
-     value,
-     embedding: modelResponse.embeddings[0],
-     usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN },
-     rawResponse: modelResponse.rawResponse
+   const baseTelemetryAttributes = getBaseTelemetryAttributes({
+     operationName: "ai.embed",
+     model,
+     telemetry,
+     headers,
+     settings: { maxRetries }
+   });
+   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+   return recordSpan({
+     name: "ai.embed",
+     attributes: {
+       ...baseTelemetryAttributes,
+       // specific settings that only make sense on the outer level:
+       "ai.value": JSON.stringify(value)
+     },
+     tracer,
+     fn: async (span) => {
+       const retry = retryWithExponentialBackoff({ maxRetries });
+       const { embedding, usage, rawResponse } = await retry(
+         () => (
+           // nested spans to align with the embedMany telemetry data:
+           recordSpan({
+             name: "ai.embed.doEmbed",
+             attributes: {
+               ...baseTelemetryAttributes,
+               // specific settings that only make sense on the outer level:
+               "ai.values": [JSON.stringify(value)]
+             },
+             tracer,
+             fn: async (doEmbedSpan) => {
+               var _a2;
+               const modelResponse = await model.doEmbed({
+                 values: [value],
+                 abortSignal,
+                 headers
+               });
+               const embedding2 = modelResponse.embeddings[0];
+               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+               doEmbedSpan.setAttributes({
+                 "ai.embeddings": modelResponse.embeddings.map(
+                   (embedding3) => JSON.stringify(embedding3)
+                 ),
+                 "ai.usage.tokens": usage2.tokens
+               });
+               return {
+                 embedding: embedding2,
+                 usage: usage2,
+                 rawResponse: modelResponse.rawResponse
+               };
+             }
+           })
+         )
+       );
+       span.setAttributes({
+         "ai.embedding": JSON.stringify(embedding),
+         "ai.usage.tokens": usage.tokens
+       });
+       return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+     }
    });
  }
  var DefaultEmbedResult = class {
@@ -122,32 +324,112 @@ async function embedMany({
    values,
    maxRetries,
    abortSignal,
-   headers
+   headers,
+   experimental_telemetry: telemetry
  }) {
-   var _a, _b, _c;
-   const retry = retryWithExponentialBackoff({ maxRetries });
-   const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
-   if (maxEmbeddingsPerCall == null) {
-     const modelResponse = await retry(
-       () => model.doEmbed({ values, abortSignal, headers })
-     );
-     return new DefaultEmbedManyResult({
-       values,
-       embeddings: modelResponse.embeddings,
-       usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
-     });
-   }
-   const valueChunks = splitArray(values, maxEmbeddingsPerCall);
-   const embeddings = [];
-   let tokens = 0;
-   for (const chunk of valueChunks) {
-     const modelResponse = await retry(
-       () => model.doEmbed({ values: chunk, abortSignal, headers })
-     );
-     embeddings.push(...modelResponse.embeddings);
-     tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
-   }
-   return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
+   var _a;
+   const baseTelemetryAttributes = getBaseTelemetryAttributes({
+     operationName: "ai.embedMany",
+     model,
+     telemetry,
+     headers,
+     settings: { maxRetries }
+   });
+   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+   return recordSpan({
+     name: "ai.embedMany",
+     attributes: {
+       ...baseTelemetryAttributes,
+       // specific settings that only make sense on the outer level:
+       "ai.values": values.map((value) => JSON.stringify(value))
+     },
+     tracer,
+     fn: async (span) => {
+       const retry = retryWithExponentialBackoff({ maxRetries });
+       const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
+       if (maxEmbeddingsPerCall == null) {
+         const { embeddings: embeddings2, usage } = await retry(() => {
+           return recordSpan({
+             name: "ai.embedMany.doEmbed",
+             attributes: {
+               ...baseTelemetryAttributes,
+               // specific settings that only make sense on the outer level:
+               "ai.values": values.map((value) => JSON.stringify(value))
+             },
+             tracer,
+             fn: async (doEmbedSpan) => {
+               var _a2;
+               const modelResponse = await model.doEmbed({
+                 values,
+                 abortSignal,
+                 headers
+               });
+               const embeddings3 = modelResponse.embeddings;
+               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+               doEmbedSpan.setAttributes({
+                 "ai.embeddings": embeddings3.map(
+                   (embedding) => JSON.stringify(embedding)
+                 ),
+                 "ai.usage.tokens": usage2.tokens
+               });
+               return { embeddings: embeddings3, usage: usage2 };
+             }
+           });
+         });
+         span.setAttributes({
+           "ai.embeddings": embeddings2.map(
+             (embedding) => JSON.stringify(embedding)
+           ),
+           "ai.usage.tokens": usage.tokens
+         });
+         return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+       }
+       const valueChunks = splitArray(values, maxEmbeddingsPerCall);
+       const embeddings = [];
+       let tokens = 0;
+       for (const chunk of valueChunks) {
+         const { embeddings: responseEmbeddings, usage } = await retry(() => {
+           return recordSpan({
+             name: "ai.embedMany.doEmbed",
+             attributes: {
+               ...baseTelemetryAttributes,
+               // specific settings that only make sense on the outer level:
+               "ai.values": chunk.map((value) => JSON.stringify(value))
+             },
+             tracer,
+             fn: async (doEmbedSpan) => {
+               var _a2;
+               const modelResponse = await model.doEmbed({
+                 values: chunk,
+                 abortSignal,
+                 headers
+               });
+               const embeddings2 = modelResponse.embeddings;
+               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+               doEmbedSpan.setAttributes({
+                 "ai.embeddings": embeddings2.map(
+                   (embedding) => JSON.stringify(embedding)
+                 ),
+                 "ai.usage.tokens": usage2.tokens
+               });
+               return { embeddings: embeddings2, usage: usage2 };
+             }
+           });
+         });
+         embeddings.push(...responseEmbeddings);
+         tokens += usage.tokens;
+       }
+       span.setAttributes({
+         "ai.embeddings": embeddings.map((embedding) => JSON.stringify(embedding)),
+         "ai.usage.tokens": tokens
+       });
+       return new DefaultEmbedManyResult({
+         values,
+         embeddings,
+         usage: { tokens }
+       });
+     }
+   });
  }
  var DefaultEmbedManyResult = class {
    constructor(options) {
@@ -161,6 +443,9 @@ var DefaultEmbedManyResult = class {
  import { NoObjectGeneratedError } from "@ai-sdk/provider";
  import { safeParseJSON } from "@ai-sdk/provider-utils";

+ // core/prompt/convert-to-language-model-prompt.ts
+ import { getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider-utils";
+
  // core/util/detect-image-mimetype.ts
  var mimeTypeSignatures = [
    { mimeType: "image/gif", bytes: [71, 73, 70] },
@@ -177,6 +462,35 @@ function detectImageMimeType(image) {
    return void 0;
  }

+ // core/util/download.ts
+ import { DownloadError } from "@ai-sdk/provider";
+ async function download({
+   url,
+   fetchImplementation = fetch
+ }) {
+   var _a;
+   const urlText = url.toString();
+   try {
+     const response = await fetchImplementation(urlText);
+     if (!response.ok) {
+       throw new DownloadError({
+         url: urlText,
+         statusCode: response.status,
+         statusText: response.statusText
+       });
+     }
+     return {
+       data: new Uint8Array(await response.arrayBuffer()),
+       mimeType: (_a = response.headers.get("content-type")) != null ? _a : void 0
+     };
+   } catch (error) {
+     if (DownloadError.isDownloadError(error)) {
+       throw error;
+     }
+     throw new DownloadError({ url: urlText, cause: error });
+   }
+ }
+
  // core/prompt/data-content.ts
  import { InvalidDataContentError } from "@ai-sdk/provider";
  import {
@@ -244,12 +558,16 @@ var InvalidMessageRoleError = class extends Error {
  };

  // core/prompt/convert-to-language-model-prompt.ts
- import { getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider-utils";
- function convertToLanguageModelPrompt(prompt) {
+ async function convertToLanguageModelPrompt({
+   prompt,
+   modelSupportsImageUrls = true,
+   downloadImplementation = download
+ }) {
    const languageModelMessages = [];
    if (prompt.system != null) {
      languageModelMessages.push({ role: "system", content: prompt.system });
    }
+   const downloadedImages = modelSupportsImageUrls || prompt.messages == null ? null : await downloadImages(prompt.messages, downloadImplementation);
    const promptType = prompt.type;
    switch (promptType) {
      case "prompt": {
@@ -261,7 +579,9 @@ function convertToLanguageModelPrompt(prompt) {
      }
      case "messages": {
        languageModelMessages.push(
-         ...prompt.messages.map(convertToLanguageModelMessage)
+         ...prompt.messages.map(
+           (message) => convertToLanguageModelMessage(message, downloadedImages)
+         )
        );
        break;
      }
@@ -272,7 +592,7 @@ function convertToLanguageModelPrompt(prompt) {
    }
    return languageModelMessages;
  }
- function convertToLanguageModelMessage(message) {
+ function convertToLanguageModelMessage(message, downloadedImages) {
    const role = message.role;
    switch (role) {
      case "system": {
@@ -289,18 +609,27 @@ function convertToLanguageModelMessage(message) {
          role: "user",
          content: message.content.map(
            (part) => {
-             var _a;
+             var _a, _b, _c;
              switch (part.type) {
                case "text": {
                  return part;
                }
                case "image": {
                  if (part.image instanceof URL) {
-                   return {
-                     type: "image",
-                     image: part.image,
-                     mimeType: part.mimeType
-                   };
+                   if (downloadedImages == null) {
+                     return {
+                       type: "image",
+                       image: part.image,
+                       mimeType: part.mimeType
+                     };
+                   } else {
+                     const downloadedImage = downloadedImages[part.image.toString()];
+                     return {
+                       type: "image",
+                       image: downloadedImage.data,
+                       mimeType: (_a = part.mimeType) != null ? _a : downloadedImage.mimeType
+                     };
+                   }
                  }
                  if (typeof part.image === "string") {
                    try {
@@ -308,11 +637,20 @@ function convertToLanguageModelMessage(message) {
                      switch (url.protocol) {
                        case "http:":
                        case "https:": {
-                         return {
-                           type: "image",
-                           image: url,
-                           mimeType: part.mimeType
-                         };
+                         if (downloadedImages == null) {
+                           return {
+                             type: "image",
+                             image: url,
+                             mimeType: part.mimeType
+                           };
+                         } else {
+                           const downloadedImage = downloadedImages[part.image];
+                           return {
+                             type: "image",
+                             image: downloadedImage.data,
+                             mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType
+                           };
+                         }
                        }
                        case "data:": {
                          try {
@@ -347,7 +685,7 @@ function convertToLanguageModelMessage(message) {
                  return {
                    type: "image",
                    image: imageUint8,
-                   mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
+                   mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8)
                  };
                }
              }
@@ -379,6 +717,25 @@ function convertToLanguageModelMessage(message) {
      }
    }
  }
+ async function downloadImages(messages, downloadImplementation) {
+   const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
+     (content) => Array.isArray(content)
+   ).flat().filter((part) => part.type === "image").map((part) => part.image).map(
+     (part) => (
+       // support string urls in image parts:
+       typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
+     )
+   ).filter((image) => image instanceof URL);
+   const downloadedImages = await Promise.all(
+     urls.map(async (url) => ({
+       url,
+       data: await downloadImplementation({ url })
+     }))
+   );
+   return Object.fromEntries(
+     downloadedImages.map(({ url, data }) => [url.toString(), data])
+   );
+ }

  // core/prompt/get-validated-prompt.ts
  import { InvalidPromptError } from "@ai-sdk/provider";
@@ -467,208 +824,57 @@ function prepareCallSettings({
    }
    if (presencePenalty != null) {
      if (typeof presencePenalty !== "number") {
-       throw new InvalidArgumentError({
-         parameter: "presencePenalty",
-         value: presencePenalty,
-         message: "presencePenalty must be a number"
-       });
-     }
-   }
-   if (frequencyPenalty != null) {
-     if (typeof frequencyPenalty !== "number") {
-       throw new InvalidArgumentError({
-         parameter: "frequencyPenalty",
-         value: frequencyPenalty,
-         message: "frequencyPenalty must be a number"
-       });
-     }
-   }
-   if (seed != null) {
-     if (!Number.isInteger(seed)) {
-       throw new InvalidArgumentError({
-         parameter: "seed",
-         value: seed,
-         message: "seed must be an integer"
-       });
-     }
-   }
-   if (maxRetries != null) {
-     if (!Number.isInteger(maxRetries)) {
-       throw new InvalidArgumentError({
-         parameter: "maxRetries",
-         value: maxRetries,
-         message: "maxRetries must be an integer"
-       });
-     }
-     if (maxRetries < 0) {
-       throw new InvalidArgumentError({
-         parameter: "maxRetries",
-         value: maxRetries,
-         message: "maxRetries must be >= 0"
-       });
-     }
-   }
-   return {
-     maxTokens,
-     temperature: temperature != null ? temperature : 0,
-     topP,
-     presencePenalty,
-     frequencyPenalty,
-     stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
-     seed,
-     maxRetries: maxRetries != null ? maxRetries : 2
-   };
- }
-
- // core/telemetry/get-base-telemetry-attributes.ts
- function getBaseTelemetryAttributes({
-   operationName,
-   model,
-   settings,
-   telemetry,
-   headers
- }) {
-   var _a;
-   return {
-     "ai.model.provider": model.provider,
-     "ai.model.id": model.modelId,
-     // settings:
-     ...Object.entries(settings).reduce((attributes, [key, value]) => {
-       attributes[`ai.settings.${key}`] = value;
-       return attributes;
-     }, {}),
-     // special telemetry information
-     "operation.name": operationName,
-     "resource.name": telemetry == null ? void 0 : telemetry.functionId,
-     "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
-     // add metadata as attributes:
-     ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
-       (attributes, [key, value]) => {
-         attributes[`ai.telemetry.metadata.${key}`] = value;
-         return attributes;
-       },
-       {}
-     ),
-     // request headers
-     ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
-       if (value !== void 0) {
-         attributes[`ai.request.headers.${key}`] = value;
-       }
-       return attributes;
-     }, {})
-   };
- }
-
- // core/telemetry/get-tracer.ts
- import { trace } from "@opentelemetry/api";
-
- // core/telemetry/noop-tracer.ts
- var noopTracer = {
-   startSpan() {
-     return noopSpan;
-   },
-   startActiveSpan(name, arg1, arg2, arg3) {
-     if (typeof arg1 === "function") {
-       return arg1(noopSpan);
-     }
-     if (typeof arg2 === "function") {
-       return arg2(noopSpan);
-     }
-     if (typeof arg3 === "function") {
-       return arg3(noopSpan);
-     }
-   }
- };
- var noopSpan = {
-   spanContext() {
-     return noopSpanContext;
-   },
-   setAttribute() {
-     return this;
-   },
-   setAttributes() {
-     return this;
-   },
-   addEvent() {
-     return this;
-   },
-   addLink() {
-     return this;
-   },
-   addLinks() {
-     return this;
-   },
-   setStatus() {
-     return this;
-   },
-   updateName() {
-     return this;
-   },
-   end() {
-     return this;
-   },
-   isRecording() {
-     return false;
-   },
-   recordException() {
-     return this;
-   }
- };
- var noopSpanContext = {
-   traceId: "",
-   spanId: "",
-   traceFlags: 0
- };
-
- // core/telemetry/get-tracer.ts
- var testTracer = void 0;
- function getTracer({ isEnabled }) {
-   if (!isEnabled) {
-     return noopTracer;
+       throw new InvalidArgumentError({
+         parameter: "presencePenalty",
+         value: presencePenalty,
+         message: "presencePenalty must be a number"
+       });
+     }
    }
-   if (testTracer) {
-     return testTracer;
+   if (frequencyPenalty != null) {
+     if (typeof frequencyPenalty !== "number") {
+       throw new InvalidArgumentError({
+         parameter: "frequencyPenalty",
+         value: frequencyPenalty,
+         message: "frequencyPenalty must be a number"
+       });
+     }
    }
-   return trace.getTracer("ai");
- }
-
- // core/telemetry/record-span.ts
- import { SpanStatusCode } from "@opentelemetry/api";
- function recordSpan({
-   name,
-   tracer,
-   attributes,
-   fn,
-   endWhenDone = true
- }) {
-   return tracer.startActiveSpan(name, { attributes }, async (span) => {
-     try {
-       const result = await fn(span);
-       if (endWhenDone) {
-         span.end();
-       }
-       return result;
-     } catch (error) {
-       try {
-         if (error instanceof Error) {
-           span.recordException({
-             name: error.name,
-             message: error.message,
-             stack: error.stack
-           });
-           span.setStatus({
-             code: SpanStatusCode.ERROR,
-             message: error.message
-           });
-         } else {
-           span.setStatus({ code: SpanStatusCode.ERROR });
-         }
-       } finally {
-         span.end();
-       }
-       throw error;
+   if (seed != null) {
+     if (!Number.isInteger(seed)) {
+       throw new InvalidArgumentError({
+         parameter: "seed",
+         value: seed,
+         message: "seed must be an integer"
+       });
      }
-   });
+   }
+   if (maxRetries != null) {
+     if (!Number.isInteger(maxRetries)) {
+       throw new InvalidArgumentError({
+         parameter: "maxRetries",
+         value: maxRetries,
+         message: "maxRetries must be an integer"
+       });
+     }
+     if (maxRetries < 0) {
+       throw new InvalidArgumentError({
+         parameter: "maxRetries",
+         value: maxRetries,
+         message: "maxRetries must be >= 0"
+       });
+     }
+   }
+   return {
+     maxTokens,
+     temperature: temperature != null ? temperature : 0,
+     topP,
+     presencePenalty,
+     frequencyPenalty,
+     stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
+     seed,
+     maxRetries: maxRetries != null ? maxRetries : 2
+   };
  }

  // core/types/token-usage.ts
@@ -680,12 +886,6 @@ function calculateCompletionTokenUsage(usage) {
    };
  }

- // core/util/convert-zod-to-json-schema.ts
- import zodToJsonSchema from "zod-to-json-schema";
- function convertZodToJSONSchema(zodSchema) {
-   return zodToJsonSchema(zodSchema);
- }
-
  // core/util/prepare-response-headers.ts
  function prepareResponseHeaders(init, { contentType }) {
    var _a;
@@ -696,6 +896,41 @@ function prepareResponseHeaders(init, { contentType }) {
    return headers;
  }

+ // core/util/schema.ts
+ import { validatorSymbol } from "@ai-sdk/provider-utils";
+ import zodToJsonSchema from "zod-to-json-schema";
+ var schemaSymbol = Symbol("vercel.ai.schema");
+ function jsonSchema(jsonSchema2, {
+   validate
+ } = {}) {
+   return {
+     [schemaSymbol]: true,
+     _type: void 0,
+     // should never be used directly
+     [validatorSymbol]: true,
+     jsonSchema: jsonSchema2,
+     validate
+   };
+ }
+ function isSchema(value) {
+   return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
+ }
+ function asSchema(schema) {
+   return isSchema(schema) ? schema : zodSchema(schema);
+ }
+ function zodSchema(zodSchema2) {
+   return jsonSchema(
+     // we assume that zodToJsonSchema will return a valid JSONSchema7:
+     zodToJsonSchema(zodSchema2),
+     {
+       validate: (value) => {
+         const result = zodSchema2.safeParse(value);
+         return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+       }
+     }
+   );
+ }
+
  // core/generate-object/inject-json-schema-into-system.ts
  var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
  var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
@@ -718,7 +953,7 @@ function injectJsonSchemaIntoSystem({
  // core/generate-object/generate-object.ts
  async function generateObject({
    model,
-   schema,
+   schema: inputSchema,
    mode,
    system,
    prompt,
@@ -737,7 +972,7 @@ async function generateObject({
      headers,
      settings: { ...settings, maxRetries }
    });
-   const jsonSchema = convertZodToJSONSchema(schema);
+   const schema = asSchema(inputSchema);
    const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
    return recordSpan({
      name: "ai.generateObject",
@@ -745,7 +980,7 @@ async function generateObject({
        ...baseTelemetryAttributes,
        // specific settings that only make sense on the outer level:
        "ai.prompt": JSON.stringify({ system, prompt, messages }),
-       "ai.settings.jsonSchema": JSON.stringify(jsonSchema),
+       "ai.schema": JSON.stringify(schema.jsonSchema),
        "ai.settings.mode": mode
      },
      tracer,
@@ -764,11 +999,17 @@ async function generateObject({
        switch (mode) {
          case "json": {
            const validatedPrompt = getValidatedPrompt({
-             system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+             system: injectJsonSchemaIntoSystem({
+               system,
+               schema: schema.jsonSchema
+             }),
              prompt,
              messages
            });
-           const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+           const promptMessages = await convertToLanguageModelPrompt({
+             prompt: validatedPrompt,
+             modelSupportsImageUrls: model.supportsImageUrls
+           });
            const inputFormat = validatedPrompt.type;
            const generateResult = await retry(
              () => recordSpan({
@@ -816,7 +1057,10 @@ async function generateObject({
              prompt,
              messages
            });
-           const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+           const promptMessages = await convertToLanguageModelPrompt({
+             prompt: validatedPrompt,
+             modelSupportsImageUrls: model.supportsImageUrls
+           });
            const inputFormat = validatedPrompt.type;
            const generateResult = await retry(
              () => recordSpan({
@@ -836,7 +1080,7 @@ async function generateObject({
                    type: "function",
                    name: "json",
                    description: "Respond with a JSON object.",
-                   parameters: jsonSchema
+                   parameters: schema.jsonSchema
                  }
                },
                ...prepareCallSettings(settings),
@@ -985,7 +1229,7 @@ var DelayedPromise = class {
  // core/generate-object/stream-object.ts
  async function streamObject({
    model,
-   schema,
+   schema: inputSchema,
    mode,
    system,
    prompt,
@@ -993,98 +1237,156 @@ async function streamObject({
    maxRetries,
    abortSignal,
    headers,
+   experimental_telemetry: telemetry,
    onFinish,
    ...settings
  }) {
+   var _a;
+   const baseTelemetryAttributes = getBaseTelemetryAttributes({
+     operationName: "ai.streamObject",
+     model,
+     telemetry,
+     headers,
+     settings: { ...settings, maxRetries }
+   });
+   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
    const retry = retryWithExponentialBackoff({ maxRetries });
-   const jsonSchema = convertZodToJSONSchema(schema);
-   if (mode === "auto" || mode == null) {
-     mode = model.defaultObjectGenerationMode;
-   }
-   let callOptions;
-   let transformer;
-   switch (mode) {
-     case "json": {
-       const validatedPrompt = getValidatedPrompt({
-         system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-         prompt,
-         messages
-       });
-       callOptions = {
-         mode: { type: "object-json" },
-         ...prepareCallSettings(settings),
-         inputFormat: validatedPrompt.type,
-         prompt: convertToLanguageModelPrompt(validatedPrompt),
-         abortSignal,
-         headers
-       };
-       transformer = {
-         transform: (chunk, controller) => {
-           switch (chunk.type) {
-             case "text-delta":
-               controller.enqueue(chunk.textDelta);
-               break;
-             case "finish":
-             case "error":
-               controller.enqueue(chunk);
-               break;
-           }
+   const schema = asSchema(inputSchema);
+   return recordSpan({
+     name: "ai.streamObject",
+     attributes: {
+       ...baseTelemetryAttributes,
+       // specific settings that only make sense on the outer level:
+       "ai.prompt": JSON.stringify({ system, prompt, messages }),
+       "ai.schema": JSON.stringify(schema.jsonSchema),
+       "ai.settings.mode": mode
+     },
+     tracer,
+     endWhenDone: false,
+     fn: async (rootSpan) => {
+       if (mode === "auto" || mode == null) {
+         mode = model.defaultObjectGenerationMode;
+       }
+       let callOptions;
+       let transformer;
+       switch (mode) {
+         case "json": {
+           const validatedPrompt = getValidatedPrompt({
+             system: injectJsonSchemaIntoSystem({
+               system,
+               schema: schema.jsonSchema
+             }),
+             prompt,
+             messages
+           });
+           callOptions = {
+             mode: { type: "object-json" },
+             ...prepareCallSettings(settings),
+             inputFormat: validatedPrompt.type,
+             prompt: await convertToLanguageModelPrompt({
+               prompt: validatedPrompt,
+               modelSupportsImageUrls: model.supportsImageUrls
+             }),
+             abortSignal,
+             headers
+           };
+           transformer = {
+             transform: (chunk, controller) => {
+               switch (chunk.type) {
+                 case "text-delta":
+                   controller.enqueue(chunk.textDelta);
+                   break;
+                 case "finish":
+                 case "error":
+                   controller.enqueue(chunk);
+                   break;
+               }
+             }
+           };
+           break;
          }
-       };
-       break;
-     }
-     case "tool": {
-       const validatedPrompt = getValidatedPrompt({
-         system,
-         prompt,
-         messages
-       });
-       callOptions = {
-         mode: {
-           type: "object-tool",
-           tool: {
-             type: "function",
-             name: "json",
-             description: "Respond with a JSON object.",
-             parameters: jsonSchema
-           }
-         },
-         ...prepareCallSettings(settings),
-         inputFormat: validatedPrompt.type,
-         prompt: convertToLanguageModelPrompt(validatedPrompt),
-         abortSignal,
-         headers
-       };
-       transformer = {
-         transform(chunk, controller) {
-           switch (chunk.type) {
-             case "tool-call-delta":
-               controller.enqueue(chunk.argsTextDelta);
-               break;
-             case "finish":
-             case "error":
-               controller.enqueue(chunk);
-               break;
-           }
+         case "tool": {
+           const validatedPrompt = getValidatedPrompt({
+             system,
+             prompt,
+             messages
+           });
+           callOptions = {
+             mode: {
+               type: "object-tool",
+               tool: {
+                 type: "function",
+                 name: "json",
+                 description: "Respond with a JSON object.",
+                 parameters: schema.jsonSchema
+               }
+             },
+             ...prepareCallSettings(settings),
+             inputFormat: validatedPrompt.type,
+             prompt: await convertToLanguageModelPrompt({
+               prompt: validatedPrompt,
+               modelSupportsImageUrls: model.supportsImageUrls
+             }),
+             abortSignal,
+             headers
+           };
+           transformer = {
+             transform(chunk, controller) {
+               switch (chunk.type) {
+                 case "tool-call-delta":
+                   controller.enqueue(chunk.argsTextDelta);
+                   break;
+                 case "finish":
+                 case "error":
+                   controller.enqueue(chunk);
+                   break;
+               }
+             }
+           };
+           break;
          }
-       };
-       break;
-     }
-     case void 0: {
-       throw new Error("Model does not have a default object generation mode.");
-     }
-     default: {
-       const _exhaustiveCheck = mode;
-       throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+         case void 0: {
+           throw new Error(
+             "Model does not have a default object generation mode."
+           );
+         }
+         default: {
+           const _exhaustiveCheck = mode;
+           throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+         }
+       }
+       const {
+         result: { stream, warnings, rawResponse },
+         doStreamSpan
+       } = await retry(
+         () => recordSpan({
+           name: "ai.streamObject.doStream",
+           attributes: {
+             ...baseTelemetryAttributes,
+             "ai.prompt.format": callOptions.inputFormat,
+             "ai.prompt.messages": JSON.stringify(callOptions.prompt),
+             "ai.settings.mode": mode
+           },
+           tracer,
+           endWhenDone: false,
+           fn: async (doStreamSpan2) => {
+             return {
+               result: await model.doStream(callOptions),
+               doStreamSpan: doStreamSpan2
+             };
+           }
+         })
+       );
+       return new DefaultStreamObjectResult({
+         stream: stream.pipeThrough(new TransformStream(transformer)),
+         warnings,
+         rawResponse,
+         schema,
+         onFinish,
+         rootSpan,
+         doStreamSpan
+       });
      }
-   }
-   const result = await retry(() => model.doStream(callOptions));
-   return new DefaultStreamObjectResult({
-     stream: result.stream.pipeThrough(new TransformStream(transformer)),
-     warnings: result.warnings,
-     rawResponse: result.rawResponse,
-     schema,
-     onFinish
    });
  }
  var DefaultStreamObjectResult = class {
@@ -1093,7 +1395,9 @@ var DefaultStreamObjectResult = class {
      warnings,
      rawResponse,
      schema,
-     onFinish
+     onFinish,
+     rootSpan,
+     doStreamSpan
    }) {
      this.warnings = warnings;
      this.rawResponse = rawResponse;
@@ -1108,10 +1412,15 @@ var DefaultStreamObjectResult = class {
      let accumulatedText = "";
      let delta = "";
      let latestObject = void 0;
+     let firstChunk = true;
      const self = this;
      this.originalStream = stream.pipeThrough(
        new TransformStream({
          async transform(chunk, controller) {
+           if (firstChunk) {
+             firstChunk = false;
+             doStreamSpan.addEvent("ai.stream.firstChunk");
+           }
            if (typeof chunk === "string") {
              accumulatedText += chunk;
              delta += chunk;
@@ -1165,12 +1474,24 @@ var DefaultStreamObjectResult = class {
          // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
          async flush(controller) {
            try {
+             const finalUsage = usage != null ? usage : {
+               promptTokens: NaN,
+               completionTokens: NaN,
+               totalTokens: NaN
+             };
+             doStreamSpan.setAttributes({
+               "ai.usage.promptTokens": finalUsage.promptTokens,
+               "ai.usage.completionTokens": finalUsage.completionTokens,
+               "ai.result.object": JSON.stringify(object)
+             });
+             doStreamSpan.end();
+             rootSpan.setAttributes({
+               "ai.usage.promptTokens": finalUsage.promptTokens,
+               "ai.usage.completionTokens": finalUsage.completionTokens,
+               "ai.result.object": JSON.stringify(object)
+             });
              await (onFinish == null ? void 0 : onFinish({
-               usage: usage != null ? usage : {
-                 promptTokens: NaN,
-                 completionTokens: NaN,
-                 totalTokens: NaN
-               },
+               usage: finalUsage,
                object,
                error,
                rawResponse,
@@ -1178,6 +1499,8 @@ var DefaultStreamObjectResult = class {
            }));
          } catch (error2) {
            controller.error(error2);
+         } finally {
+           rootSpan.end();
          }
        }
      })
@@ -1291,7 +1614,7 @@ function prepareToolsAndToolChoice({
      type: "function",
      name,
      description: tool2.description,
-     parameters: convertZodToJSONSchema(tool2.parameters)
+     parameters: asSchema(tool2.parameters).jsonSchema
    })),
    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
  };
@@ -1320,7 +1643,7 @@ function parseToolCall({
    }
    const parseResult = safeParseJSON2({
      text: toolCall.args,
-     schema: tool2.parameters
+     schema: asSchema(tool2.parameters)
    });
    if (parseResult.success === false) {
      throw new InvalidToolArgumentsError({
@@ -1384,7 +1707,10 @@ async function generateText({
        ...prepareToolsAndToolChoice({ tools, toolChoice })
      };
      const callSettings = prepareCallSettings(settings);
-     const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+     const promptMessages = await convertToLanguageModelPrompt({
+       prompt: validatedPrompt,
+       modelSupportsImageUrls: model.supportsImageUrls
+     });
      let currentModelResponse;
      let currentToolCalls = [];
      let currentToolResults = [];
@@ -1457,7 +1783,9 @@ async function generateText({
          });
          responseMessages.push(...newResponseMessages);
          promptMessages.push(
-           ...newResponseMessages.map(convertToLanguageModelMessage)
+           ...newResponseMessages.map(
+             (message) => convertToLanguageModelMessage(message, null)
+           )
          );
        } while (
          // there are tool calls:
@@ -1867,7 +2195,10 @@ async function streamText({
      fn: async (rootSpan) => {
        const retry = retryWithExponentialBackoff({ maxRetries });
        const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-       const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+       const promptMessages = await convertToLanguageModelPrompt({
+         prompt: validatedPrompt,
+         modelSupportsImageUrls: model.supportsImageUrls
+       });
        const {
          result: { stream, warnings, rawResponse },
          doStreamSpan
@@ -3623,6 +3954,7 @@ export {
    generateId2 as generateId,
    generateObject,
    generateText,
+   jsonSchema,
    nanoid,
    parseComplexResponse,
    parseStreamPart,
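
One behavioral note on the convertToLanguageModelPrompt hunks above: image parts given as URLs are now fetched with the new download() helper (plain fetch into a Uint8Array, with mimeType taken from the content-type header) whenever the model reports supportsImageUrls === false, instead of being forwarded verbatim to the provider. A hedged sketch of the user-visible effect; the provider and model id below are illustrative assumptions, not part of this diff:

import { generateText } from "ai";
import { anthropic } from "@ai-sdk/anthropic"; // assumed: a provider whose models cannot take image URLs

const { text } = await generateText({
  model: anthropic("claude-3-haiku-20240307"),
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Describe this image." },
        // as of 3.2.36 this URL is downloaded by the SDK and passed to the
        // provider as bytes when the model does not support image URLs:
        { type: "image", image: new URL("https://example.com/photo.png") },
      ],
    },
  ],
});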