ai 4.0.12 → 4.0.14

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registries.
package/dist/index.mjs CHANGED
@@ -1,7 +1,7 @@
  var __defProp = Object.defineProperty;
  var __export = (target, all) => {
- for (var name12 in all)
- __defProp(target, name12, { get: all[name12], enumerable: true });
+ for (var name13 in all)
+ __defProp(target, name13, { get: all[name13], enumerable: true });
  };

  // streams/index.ts
@@ -347,7 +347,7 @@ function getBaseTelemetryAttributes({
  telemetry,
  headers
  }) {
- var _a12;
+ var _a13;
  return {
  "ai.model.provider": model.provider,
  "ai.model.id": model.modelId,
@@ -357,7 +357,7 @@ function getBaseTelemetryAttributes({
  return attributes;
  }, {}),
  // add metadata as attributes:
- ...Object.entries((_a12 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a12 : {}).reduce(
+ ...Object.entries((_a13 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a13 : {}).reduce(
  (attributes, [key, value]) => {
  attributes[`ai.telemetry.metadata.${key}`] = value;
  return attributes;
@@ -382,7 +382,7 @@ var noopTracer = {
  startSpan() {
  return noopSpan;
  },
- startActiveSpan(name12, arg1, arg2, arg3) {
+ startActiveSpan(name13, arg1, arg2, arg3) {
  if (typeof arg1 === "function") {
  return arg1(noopSpan);
  }
@@ -452,13 +452,13 @@ function getTracer({
  // core/telemetry/record-span.ts
  import { SpanStatusCode } from "@opentelemetry/api";
  function recordSpan({
- name: name12,
+ name: name13,
  tracer,
  attributes,
  fn,
  endWhenDone = true
  }) {
- return tracer.startActiveSpan(name12, { attributes }, async (span) => {
+ return tracer.startActiveSpan(name13, { attributes }, async (span) => {
  try {
  const result = await fn(span);
  if (endWhenDone) {
@@ -566,14 +566,14 @@ async function embed({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a12;
+ var _a13;
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
  headers
  });
  const embedding2 = modelResponse.embeddings[0];
- const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
+ const usage2 = (_a13 = modelResponse.usage) != null ? _a13 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -683,14 +683,14 @@ async function embedMany({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a12;
+ var _a13;
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
  headers
  });
  const embeddings3 = modelResponse.embeddings;
- const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
+ const usage2 = (_a13 = modelResponse.usage) != null ? _a13 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -742,14 +742,14 @@ async function embedMany({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a12;
+ var _a13;
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
  headers
  });
  const embeddings2 = modelResponse.embeddings;
- const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
+ const usage2 = (_a13 = modelResponse.usage) != null ? _a13 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -795,6 +795,45 @@ var DefaultEmbedManyResult = class {
  }
  };

+ // core/generate-image/generate-image.ts
+ import { convertBase64ToUint8Array } from "@ai-sdk/provider-utils";
+ async function generateImage({
+ model,
+ prompt,
+ n,
+ size,
+ providerOptions,
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers
+ }) {
+ const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
+ const { images } = await retry(
+ () => model.doGenerate({
+ prompt,
+ n: n != null ? n : 1,
+ abortSignal,
+ headers,
+ size,
+ providerOptions: providerOptions != null ? providerOptions : {}
+ })
+ );
+ return new DefaultGenerateImageResult({ base64Images: images });
+ }
+ var DefaultGenerateImageResult = class {
+ constructor(options) {
+ this.images = options.base64Images.map((base64) => ({
+ base64,
+ get uint8Array() {
+ return convertBase64ToUint8Array(this.base64);
+ }
+ }));
+ }
+ get image() {
+ return this.images[0];
+ }
+ };
+
  // core/generate-object/generate-object.ts
  import { createIdGenerator, safeParseJSON } from "@ai-sdk/provider-utils";

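The hunk above adds the new generateImage call, which is exported at the bottom of this file as experimental_generateImage. A minimal usage sketch, assuming an image-capable provider model; openai.image and the model id are illustrative, not part of this diff:

    import { experimental_generateImage as generateImage } from "ai";
    import { openai } from "@ai-sdk/openai"; // assumed provider package

    // `image` is the first entry of `images`; each entry exposes `base64`
    // plus a lazily decoded `uint8Array` view, per DefaultGenerateImageResult.
    const { image } = await generateImage({
      model: openai.image("dall-e-3"), // assumption: provider exposes an image model
      prompt: "A watercolor lighthouse at dawn",
      n: 1,
      size: "1024x1024",
    });
    console.log(image.base64.length);
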
@@ -829,7 +868,7 @@ async function download({
  url,
  fetchImplementation = fetch
  }) {
- var _a12;
+ var _a13;
  const urlText = url.toString();
  try {
  const response = await fetchImplementation(urlText);
@@ -842,7 +881,7 @@ async function download({
  }
  return {
  data: new Uint8Array(await response.arrayBuffer()),
- mimeType: (_a12 = response.headers.get("content-type")) != null ? _a12 : void 0
+ mimeType: (_a13 = response.headers.get("content-type")) != null ? _a13 : void 0
  };
  } catch (error) {
  if (DownloadError.isInstance(error)) {
@@ -870,7 +909,7 @@ function detectImageMimeType(image) {

  // core/prompt/data-content.ts
  import {
- convertBase64ToUint8Array,
+ convertBase64ToUint8Array as convertBase64ToUint8Array2,
  convertUint8ArrayToBase64
  } from "@ai-sdk/provider-utils";

@@ -905,8 +944,8 @@ var dataContentSchema = z.union([
  z.custom(
  // Buffer might not be available in some environments such as CloudFlare:
  (value) => {
- var _a12, _b;
- return (_b = (_a12 = globalThis.Buffer) == null ? void 0 : _a12.isBuffer(value)) != null ? _b : false;
+ var _a13, _b;
+ return (_b = (_a13 = globalThis.Buffer) == null ? void 0 : _a13.isBuffer(value)) != null ? _b : false;
  },
  { message: "Must be a Buffer" }
  )
@@ -926,7 +965,7 @@ function convertDataContentToUint8Array(content) {
  }
  if (typeof content === "string") {
  try {
- return convertBase64ToUint8Array(content);
+ return convertBase64ToUint8Array2(content);
  } catch (error) {
  throw new InvalidDataContentError({
  message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -1414,7 +1453,7 @@ function detectSingleMessageCharacteristics(message) {

  // core/prompt/attachments-to-parts.ts
  function attachmentsToParts(attachments) {
- var _a12, _b, _c;
+ var _a13, _b, _c;
  const parts = [];
  for (const attachment of attachments) {
  let url;
@@ -1426,7 +1465,7 @@ function attachmentsToParts(attachments) {
  switch (url.protocol) {
  case "http:":
  case "https:": {
- if ((_a12 = attachment.contentType) == null ? void 0 : _a12.startsWith("image/")) {
+ if ((_a13 = attachment.contentType) == null ? void 0 : _a13.startsWith("image/")) {
  parts.push({ type: "image", image: url });
  } else {
  if (!attachment.contentType) {
@@ -1512,8 +1551,8 @@ _a6 = symbol6;

  // core/prompt/convert-to-core-messages.ts
  function convertToCoreMessages(messages, options) {
- var _a12;
- const tools = (_a12 = options == null ? void 0 : options.tools) != null ? _a12 : {};
+ var _a13;
+ const tools = (_a13 = options == null ? void 0 : options.tools) != null ? _a13 : {};
  const coreMessages = [];
  for (const message of messages) {
  const { role, content, toolInvocations, experimental_attachments } = message;
@@ -1800,7 +1839,7 @@ var arrayOutputStrategy = (schema) => {
  additionalProperties: false
  },
  validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
- var _a12;
+ var _a13;
  if (!isJSONObject(value) || !isJSONArray(value.elements)) {
  return {
  success: false,
@@ -1823,7 +1862,7 @@ var arrayOutputStrategy = (schema) => {
  }
  resultArray.push(result.value);
  }
- const publishedElementCount = (_a12 = latestObject == null ? void 0 : latestObject.length) != null ? _a12 : 0;
+ const publishedElementCount = (_a13 = latestObject == null ? void 0 : latestObject.length) != null ? _a13 : 0;
  let textDelta = "";
  if (isFirstDelta) {
  textDelta += "[";
@@ -2155,7 +2194,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span) => {
- var _a12, _b;
+ var _a13, _b;
  if (mode === "auto" || mode == null) {
  mode = model.defaultObjectGenerationMode;
  }
@@ -2217,7 +2256,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a13, _b2, _c, _d, _e, _f;
+ var _a14, _b2, _c, _d, _e, _f;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-json",
@@ -2236,7 +2275,7 @@ async function generateObject({
  throw new NoObjectGeneratedError();
  }
  const responseData = {
- id: (_b2 = (_a13 = result2.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
+ id: (_b2 = (_a14 = result2.response) == null ? void 0 : _a14.id) != null ? _b2 : generateId3(),
  timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
  };
@@ -2271,7 +2310,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  resultProviderMetadata = generateResult.providerMetadata;
- request = (_a12 = generateResult.request) != null ? _a12 : {};
+ request = (_a13 = generateResult.request) != null ? _a13 : {};
  response = generateResult.responseData;
  break;
  }
@@ -2317,7 +2356,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a13, _b2, _c, _d, _e, _f, _g, _h;
+ var _a14, _b2, _c, _d, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-tool",
@@ -2335,7 +2374,7 @@ async function generateObject({
  abortSignal,
  headers
  });
- const objectText = (_b2 = (_a13 = result2.toolCalls) == null ? void 0 : _a13[0]) == null ? void 0 : _b2.args;
+ const objectText = (_b2 = (_a14 = result2.toolCalls) == null ? void 0 : _a14[0]) == null ? void 0 : _b2.args;
  if (objectText === void 0) {
  throw new NoObjectGeneratedError();
  }
@@ -2440,9 +2479,9 @@ var DefaultGenerateObjectResult = class {
  this.logprobs = options.logprobs;
  }
  toJsonResponse(init) {
- var _a12;
+ var _a13;
  return new Response(JSON.stringify(this.object), {
- status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+ status: (_a13 = init == null ? void 0 : init.status) != null ? _a13 : 200,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "application/json; charset=utf-8"
  })
@@ -2480,17 +2519,17 @@ var DelayedPromise = class {
  return this.promise;
  }
  resolve(value) {
- var _a12;
+ var _a13;
  this.status = { type: "resolved", value };
  if (this.promise) {
- (_a12 = this._resolve) == null ? void 0 : _a12.call(this, value);
+ (_a13 = this._resolve) == null ? void 0 : _a13.call(this, value);
  }
  }
  reject(error) {
- var _a12;
+ var _a13;
  this.status = { type: "rejected", error };
  if (this.promise) {
- (_a12 = this._reject) == null ? void 0 : _a12.call(this, error);
+ (_a13 = this._reject) == null ? void 0 : _a13.call(this, error);
  }
  }
  };
@@ -2579,8 +2618,8 @@ function createStitchableStream() {

  // core/util/now.ts
  function now() {
- var _a12, _b;
- return (_b = (_a12 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a12.now()) != null ? _b : Date.now();
+ var _a13, _b;
+ return (_b = (_a13 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a13.now()) != null ? _b : Date.now();
  }

  // core/generate-object/stream-object.ts
@@ -2869,7 +2908,7 @@ var DefaultStreamObjectResult = class {
  const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a12, _b, _c;
+ var _a13, _b, _c;
  if (isFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
@@ -2915,7 +2954,7 @@ var DefaultStreamObjectResult = class {
  switch (chunk.type) {
  case "response-metadata": {
  response = {
- id: (_a12 = chunk.id) != null ? _a12 : response.id,
+ id: (_a13 = chunk.id) != null ? _a13 : response.id,
  timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : response.modelId
  };
@@ -3112,9 +3151,9 @@ var DefaultStreamObjectResult = class {
  });
  }
  toTextStreamResponse(init) {
- var _a12;
+ var _a13;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+ status: (_a13 = init == null ? void 0 : init.status) != null ? _a13 : 200,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8"
  })
@@ -3127,7 +3166,7 @@ import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils"

  // errors/index.ts
  import {
- AISDKError as AISDKError11,
+ AISDKError as AISDKError12,
  APICallError as APICallError2,
  EmptyResponseBodyError,
  InvalidPromptError as InvalidPromptError2,
@@ -3189,23 +3228,21 @@ var NoSuchToolError = class extends AISDKError9 {
  };
  _a9 = symbol9;

- // errors/tool-execution-error.ts
+ // errors/tool-call-repair-error.ts
  import { AISDKError as AISDKError10, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
- var name10 = "AI_ToolExecutionError";
+ var name10 = "AI_ToolCallRepairError";
  var marker10 = `vercel.ai.error.${name10}`;
  var symbol10 = Symbol.for(marker10);
  var _a10;
- var ToolExecutionError = class extends AISDKError10 {
+ var ToolCallRepairError = class extends AISDKError10 {
  constructor({
- toolArgs,
- toolName,
  cause,
- message = `Error executing tool ${toolName}: ${getErrorMessage3(cause)}`
+ originalError,
+ message = `Error repairing tool call: ${getErrorMessage3(cause)}`
  }) {
  super({ name: name10, message, cause });
  this[_a10] = true;
- this.toolArgs = toolArgs;
- this.toolName = toolName;
+ this.originalError = originalError;
  }
  static isInstance(error) {
  return AISDKError10.hasMarker(error, marker10);
@@ -3213,6 +3250,30 @@ var ToolExecutionError = class extends AISDKError10 {
  };
  _a10 = symbol10;

+ // errors/tool-execution-error.ts
+ import { AISDKError as AISDKError11, getErrorMessage as getErrorMessage4 } from "@ai-sdk/provider";
+ var name11 = "AI_ToolExecutionError";
+ var marker11 = `vercel.ai.error.${name11}`;
+ var symbol11 = Symbol.for(marker11);
+ var _a11;
+ var ToolExecutionError = class extends AISDKError11 {
+ constructor({
+ toolArgs,
+ toolName,
+ cause,
+ message = `Error executing tool ${toolName}: ${getErrorMessage4(cause)}`
+ }) {
+ super({ name: name11, message, cause });
+ this[_a11] = true;
+ this.toolArgs = toolArgs;
+ this.toolName = toolName;
+ }
+ static isInstance(error) {
+ return AISDKError11.hasMarker(error, marker11);
+ }
+ };
+ _a11 = symbol11;
+
  // core/prompt/prepare-tools-and-tool-choice.ts
  import { asSchema as asSchema2 } from "@ai-sdk/ui-utils";

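In these two hunks the former name10/ToolExecutionError slot is taken over by the new ToolCallRepairError (thrown when a repair callback itself fails; the original parse error is preserved on originalError), and ToolExecutionError is re-declared as name11 right after it. Both expose the usual isInstance guard, so callers can tell them apart; a short sketch (the surrounding call is illustrative):

    import { generateText, ToolCallRepairError, ToolExecutionError } from "ai";

    try {
      // ... some generateText({ model, tools, ... }) call ...
    } catch (error) {
      if (ToolCallRepairError.isInstance(error)) {
        console.error("repair failed; original error:", error.originalError);
      } else if (ToolExecutionError.isInstance(error)) {
        console.error(`tool ${error.toolName} failed`, error.toolArgs);
      }
    }
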
@@ -3234,24 +3295,24 @@ function prepareToolsAndToolChoice({
  };
  }
  const filteredTools = activeTools != null ? Object.entries(tools).filter(
- ([name12]) => activeTools.includes(name12)
+ ([name13]) => activeTools.includes(name13)
  ) : Object.entries(tools);
  return {
- tools: filteredTools.map(([name12, tool2]) => {
+ tools: filteredTools.map(([name13, tool2]) => {
  const toolType = tool2.type;
  switch (toolType) {
  case void 0:
  case "function":
  return {
  type: "function",
- name: name12,
+ name: name13,
  description: tool2.description,
  parameters: asSchema2(tool2.parameters).jsonSchema
  };
  case "provider-defined":
  return {
  type: "provider-defined",
- name: name12,
+ name: name13,
  id: tool2.id,
  args: tool2.args
  };
@@ -3281,14 +3342,49 @@ function removeTextAfterLastWhitespace(text2) {
  // core/generate-text/parse-tool-call.ts
  import { safeParseJSON as safeParseJSON2, safeValidateTypes as safeValidateTypes3 } from "@ai-sdk/provider-utils";
  import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
- function parseToolCall({
+ async function parseToolCall({
  toolCall,
- tools
+ tools,
+ repairToolCall,
+ system,
+ messages
  }) {
- const toolName = toolCall.toolName;
  if (tools == null) {
  throw new NoSuchToolError({ toolName: toolCall.toolName });
  }
+ try {
+ return await doParseToolCall({ toolCall, tools });
+ } catch (error) {
+ if (repairToolCall == null || !(NoSuchToolError.isInstance(error) || InvalidToolArgumentsError.isInstance(error))) {
+ throw error;
+ }
+ let repairedToolCall = null;
+ try {
+ repairedToolCall = await repairToolCall({
+ toolCall,
+ tools,
+ parameterSchema: ({ toolName }) => asSchema3(tools[toolName].parameters).jsonSchema,
+ system,
+ messages,
+ error
+ });
+ } catch (repairError) {
+ throw new ToolCallRepairError({
+ cause: repairError,
+ originalError: error
+ });
+ }
+ if (repairedToolCall == null) {
+ throw error;
+ }
+ return await doParseToolCall({ toolCall: repairedToolCall, tools });
+ }
+ }
+ async function doParseToolCall({
+ toolCall,
+ tools
+ }) {
+ const toolName = toolCall.toolName;
  const tool2 = tools[toolName];
  if (tool2 == null) {
  throw new NoSuchToolError({
@@ -3368,6 +3464,7 @@ async function generateText({
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  experimental_activeTools: activeTools,
+ experimental_repairToolCall: repairToolCall,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
  currentDate = () => /* @__PURE__ */ new Date()
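parseToolCall is now async: on a NoSuchToolError or InvalidToolArgumentsError it consults the optional repairToolCall callback, which receives the failing toolCall, the tools map, a parameterSchema helper, the system prompt, the messages, and the error, and returns a repaired tool call or null to rethrow. generateText wires this up through the new experimental_repairToolCall option. A minimal sketch; model and tools stand in for your own setup, the repair strategy shown is illustrative, and it assumes toolCall.args is the raw JSON string:

    import { generateText, NoSuchToolError } from "ai";

    const result = await generateText({
      model, // any tool-capable language model
      tools,
      prompt: "...",
      experimental_repairToolCall: async ({ toolCall, error }) => {
        if (NoSuchToolError.isInstance(error)) {
          return null; // returning null re-throws the original error
        }
        // naive repair attempt: trim trailing junk after the closing brace
        const args = toolCall.args.slice(0, toolCall.args.lastIndexOf("}") + 1);
        return { ...toolCall, args };
      },
    });
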
@@ -3375,7 +3472,7 @@ async function generateText({
  onStepFinish,
  ...settings
  }) {
- var _a12;
+ var _a13;
  if (maxSteps < 1) {
  throw new InvalidArgumentError({
  parameter: "maxSteps",
@@ -3392,7 +3489,7 @@ async function generateText({
  });
  const initialPrompt = standardizePrompt({
  prompt: {
- system: (_a12 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a12 : system,
+ system: (_a13 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a13 : system,
  prompt,
  messages
  },
@@ -3418,7 +3515,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a13, _b, _c, _d, _e, _f;
+ var _a14, _b, _c, _d, _e, _f;
  const mode = {
  type: "regular",
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3470,8 +3567,8 @@ async function generateText({
  "ai.prompt.tools": {
  // convert the language model level tools:
  input: () => {
- var _a14;
- return (_a14 = mode.tools) == null ? void 0 : _a14.map((tool2) => JSON.stringify(tool2));
+ var _a15;
+ return (_a15 = mode.tools) == null ? void 0 : _a15.map((tool2) => JSON.stringify(tool2));
  }
  },
  "ai.prompt.toolChoice": {
@@ -3491,7 +3588,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a14, _b2, _c2, _d2, _e2, _f2;
+ var _a15, _b2, _c2, _d2, _e2, _f2;
  const result = await model.doGenerate({
  mode,
  ...callSettings,
@@ -3503,7 +3600,7 @@ async function generateText({
  headers
  });
  const responseData = {
- id: (_b2 = (_a14 = result.response) == null ? void 0 : _a14.id) != null ? _b2 : generateId3(),
+ id: (_b2 = (_a15 = result.response) == null ? void 0 : _a15.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
  modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
  };
@@ -3536,8 +3633,16 @@ async function generateText({
  }
  })
  );
- currentToolCalls = ((_a13 = currentModelResponse.toolCalls) != null ? _a13 : []).map(
- (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
+ currentToolCalls = await Promise.all(
+ ((_a14 = currentModelResponse.toolCalls) != null ? _a14 : []).map(
+ (toolCall) => parseToolCall({
+ toolCall,
+ tools,
+ repairToolCall,
+ system,
+ messages: stepInputMessages
+ })
+ )
  );
  currentToolResults = tools == null ? [] : await executeTools({
  toolCalls: currentToolCalls,
@@ -3741,6 +3846,46 @@ var DefaultGenerateTextResult = class {
  }
  };

+ // core/generate-text/output.ts
+ var output_exports = {};
+ __export(output_exports, {
+ object: () => object,
+ text: () => text
+ });
+ import { parseJSON } from "@ai-sdk/provider-utils";
+ import { asSchema as asSchema4 } from "@ai-sdk/ui-utils";
+ var text = () => ({
+ type: "text",
+ responseFormat: () => ({ type: "text" }),
+ injectIntoSystemPrompt({ system }) {
+ return system;
+ },
+ parseOutput({ text: text2 }) {
+ return text2;
+ }
+ });
+ var object = ({
+ schema: inputSchema
+ }) => {
+ const schema = asSchema4(inputSchema);
+ return {
+ type: "object",
+ responseFormat: ({ model }) => ({
+ type: "json",
+ schema: model.supportsStructuredOutputs ? schema.jsonSchema : void 0
+ }),
+ injectIntoSystemPrompt({ system, model }) {
+ return model.supportsStructuredOutputs ? system : injectJsonInstruction({
+ prompt: system,
+ schema: schema.jsonSchema
+ });
+ },
+ parseOutput({ text: text2 }) {
+ return parseJSON({ text: text2, schema });
+ }
+ };
+ };
+
  // core/generate-text/stream-text.ts
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
  import { formatDataStreamPart as formatDataStreamPart2 } from "@ai-sdk/ui-utils";
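This output.ts block is a move rather than new code: the identical section is deleted further down (hunk @@ -4843,46 +4998,6) so that it now precedes stream-text.ts. The object is exported as Output (see the export hunk near the end of this diff), so the two strategies can be used roughly like this; the experimental_output option name is an assumption not visible in this hunk, and the schema is illustrative:

    import { generateText, Output } from "ai";
    import { z } from "zod";

    const result = await generateText({
      model, // any language model instance
      // assumption: the public option that feeds the internal `output` value
      experimental_output: Output.object({
        schema: z.object({ city: z.string(), population: z.number() }),
      }),
      prompt: "Describe the largest city in France.",
    });
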
@@ -3841,8 +3986,10 @@ function runToolsTransformation({
  toolCallStreaming,
  tracer,
  telemetry,
+ system,
  messages,
- abortSignal
+ abortSignal,
+ repairToolCall
  }) {
  let toolResultsStreamController = null;
  const toolResultsStream = new ReadableStream({
@@ -3863,7 +4010,7 @@ function runToolsTransformation({
  }
  }
  const forwardStream = new TransformStream({
- transform(chunk, controller) {
+ async transform(chunk, controller) {
  const chunkType = chunk.type;
  switch (chunkType) {
  case "text-delta":
@@ -3912,9 +4059,12 @@ function runToolsTransformation({
  break;
  }
  try {
- const toolCall = parseToolCall({
+ const toolCall = await parseToolCall({
  toolCall: chunk,
- tools
+ tools,
+ repairToolCall,
+ system,
+ messages
  });
  controller.enqueue(toolCall);
  if (tool2.execute != null) {
@@ -4053,6 +4203,7 @@ function streamText({
  experimental_providerMetadata: providerMetadata,
  experimental_toolCallStreaming: toolCallStreaming = false,
  experimental_activeTools: activeTools,
+ experimental_repairToolCall: repairToolCall,
  onChunk,
  onFinish,
  onStepFinish,
@@ -4077,6 +4228,7 @@ function streamText({
  toolChoice,
  toolCallStreaming,
  activeTools,
+ repairToolCall,
  maxSteps,
  continueSteps,
  providerMetadata,
@@ -4103,6 +4255,7 @@ var DefaultStreamTextResult = class {
  toolChoice,
  toolCallStreaming,
  activeTools,
+ repairToolCall,
  maxSteps,
  continueSteps,
  providerMetadata,
@@ -4214,8 +4367,8 @@ var DefaultStreamTextResult = class {
  "ai.prompt.tools": {
  // convert the language model level tools:
  input: () => {
- var _a12;
- return (_a12 = mode.tools) == null ? void 0 : _a12.map((tool2) => JSON.stringify(tool2));
+ var _a13;
+ return (_a13 = mode.tools) == null ? void 0 : _a13.map((tool2) => JSON.stringify(tool2));
  }
  },
  "ai.prompt.toolChoice": {
@@ -4257,7 +4410,9 @@ var DefaultStreamTextResult = class {
  toolCallStreaming,
  tracer,
  telemetry,
+ system,
  messages: stepInputMessages,
+ repairToolCall,
  abortSignal
  });
  const stepRequest = request != null ? request : {};
@@ -4298,7 +4453,7 @@ var DefaultStreamTextResult = class {
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a12, _b, _c;
+ var _a13, _b, _c;
  if (stepFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  stepFirstChunk = false;
@@ -4352,7 +4507,7 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a12 = chunk.id) != null ? _a12 : stepResponse.id,
+ id: (_a13 = chunk.id) != null ? _a13 : stepResponse.id,
  timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
  };
@@ -4669,7 +4824,7 @@ var DefaultStreamTextResult = class {
  });
  }
  toDataStreamInternal({
- getErrorMessage: getErrorMessage4 = () => "An error occurred.",
+ getErrorMessage: getErrorMessage5 = () => "An error occurred.",
  // mask error messages for safety by default
  sendUsage = true
  } = {}) {
@@ -4729,7 +4884,7 @@ var DefaultStreamTextResult = class {
  }
  case "error": {
  controller.enqueue(
- formatDataStreamPart2("error", getErrorMessage4(chunk.error))
+ formatDataStreamPart2("error", getErrorMessage5(chunk.error))
  );
  break;
  }
@@ -4772,7 +4927,7 @@ var DefaultStreamTextResult = class {
  statusText,
  headers,
  data,
- getErrorMessage: getErrorMessage4,
+ getErrorMessage: getErrorMessage5,
  sendUsage
  } = {}) {
  writeToServerResponse({
@@ -4783,7 +4938,7 @@ var DefaultStreamTextResult = class {
  contentType: "text/plain; charset=utf-8",
  dataStreamVersion: "v1"
  }),
- stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage })
+ stream: this.toDataStream({ data, getErrorMessage: getErrorMessage5, sendUsage })
  });
  }
  pipeTextStreamToResponse(response, init) {
@@ -4817,11 +4972,11 @@ var DefaultStreamTextResult = class {
  status,
  statusText,
  data,
- getErrorMessage: getErrorMessage4,
+ getErrorMessage: getErrorMessage5,
  sendUsage
  } = {}) {
  return new Response(
- this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage }),
+ this.toDataStream({ data, getErrorMessage: getErrorMessage5, sendUsage }),
  {
  status,
  statusText,
@@ -4833,9 +4988,9 @@ var DefaultStreamTextResult = class {
  );
  }
  toTextStreamResponse(init) {
- var _a12;
+ var _a13;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+ status: (_a13 = init == null ? void 0 : init.status) != null ? _a13 : 200,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8"
  })
@@ -4843,46 +4998,6 @@ var DefaultStreamTextResult = class {
  }
  };

- // core/generate-text/output.ts
- var output_exports = {};
- __export(output_exports, {
- object: () => object,
- text: () => text
- });
- import { parseJSON } from "@ai-sdk/provider-utils";
- import { asSchema as asSchema4 } from "@ai-sdk/ui-utils";
- var text = () => ({
- type: "text",
- responseFormat: () => ({ type: "text" }),
- injectIntoSystemPrompt({ system }) {
- return system;
- },
- parseOutput({ text: text2 }) {
- return text2;
- }
- });
- var object = ({
- schema: inputSchema
- }) => {
- const schema = asSchema4(inputSchema);
- return {
- type: "object",
- responseFormat: ({ model }) => ({
- type: "json",
- schema: model.supportsStructuredOutputs ? schema.jsonSchema : void 0
- }),
- injectIntoSystemPrompt({ system, model }) {
- return model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: schema.jsonSchema
- });
- },
- parseOutput({ text: text2 }) {
- return parseJSON({ text: text2, schema });
- }
- };
- };
-
  // core/middleware/wrap-language-model.ts
  var experimental_wrapLanguageModel = ({
  model,
@@ -4947,11 +5062,11 @@ function experimental_customProvider({
  }

  // core/registry/no-such-provider-error.ts
- import { AISDKError as AISDKError12, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
- var name11 = "AI_NoSuchProviderError";
- var marker11 = `vercel.ai.error.${name11}`;
- var symbol11 = Symbol.for(marker11);
- var _a11;
+ import { AISDKError as AISDKError13, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+ var name12 = "AI_NoSuchProviderError";
+ var marker12 = `vercel.ai.error.${name12}`;
+ var symbol12 = Symbol.for(marker12);
+ var _a12;
  var NoSuchProviderError = class extends NoSuchModelError3 {
  constructor({
  modelId,
@@ -4960,16 +5075,16 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  availableProviders,
  message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
  }) {
- super({ errorName: name11, modelId, modelType, message });
- this[_a11] = true;
+ super({ errorName: name12, modelId, modelType, message });
+ this[_a12] = true;
  this.providerId = providerId;
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return AISDKError12.hasMarker(error, marker11);
+ return AISDKError13.hasMarker(error, marker12);
  }
  };
- _a11 = symbol11;
+ _a12 = symbol12;

  // core/registry/provider-registry.ts
  import { NoSuchModelError as NoSuchModelError4 } from "@ai-sdk/provider";
@@ -5011,19 +5126,19 @@ var DefaultProviderRegistry = class {
  return [id.slice(0, index), id.slice(index + 1)];
  }
  languageModel(id) {
- var _a12, _b;
+ var _a13, _b;
  const [providerId, modelId] = this.splitId(id, "languageModel");
- const model = (_b = (_a12 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a12, modelId);
+ const model = (_b = (_a13 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a13, modelId);
  if (model == null) {
  throw new NoSuchModelError4({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
  textEmbeddingModel(id) {
- var _a12;
+ var _a13;
  const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
  const provider = this.getProvider(providerId);
- const model = (_a12 = provider.textEmbeddingModel) == null ? void 0 : _a12.call(provider, modelId);
+ const model = (_a13 = provider.textEmbeddingModel) == null ? void 0 : _a13.call(provider, modelId);
  if (model == null) {
  throw new NoSuchModelError4({
  modelId: id,
@@ -5071,7 +5186,7 @@ import {
  function AssistantResponse({ threadId, messageId }, process2) {
  const stream = new ReadableStream({
  async start(controller) {
- var _a12;
+ var _a13;
  const textEncoder = new TextEncoder();
  const sendMessage = (message) => {
  controller.enqueue(
@@ -5093,7 +5208,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  );
  };
  const forwardStream = async (stream2) => {
- var _a13, _b;
+ var _a14, _b;
  let result = void 0;
  for await (const value of stream2) {
  switch (value.event) {
@@ -5110,7 +5225,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  break;
  }
  case "thread.message.delta": {
- const content = (_a13 = value.data.delta.content) == null ? void 0 : _a13[0];
+ const content = (_a14 = value.data.delta.content) == null ? void 0 : _a14[0];
  if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
  controller.enqueue(
  textEncoder.encode(
@@ -5144,7 +5259,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  forwardStream
  });
  } catch (error) {
- sendError((_a12 = error.message) != null ? _a12 : `${error}`);
+ sendError((_a13 = error.message) != null ? _a13 : `${error}`);
  } finally {
  controller.close();
  }
@@ -5205,7 +5320,7 @@ function toDataStreamInternal(stream, callbacks) {
  return stream.pipeThrough(
  new TransformStream({
  transform: async (value, controller) => {
- var _a12;
+ var _a13;
  if (typeof value === "string") {
  controller.enqueue(value);
  return;
@@ -5213,7 +5328,7 @@ function toDataStreamInternal(stream, callbacks) {
  if ("event" in value) {
  if (value.event === "on_chat_model_stream") {
  forwardAIMessageChunk(
- (_a12 = value.data) == null ? void 0 : _a12.chunk,
+ (_a13 = value.data) == null ? void 0 : _a13.chunk,
  controller
  );
  }
@@ -5236,7 +5351,7 @@ function toDataStream(stream, callbacks) {
  );
  }
  function toDataStreamResponse(stream, options) {
- var _a12;
+ var _a13;
  const dataStream = toDataStreamInternal(
  stream,
  options == null ? void 0 : options.callbacks
@@ -5245,7 +5360,7 @@ function toDataStreamResponse(stream, options) {
  const init = options == null ? void 0 : options.init;
  const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
  return new Response(responseStream, {
- status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+ status: (_a13 = init == null ? void 0 : init.status) != null ? _a13 : 200,
  statusText: init == null ? void 0 : init.statusText,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8",
@@ -5300,14 +5415,14 @@ function toDataStream2(stream, callbacks) {
  );
  }
  function toDataStreamResponse2(stream, options = {}) {
- var _a12;
+ var _a13;
  const { init, data, callbacks } = options;
  const dataStream = toDataStreamInternal2(stream, callbacks).pipeThrough(
  new TextEncoderStream()
  );
  const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
  return new Response(responseStream, {
- status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+ status: (_a13 = init == null ? void 0 : init.status) != null ? _a13 : 200,
  statusText: init == null ? void 0 : init.statusText,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8",
@@ -5399,7 +5514,7 @@ var StreamData = class {
  }
  };
  export {
- AISDKError11 as AISDKError,
+ AISDKError12 as AISDKError,
  APICallError2 as APICallError,
  AssistantResponse,
  DownloadError,
@@ -5423,6 +5538,7 @@ export {
  output_exports as Output,
  RetryError,
  StreamData,
+ ToolCallRepairError,
  ToolExecutionError,
  TypeValidationError2 as TypeValidationError,
  UnsupportedFunctionalityError2 as UnsupportedFunctionalityError,
@@ -5434,6 +5550,7 @@ export {
  embedMany,
  experimental_createProviderRegistry,
  experimental_customProvider,
+ generateImage as experimental_generateImage,
  experimental_wrapLanguageModel,
  formatAssistantStreamPart2 as formatAssistantStreamPart,
  formatDataStreamPart6 as formatDataStreamPart,
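Taken together, the export hunks surface the two user-visible additions of this release: the ToolCallRepairError class and the image generation entry point, which is deliberately published under an experimental_ alias. Importing them looks like this (assuming ai@4.0.14):

    import {
      experimental_generateImage,
      ToolCallRepairError,
    } from "ai";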