ai 3.3.17 → 3.3.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -4,8 +4,8 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
-  for (var name12 in all)
-    __defProp(target, name12, { get: all[name12], enumerable: true });
+  for (var name10 in all)
+    __defProp(target, name10, { get: all[name10], enumerable: true });
 };
 var __copyProps = (to, from, except, desc) => {
   if (from && typeof from === "object" || typeof from === "function") {
@@ -39,7 +39,6 @@ __export(streams_exports, {
   InvalidArgumentError: () => InvalidArgumentError,
   InvalidDataContentError: () => InvalidDataContentError,
   InvalidMessageRoleError: () => InvalidMessageRoleError,
-  InvalidModelIdError: () => InvalidModelIdError,
   InvalidPromptError: () => import_provider15.InvalidPromptError,
   InvalidResponseDataError: () => import_provider15.InvalidResponseDataError,
   InvalidToolArgumentsError: () => InvalidToolArgumentsError,
@@ -48,8 +47,9 @@ __export(streams_exports, {
   LangChainStream: () => LangChainStream,
   LoadAPIKeyError: () => import_provider15.LoadAPIKeyError,
   MistralStream: () => MistralStream,
+  NoContentGeneratedError: () => import_provider15.NoContentGeneratedError,
   NoObjectGeneratedError: () => NoObjectGeneratedError,
-  NoSuchModelError: () => NoSuchModelError,
+  NoSuchModelError: () => import_provider15.NoSuchModelError,
   NoSuchProviderError: () => NoSuchProviderError,
   NoSuchToolError: () => NoSuchToolError,
   OpenAIStream: () => OpenAIStream,
@@ -70,6 +70,7 @@ __export(streams_exports, {
   experimental_StreamData: () => experimental_StreamData,
   experimental_createModelRegistry: () => experimental_createModelRegistry,
   experimental_createProviderRegistry: () => experimental_createProviderRegistry,
+  experimental_customProvider: () => experimental_customProvider,
   experimental_generateObject: () => experimental_generateObject,
   experimental_generateText: () => experimental_generateText,
   experimental_streamObject: () => experimental_streamObject,
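
Note: the export hunks above replace the package-local NoSuchModelError with the re-exported @ai-sdk/provider implementation, drop InvalidModelIdError, and add NoContentGeneratedError plus experimental_customProvider. A minimal consumer-side sketch — the import names come straight from the hunks, while the classification helper itself is illustrative:

import { NoContentGeneratedError, NoSuchModelError } from "ai";

// Illustrative: AISDKError subclasses expose marker-based isInstance checks,
// so the re-exported classes can be detected without instanceof pitfalls.
function describeAiError(error: unknown): string {
  if (NoSuchModelError.isInstance(error)) {
    return `no such ${error.modelType}: ${error.modelId}`;
  }
  if (NoContentGeneratedError.isInstance(error)) {
    return "the provider generated no content";
  }
  return "unrecognized error";
}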
@@ -223,7 +224,7 @@ function getBaseTelemetryAttributes({
   telemetry,
   headers
 }) {
-  var _a12;
+  var _a10;
   return {
     "ai.model.provider": model.provider,
     "ai.model.id": model.modelId,
@@ -233,7 +234,7 @@ function getBaseTelemetryAttributes({
       return attributes;
     }, {}),
     // add metadata as attributes:
-    ...Object.entries((_a12 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a12 : {}).reduce(
+    ...Object.entries((_a10 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a10 : {}).reduce(
      (attributes, [key, value]) => {
        attributes[`ai.telemetry.metadata.${key}`] = value;
        return attributes;
@@ -258,7 +259,7 @@ var noopTracer = {
  startSpan() {
    return noopSpan;
  },
-  startActiveSpan(name12, arg1, arg2, arg3) {
+  startActiveSpan(name10, arg1, arg2, arg3) {
    if (typeof arg1 === "function") {
      return arg1(noopSpan);
    }
@@ -326,13 +327,13 @@ function getTracer({ isEnabled }) {
 // core/telemetry/record-span.ts
 var import_api2 = require("@opentelemetry/api");
 function recordSpan({
-  name: name12,
+  name: name10,
   tracer,
   attributes,
   fn,
   endWhenDone = true
 }) {
-  return tracer.startActiveSpan(name12, { attributes }, async (span) => {
+  return tracer.startActiveSpan(name10, { attributes }, async (span) => {
     try {
       const result = await fn(span);
       if (endWhenDone) {
@@ -398,14 +399,14 @@ async function embed({
   headers,
   experimental_telemetry: telemetry
 }) {
-  var _a12;
+  var _a10;
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
     settings: { maxRetries }
   });
-  const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
+  const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
   return recordSpan({
     name: "ai.embed",
     attributes: selectTelemetryAttributes({
@@ -438,14 +439,14 @@ async function embed({
       }),
       tracer,
       fn: async (doEmbedSpan) => {
-        var _a13;
+        var _a11;
        const modelResponse = await model.doEmbed({
          values: [value],
          abortSignal,
          headers
        });
        const embedding2 = modelResponse.embeddings[0];
-        const usage2 = (_a13 = modelResponse.usage) != null ? _a13 : { tokens: NaN };
+        const usage2 = (_a11 = modelResponse.usage) != null ? _a11 : { tokens: NaN };
        doEmbedSpan.setAttributes(
          selectTelemetryAttributes({
            telemetry,
@@ -511,14 +512,14 @@ async function embedMany({
   headers,
   experimental_telemetry: telemetry
 }) {
-  var _a12;
+  var _a10;
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
     settings: { maxRetries }
   });
-  const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
+  const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
   return recordSpan({
     name: "ai.embedMany",
     attributes: selectTelemetryAttributes({
@@ -556,14 +557,14 @@ async function embedMany({
       }),
       tracer,
       fn: async (doEmbedSpan) => {
-        var _a13;
+        var _a11;
        const modelResponse = await model.doEmbed({
          values,
          abortSignal,
          headers
        });
        const embeddings3 = modelResponse.embeddings;
-        const usage2 = (_a13 = modelResponse.usage) != null ? _a13 : { tokens: NaN };
+        const usage2 = (_a11 = modelResponse.usage) != null ? _a11 : { tokens: NaN };
        doEmbedSpan.setAttributes(
          selectTelemetryAttributes({
            telemetry,
@@ -615,14 +616,14 @@ async function embedMany({
       }),
       tracer,
       fn: async (doEmbedSpan) => {
-        var _a13;
+        var _a11;
        const modelResponse = await model.doEmbed({
          values: chunk,
          abortSignal,
          headers
        });
        const embeddings2 = modelResponse.embeddings;
-        const usage2 = (_a13 = modelResponse.usage) != null ? _a13 : { tokens: NaN };
+        const usage2 = (_a11 = modelResponse.usage) != null ? _a11 : { tokens: NaN };
        doEmbedSpan.setAttributes(
          selectTelemetryAttributes({
            telemetry,
@@ -724,7 +725,7 @@ async function download({
   url,
   fetchImplementation = fetch
 }) {
-  var _a12;
+  var _a10;
   const urlText = url.toString();
   try {
     const response = await fetchImplementation(urlText);
@@ -737,7 +738,7 @@ async function download({
     }
     return {
       data: new Uint8Array(await response.arrayBuffer()),
-      mimeType: (_a12 = response.headers.get("content-type")) != null ? _a12 : void 0
+      mimeType: (_a10 = response.headers.get("content-type")) != null ? _a10 : void 0
     };
   } catch (error) {
     if (DownloadError.isInstance(error)) {
@@ -815,8 +816,8 @@ var dataContentSchema = import_zod.z.union([
   import_zod.z.custom(
     // Buffer might not be available in some environments such as CloudFlare:
     (value) => {
-      var _a12, _b;
-      return (_b = (_a12 = globalThis.Buffer) == null ? void 0 : _a12.isBuffer(value)) != null ? _b : false;
+      var _a10, _b;
+      return (_b = (_a10 = globalThis.Buffer) == null ? void 0 : _a10.isBuffer(value)) != null ? _b : false;
    },
    { message: "Must be a Buffer" }
  )
@@ -944,7 +945,7 @@ function convertToLanguageModelMessage(message, downloadedImages) {
       role: "user",
       content: message.content.map(
         (part) => {
-          var _a12, _b, _c;
+          var _a10, _b, _c;
          switch (part.type) {
            case "text": {
              return {
@@ -967,7 +968,7 @@ function convertToLanguageModelMessage(message, downloadedImages) {
              return {
                type: "image",
                image: downloadedImage.data,
-                mimeType: (_a12 = part.mimeType) != null ? _a12 : downloadedImage.mimeType,
+                mimeType: (_a10 = part.mimeType) != null ? _a10 : downloadedImage.mimeType,
                providerMetadata: part.experimental_providerMetadata
              };
            }
@@ -1399,8 +1400,8 @@ function prepareResponseHeaders(init, {
   contentType,
   dataStreamVersion
 }) {
-  var _a12;
-  const headers = new Headers((_a12 = init == null ? void 0 : init.headers) != null ? _a12 : {});
+  var _a10;
+  const headers = new Headers((_a10 = init == null ? void 0 : init.headers) != null ? _a10 : {});
   if (!headers.has("Content-Type")) {
     headers.set("Content-Type", contentType);
   }
@@ -1709,7 +1710,7 @@ async function generateObject({
   experimental_telemetry: telemetry,
   ...settings
 }) {
-  var _a12;
+  var _a10;
   validateObjectGenerationInput({
     output,
     mode,
@@ -1727,7 +1728,7 @@ async function generateObject({
     headers,
     settings: { ...settings, maxRetries }
   });
-  const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
+  const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
   return recordSpan({
     name: "ai.generateObject",
     attributes: selectTelemetryAttributes({
@@ -1888,7 +1889,7 @@ async function generateObject({
          }),
          tracer,
          fn: async (span2) => {
-            var _a13, _b;
+            var _a11, _b;
            const result2 = await model.doGenerate({
              mode: {
                type: "object-tool",
@@ -1905,7 +1906,7 @@ async function generateObject({
              abortSignal,
              headers
            });
-            const objectText = (_b = (_a13 = result2.toolCalls) == null ? void 0 : _a13[0]) == null ? void 0 : _b.args;
+            const objectText = (_b = (_a11 = result2.toolCalls) == null ? void 0 : _a11[0]) == null ? void 0 : _b.args;
            if (objectText === void 0) {
              throw new NoObjectGeneratedError();
            }
@@ -1993,9 +1994,9 @@ var DefaultGenerateObjectResult = class {
     this.experimental_providerMetadata = options.providerMetadata;
   }
   toJsonResponse(init) {
-    var _a12;
+    var _a10;
     return new Response(JSON.stringify(this.object), {
-      status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+      status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
       headers: prepareResponseHeaders(init, {
         contentType: "application/json; charset=utf-8"
       })
@@ -2045,17 +2046,17 @@ var DelayedPromise = class {
     return this.promise;
   }
   resolve(value) {
-    var _a12;
+    var _a10;
     this.status = { type: "resolved", value };
     if (this.promise) {
-      (_a12 = this._resolve) == null ? void 0 : _a12.call(this, value);
+      (_a10 = this._resolve) == null ? void 0 : _a10.call(this, value);
     }
   }
   reject(error) {
-    var _a12;
+    var _a10;
     this.status = { type: "rejected", error };
     if (this.promise) {
-      (_a12 = this._reject) == null ? void 0 : _a12.call(this, error);
+      (_a10 = this._reject) == null ? void 0 : _a10.call(this, error);
     }
   }
 };
@@ -2078,7 +2079,7 @@ async function streamObject({
   onFinish,
   ...settings
 }) {
-  var _a12;
+  var _a10;
   validateObjectGenerationInput({
     output,
     mode,
@@ -2096,7 +2097,7 @@ async function streamObject({
     headers,
     settings: { ...settings, maxRetries }
   });
-  const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
+  const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
   const retry = retryWithExponentialBackoff({ maxRetries });
   return recordSpan({
     name: "ai.streamObject",
@@ -2485,8 +2486,8 @@ var DefaultStreamObjectResult = class {
     });
   }
   pipeTextStreamToResponse(response, init) {
-    var _a12;
-    response.writeHead((_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200, {
+    var _a10;
+    response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
       "Content-Type": "text/plain; charset=utf-8",
       ...init == null ? void 0 : init.headers
     });
@@ -2508,9 +2509,9 @@ var DefaultStreamObjectResult = class {
     read();
   }
   toTextStreamResponse(init) {
-    var _a12;
+    var _a10;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
-      status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+      status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
       headers: prepareResponseHeaders(init, {
         contentType: "text/plain; charset=utf-8"
       })
@@ -2539,9 +2540,9 @@ function prepareToolsAndToolChoice({
     };
   }
   return {
-    tools: Object.entries(tools).map(([name12, tool2]) => ({
+    tools: Object.entries(tools).map(([name10, tool2]) => ({
      type: "function",
-      name: name12,
+      name: name10,
      description: tool2.description,
      parameters: (0, import_ui_utils3.asSchema)(tool2.parameters).jsonSchema
    })),
@@ -2690,14 +2691,14 @@ async function generateText({
   experimental_telemetry: telemetry,
   ...settings
 }) {
-  var _a12;
+  var _a10;
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
     settings: { ...settings, maxRetries }
   });
-  const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
+  const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
   return recordSpan({
     name: "ai.generateText",
     attributes: selectTelemetryAttributes({
@@ -2717,7 +2718,7 @@ async function generateText({
    }),
    tracer,
    fn: async (span) => {
-      var _a13, _b, _c, _d;
+      var _a11, _b, _c, _d;
      const retry = retryWithExponentialBackoff({ maxRetries });
      const validatedPrompt = validatePrompt({
        system,
@@ -2803,7 +2804,7 @@ async function generateText({
          }
        })
      );
-      currentToolCalls = ((_a13 = currentModelResponse.toolCalls) != null ? _a13 : []).map(
+      currentToolCalls = ((_a11 = currentModelResponse.toolCalls) != null ? _a11 : []).map(
        (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
      );
      currentToolResults = tools == null ? [] : await executeTools({
@@ -3271,14 +3272,14 @@ async function streamText({
   onFinish,
   ...settings
 }) {
-  var _a12;
+  var _a10;
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
     settings: { ...settings, maxRetries }
   });
-  const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
+  const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
   return recordSpan({
     name: "ai.streamText",
     attributes: selectTelemetryAttributes({
@@ -3655,8 +3656,8 @@ var DefaultStreamTextResult = class {
     return this.pipeDataStreamToResponse(response, init);
   }
   pipeDataStreamToResponse(response, init) {
-    var _a12;
-    response.writeHead((_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200, {
+    var _a10;
+    response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
       "Content-Type": "text/plain; charset=utf-8",
       ...init == null ? void 0 : init.headers
     });
@@ -3678,8 +3679,8 @@ var DefaultStreamTextResult = class {
     read();
   }
   pipeTextStreamToResponse(response, init) {
-    var _a12;
-    response.writeHead((_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200, {
+    var _a10;
+    response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
       "Content-Type": "text/plain; charset=utf-8",
       ...init == null ? void 0 : init.headers
     });
@@ -3704,7 +3705,7 @@ var DefaultStreamTextResult = class {
     return this.toDataStreamResponse(options);
   }
   toDataStreamResponse(options) {
-    var _a12;
+    var _a10;
     const init = options == null ? void 0 : "init" in options ? options.init : {
       headers: "headers" in options ? options.headers : void 0,
       status: "status" in options ? options.status : void 0,
@@ -3714,7 +3715,7 @@ var DefaultStreamTextResult = class {
     const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
     const stream = data ? mergeStreams(data.stream, this.toDataStream({ getErrorMessage: getErrorMessage4 })) : this.toDataStream({ getErrorMessage: getErrorMessage4 });
     return new Response(stream, {
-      status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+      status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
       statusText: init == null ? void 0 : init.statusText,
       headers: prepareResponseHeaders(init, {
         contentType: "text/plain; charset=utf-8",
@@ -3723,9 +3724,9 @@ var DefaultStreamTextResult = class {
     });
   }
   toTextStreamResponse(init) {
-    var _a12;
+    var _a10;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
-      status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+      status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
       headers: prepareResponseHeaders(init, {
         contentType: "text/plain; charset=utf-8"
       })
@@ -3736,7 +3737,7 @@ var experimental_streamText = streamText;
 
 // core/prompt/attachments-to-parts.ts
 function attachmentsToParts(attachments) {
-  var _a12, _b, _c;
+  var _a10, _b, _c;
   const parts = [];
   for (const attachment of attachments) {
     let url;
@@ -3748,7 +3749,7 @@ function attachmentsToParts(attachments) {
     switch (url.protocol) {
       case "http:":
       case "https:": {
-        if ((_a12 = attachment.contentType) == null ? void 0 : _a12.startsWith("image/")) {
+        if ((_a10 = attachment.contentType) == null ? void 0 : _a10.startsWith("image/")) {
          parts.push({ type: "image", image: url });
        }
        break;
@@ -3856,110 +3857,62 @@ function convertToCoreMessages(messages) {
   return coreMessages;
 }
 
-// core/registry/invalid-model-id-error.ts
+// core/registry/custom-provider.ts
 var import_provider12 = require("@ai-sdk/provider");
-var name9 = "AI_InvalidModelIdError";
+function experimental_customProvider({
+  languageModels,
+  textEmbeddingModels,
+  fallbackProvider
+}) {
+  return {
+    languageModel(modelId) {
+      if (languageModels != null && modelId in languageModels) {
+        return languageModels[modelId];
+      }
+      if (fallbackProvider) {
+        return fallbackProvider.languageModel(modelId);
+      }
+      throw new import_provider12.NoSuchModelError({ modelId, modelType: "languageModel" });
+    },
+    textEmbeddingModel(modelId) {
+      if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
+        return textEmbeddingModels[modelId];
+      }
+      if (fallbackProvider) {
+        return fallbackProvider.textEmbeddingModel(modelId);
+      }
+      throw new import_provider12.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+    }
+  };
+}
+
+// core/registry/no-such-provider-error.ts
+var import_provider13 = require("@ai-sdk/provider");
+var name9 = "AI_NoSuchProviderError";
 var marker9 = `vercel.ai.error.${name9}`;
 var symbol9 = Symbol.for(marker9);
 var _a9;
-var InvalidModelIdError = class extends import_provider12.AISDKError {
-  constructor({
-    id,
-    message = `Invalid model id: ${id}`
-  }) {
-    super({ name: name9, message });
-    this[_a9] = true;
-    this.id = id;
-  }
-  static isInstance(error) {
-    return import_provider12.AISDKError.hasMarker(error, marker9);
-  }
-  /**
-   * @deprecated use `isInstance` instead
-   */
-  static isInvalidModelIdError(error) {
-    return error instanceof Error && error.name === name9 && typeof error.id === "string";
-  }
-  /**
-   * @deprecated Do not use this method. It will be removed in the next major version.
-   */
-  toJSON() {
-    return {
-      name: this.name,
-      message: this.message,
-      stack: this.stack,
-      id: this.id
-    };
-  }
-};
-_a9 = symbol9;
-
-// core/registry/no-such-model-error.ts
-var import_provider13 = require("@ai-sdk/provider");
-var name10 = "AI_NoSuchModelError";
-var marker10 = `vercel.ai.error.${name10}`;
-var symbol10 = Symbol.for(marker10);
-var _a10;
-var NoSuchModelError = class extends import_provider13.AISDKError {
+var NoSuchProviderError = class extends import_provider13.NoSuchModelError {
   constructor({
     modelId,
     modelType,
-    message = `No such ${modelType}: ${modelId}`
-  }) {
-    super({ name: name10, message });
-    this[_a10] = true;
-    this.modelId = modelId;
-    this.modelType = modelType;
-  }
-  static isInstance(error) {
-    return import_provider13.AISDKError.hasMarker(error, marker10);
-  }
-  /**
-   * @deprecated use `isInstance` instead
-   */
-  static isNoSuchModelError(error) {
-    return error instanceof Error && error.name === name10 && typeof error.modelId === "string" && typeof error.modelType === "string";
-  }
-  /**
-   * @deprecated Do not use this method. It will be removed in the next major version.
-   */
-  toJSON() {
-    return {
-      name: this.name,
-      message: this.message,
-      stack: this.stack,
-      modelId: this.modelId,
-      modelType: this.modelType
-    };
-  }
-};
-_a10 = symbol10;
-
-// core/registry/no-such-provider-error.ts
-var import_provider14 = require("@ai-sdk/provider");
-var name11 = "AI_NoSuchProviderError";
-var marker11 = `vercel.ai.error.${name11}`;
-var symbol11 = Symbol.for(marker11);
-var _a11;
-var NoSuchProviderError = class extends import_provider14.AISDKError {
-  constructor({
     providerId,
     availableProviders,
     message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
   }) {
-    super({ name: name11, message });
-    this[_a11] = true;
+    super({ errorName: name9, modelId, modelType, message });
+    this[_a9] = true;
     this.providerId = providerId;
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return import_provider14.AISDKError.hasMarker(error, marker11);
+    return import_provider13.AISDKError.hasMarker(error, marker9);
   }
   /**
    * @deprecated use `isInstance` instead
    */
   static isNoSuchProviderError(error) {
-    return error instanceof Error && error.name === name11 && typeof error.providerId === "string" && Array.isArray(error.availableProviders);
+    return error instanceof Error && error.name === name9 && typeof error.providerId === "string" && Array.isArray(error.availableProviders);
   }
   /**
    * @deprecated Do not use this method. It will be removed in the next major version.
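
Note: the hunk above replaces the InvalidModelIdError and package-local NoSuchModelError modules with core/registry/custom-provider.ts, and rebases NoSuchProviderError onto the provider package's NoSuchModelError. A hedged usage sketch for the new experimental_customProvider — the alias ids and the @ai-sdk/openai fallback are placeholders, not part of this diff:

import { experimental_customProvider } from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder fallback provider

// Alias ids resolve to preconfigured models; unknown ids fall through to the
// fallback provider, and without a fallback a NoSuchModelError is thrown.
const myProvider = experimental_customProvider({
  languageModels: {
    fast: openai("gpt-4o-mini"),
    smart: openai("gpt-4o"),
  },
  fallbackProvider: openai,
});

const model = myProvider.languageModel("fast"); // returns the aliased model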
@@ -3969,14 +3922,17 @@ var NoSuchProviderError = class extends import_provider14.AISDKError {
       name: this.name,
       message: this.message,
       stack: this.stack,
+      modelId: this.modelId,
+      modelType: this.modelType,
       providerId: this.providerId,
       availableProviders: this.availableProviders
     };
   }
 };
-_a11 = symbol11;
+_a9 = symbol9;
 
 // core/registry/provider-registry.ts
+var import_provider14 = require("@ai-sdk/provider");
 function experimental_createProviderRegistry(providers) {
   const registry = new DefaultProviderRegistry();
   for (const [id, provider] of Object.entries(providers)) {
@@ -3999,35 +3955,41 @@ var DefaultProviderRegistry = class {
     const provider = this.providers[id];
     if (provider == null) {
       throw new NoSuchProviderError({
+        modelId: id,
+        modelType: "languageModel",
         providerId: id,
         availableProviders: Object.keys(this.providers)
       });
     }
     return provider;
   }
-  splitId(id) {
+  splitId(id, modelType) {
     const index = id.indexOf(":");
     if (index === -1) {
-      throw new InvalidModelIdError({ id });
+      throw new import_provider14.NoSuchModelError({
+        modelId: id,
+        modelType,
+        message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
+      });
     }
     return [id.slice(0, index), id.slice(index + 1)];
   }
   languageModel(id) {
-    var _a12, _b;
-    const [providerId, modelId] = this.splitId(id);
-    const model = (_b = (_a12 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a12, modelId);
+    var _a10, _b;
+    const [providerId, modelId] = this.splitId(id, "languageModel");
+    const model = (_b = (_a10 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a10, modelId);
     if (model == null) {
-      throw new NoSuchModelError({ modelId: id, modelType: "languageModel" });
+      throw new import_provider14.NoSuchModelError({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
   textEmbeddingModel(id) {
-    var _a12, _b, _c;
-    const [providerId, modelId] = this.splitId(id);
+    var _a10, _b, _c;
+    const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
     const provider = this.getProvider(providerId);
-    const model = (_c = (_a12 = provider.textEmbeddingModel) == null ? void 0 : _a12.call(provider, modelId)) != null ? _c : (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId);
+    const model = (_c = (_a10 = provider.textEmbeddingModel) == null ? void 0 : _a10.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
     if (model == null) {
-      throw new NoSuchModelError({
+      throw new import_provider14.NoSuchModelError({
        modelId: id,
        modelType: "textEmbeddingModel"
      });
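
Note: with the registry changes above, a model id that lacks the "providerId:modelId" separator now throws NoSuchModelError (with an explanatory message) instead of the removed InvalidModelIdError, and NoSuchProviderError is itself a NoSuchModelError subclass. A sketch of what that means for callers — the openai provider is a placeholder:

import {
  experimental_createProviderRegistry,
  NoSuchModelError,
  NoSuchProviderError,
} from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder provider

const registry = experimental_createProviderRegistry({ openai });

try {
  // Ids must be "providerId:modelId"; a bare "gpt-4o" now raises NoSuchModelError.
  const model = registry.languageModel("openai:gpt-4o");
  console.log(model.modelId);
} catch (error) {
  // Check the subclass first: NoSuchProviderError now extends NoSuchModelError.
  if (NoSuchProviderError.isInstance(error)) {
    console.error("unknown provider:", error.providerId, error.availableProviders);
  } else if (NoSuchModelError.isInstance(error)) {
    console.error("bad or unknown model id:", error.modelId);
  }
}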
@@ -4185,8 +4147,8 @@ function readableFromAsyncIterable(iterable) {
       controller.enqueue(value);
     },
     async cancel(reason) {
-      var _a12;
-      await ((_a12 = it.return) == null ? void 0 : _a12.call(it, reason));
+      var _a10;
+      await ((_a10 = it.return) == null ? void 0 : _a10.call(it, reason));
     }
   });
 }
@@ -4323,7 +4285,7 @@ var import_ui_utils8 = require("@ai-sdk/ui-utils");
 function AssistantResponse({ threadId, messageId }, process2) {
   const stream = new ReadableStream({
     async start(controller) {
-      var _a12;
+      var _a10;
       const textEncoder = new TextEncoder();
       const sendMessage = (message) => {
         controller.enqueue(
@@ -4341,7 +4303,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
        );
      };
      const forwardStream = async (stream2) => {
-        var _a13, _b;
+        var _a11, _b;
        let result = void 0;
        for await (const value of stream2) {
          switch (value.event) {
@@ -4358,7 +4320,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
              break;
            }
            case "thread.message.delta": {
-              const content = (_a13 = value.data.delta.content) == null ? void 0 : _a13[0];
+              const content = (_a11 = value.data.delta.content) == null ? void 0 : _a11[0];
              if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
                controller.enqueue(
                  textEncoder.encode(
@@ -4394,7 +4356,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
          forwardStream
        });
      } catch (error) {
-        sendError((_a12 = error.message) != null ? _a12 : `${error}`);
+        sendError((_a10 = error.message) != null ? _a10 : `${error}`);
      } finally {
        controller.close();
      }
@@ -4415,9 +4377,9 @@ var experimental_AssistantResponse = AssistantResponse;
 
 // streams/aws-bedrock-stream.ts
 async function* asDeltaIterable(response, extractTextDeltaFromChunk) {
-  var _a12, _b;
+  var _a10, _b;
   const decoder = new TextDecoder();
-  for await (const chunk of (_a12 = response.body) != null ? _a12 : []) {
+  for await (const chunk of (_a10 = response.body) != null ? _a10 : []) {
     const bytes = (_b = chunk.chunk) == null ? void 0 : _b.bytes;
     if (bytes != null) {
       const chunkText = decoder.decode(bytes);
@@ -4431,8 +4393,8 @@ async function* asDeltaIterable(response, extractTextDeltaFromChunk) {
 }
 function AWSBedrockAnthropicMessagesStream(response, callbacks) {
   return AWSBedrockStream(response, callbacks, (chunk) => {
-    var _a12;
-    return (_a12 = chunk.delta) == null ? void 0 : _a12.text;
+    var _a10;
+    return (_a10 = chunk.delta) == null ? void 0 : _a10.text;
   });
 }
 function AWSBedrockAnthropicStream(response, callbacks) {
@@ -4479,8 +4441,8 @@ async function readAndProcessLines(reader, controller) {
   controller.close();
 }
 function createParser2(res) {
-  var _a12;
-  const reader = (_a12 = res.body) == null ? void 0 : _a12.getReader();
+  var _a10;
+  const reader = (_a10 = res.body) == null ? void 0 : _a10.getReader();
   return new ReadableStream({
     async start(controller) {
       if (!reader) {
@@ -4510,9 +4472,9 @@ function CohereStream(reader, callbacks) {
 
 // streams/google-generative-ai-stream.ts
 async function* streamable3(response) {
-  var _a12, _b, _c;
+  var _a10, _b, _c;
   for await (const chunk of response.stream) {
-    const parts = (_c = (_b = (_a12 = chunk.candidates) == null ? void 0 : _a12[0]) == null ? void 0 : _b.content) == null ? void 0 : _c.parts;
+    const parts = (_c = (_b = (_a10 = chunk.candidates) == null ? void 0 : _a10[0]) == null ? void 0 : _b.content) == null ? void 0 : _c.parts;
     if (parts === void 0) {
       continue;
     }
@@ -4531,13 +4493,13 @@ function createParser3(res) {
   const trimStartOfStream = trimStartOfStreamHelper();
   return new ReadableStream({
     async pull(controller) {
-      var _a12, _b;
+      var _a10, _b;
       const { value, done } = await res.next();
       if (done) {
         controller.close();
         return;
       }
-      const text = trimStartOfStream((_b = (_a12 = value.token) == null ? void 0 : _a12.text) != null ? _b : "");
+      const text = trimStartOfStream((_b = (_a10 = value.token) == null ? void 0 : _a10.text) != null ? _b : "");
       if (!text)
         return;
       if (value.generated_text != null && value.generated_text.length > 0) {
@@ -4562,11 +4524,11 @@ function InkeepStream(res, callbacks) {
   let chat_session_id = "";
   let records_cited;
   const inkeepEventParser = (data, options) => {
-    var _a12, _b;
+    var _a10, _b;
     const { event } = options;
     if (event === "records_cited") {
       records_cited = JSON.parse(data);
-      (_a12 = callbacks == null ? void 0 : callbacks.onRecordsCited) == null ? void 0 : _a12.call(callbacks, records_cited);
+      (_a10 = callbacks == null ? void 0 : callbacks.onRecordsCited) == null ? void 0 : _a10.call(callbacks, records_cited);
     }
     if (event === "message_chunk") {
       const inkeepMessageChunk = JSON.parse(data);
@@ -4579,12 +4541,12 @@ function InkeepStream(res, callbacks) {
   passThroughCallbacks = {
     ...passThroughCallbacks,
     onFinal: (completion) => {
-      var _a12;
+      var _a10;
       const inkeepOnFinalMetadata = {
         chat_session_id,
         records_cited
       };
-      (_a12 = callbacks == null ? void 0 : callbacks.onFinal) == null ? void 0 : _a12.call(callbacks, completion, inkeepOnFinalMetadata);
+      (_a10 = callbacks == null ? void 0 : callbacks.onFinal) == null ? void 0 : _a10.call(callbacks, completion, inkeepOnFinalMetadata);
     }
   };
   return AIStream(res, inkeepEventParser, passThroughCallbacks).pipeThrough(
@@ -4606,7 +4568,7 @@ function toDataStream(stream, callbacks) {
   return stream.pipeThrough(
     new TransformStream({
       transform: async (value, controller) => {
-        var _a12;
+        var _a10;
        if (typeof value === "string") {
          controller.enqueue(value);
          return;
@@ -4614,7 +4576,7 @@ function toDataStream(stream, callbacks) {
        if ("event" in value) {
          if (value.event === "on_chat_model_stream") {
            forwardAIMessageChunk(
-              (_a12 = value.data) == null ? void 0 : _a12.chunk,
+              (_a10 = value.data) == null ? void 0 : _a10.chunk,
              controller
            );
          }
@@ -4626,13 +4588,13 @@ function toDataStream(stream, callbacks) {
   ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(createStreamDataTransformer());
 }
 function toDataStreamResponse(stream, options) {
-  var _a12;
+  var _a10;
   const dataStream = toDataStream(stream, options == null ? void 0 : options.callbacks);
   const data = options == null ? void 0 : options.data;
   const init = options == null ? void 0 : options.init;
   const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
   return new Response(responseStream, {
-    status: (_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200,
+    status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
     statusText: init == null ? void 0 : init.statusText,
     headers: prepareResponseHeaders(init, {
       contentType: "text/plain; charset=utf-8",
@@ -4714,9 +4676,9 @@ function LangChainStream(callbacks) {
 
 // streams/mistral-stream.ts
 async function* streamable4(stream) {
-  var _a12, _b;
+  var _a10, _b;
   for await (const chunk of stream) {
-    const content = (_b = (_a12 = chunk.choices[0]) == null ? void 0 : _a12.delta) == null ? void 0 : _b.content;
+    const content = (_b = (_a10 = chunk.choices[0]) == null ? void 0 : _a10.delta) == null ? void 0 : _b.content;
     if (content === void 0 || content === "") {
       continue;
     }
@@ -4746,10 +4708,10 @@ async function* streamable5(stream) {
     model: chunk.model,
     // not exposed by Azure API
     choices: chunk.choices.map((choice) => {
-      var _a12, _b, _c, _d, _e, _f, _g;
+      var _a10, _b, _c, _d, _e, _f, _g;
      return {
        delta: {
-          content: (_a12 = choice.delta) == null ? void 0 : _a12.content,
+          content: (_a10 = choice.delta) == null ? void 0 : _a10.content,
          function_call: (_b = choice.delta) == null ? void 0 : _b.functionCall,
          role: (_c = choice.delta) == null ? void 0 : _c.role,
          tool_calls: ((_e = (_d = choice.delta) == null ? void 0 : _d.toolCalls) == null ? void 0 : _e.length) ? (_g = (_f = choice.delta) == null ? void 0 : _f.toolCalls) == null ? void 0 : _g.map((toolCall, index) => ({
@@ -4774,9 +4736,9 @@ function chunkToText() {
   const trimStartOfStream = trimStartOfStreamHelper();
   let isFunctionStreamingIn;
   return (json) => {
-    var _a12, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+    var _a10, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
     if (isChatCompletionChunk(json)) {
-      const delta = (_a12 = json.choices[0]) == null ? void 0 : _a12.delta;
+      const delta = (_a10 = json.choices[0]) == null ? void 0 : _a10.delta;
      if ((_b = delta.function_call) == null ? void 0 : _b.name) {
        isFunctionStreamingIn = true;
        return {
@@ -5049,8 +5011,8 @@ function createFunctionCallTransformer(callbacks) {
 
 // streams/replicate-stream.ts
 async function ReplicateStream(res, cb, options) {
-  var _a12;
-  const url = (_a12 = res.urls) == null ? void 0 : _a12.stream;
+  var _a10;
+  const url = (_a10 = res.urls) == null ? void 0 : _a10.stream;
   if (!url) {
     if (res.error)
       throw new Error(res.error);
@@ -5071,8 +5033,8 @@ async function ReplicateStream(res, cb, options) {
 
 // streams/stream-to-response.ts
 function streamToResponse(res, response, init, data) {
-  var _a12;
-  response.writeHead((_a12 = init == null ? void 0 : init.status) != null ? _a12 : 200, {
+  var _a10;
+  response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
     "Content-Type": "text/plain; charset=utf-8",
     ...init == null ? void 0 : init.headers
   });
@@ -5135,7 +5097,6 @@ var nanoid = import_provider_utils8.generateId;
   InvalidArgumentError,
   InvalidDataContentError,
   InvalidMessageRoleError,
-  InvalidModelIdError,
   InvalidPromptError,
   InvalidResponseDataError,
   InvalidToolArgumentsError,
@@ -5144,6 +5105,7 @@ var nanoid = import_provider_utils8.generateId;
   LangChainStream,
   LoadAPIKeyError,
   MistralStream,
+  NoContentGeneratedError,
   NoObjectGeneratedError,
   NoSuchModelError,
   NoSuchProviderError,
@@ -5166,6 +5128,7 @@ var nanoid = import_provider_utils8.generateId;
   experimental_StreamData,
   experimental_createModelRegistry,
   experimental_createProviderRegistry,
+  experimental_customProvider,
   experimental_generateObject,
   experimental_generateText,
   experimental_streamObject,