ai 3.3.18 → 3.3.19

package/dist/index.js CHANGED
@@ -4,8 +4,8 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
- for (var name10 in all)
- __defProp(target, name10, { get: all[name10], enumerable: true });
+ for (var name11 in all)
+ __defProp(target, name11, { get: all[name11], enumerable: true });
  };
  var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
@@ -20,9 +20,9 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  // streams/index.ts
  var streams_exports = {};
  __export(streams_exports, {
- AISDKError: () => import_provider15.AISDKError,
+ AISDKError: () => import_provider16.AISDKError,
  AIStream: () => AIStream,
- APICallError: () => import_provider15.APICallError,
+ APICallError: () => import_provider16.APICallError,
  AWSBedrockAnthropicMessagesStream: () => AWSBedrockAnthropicMessagesStream,
  AWSBedrockAnthropicStream: () => AWSBedrockAnthropicStream,
  AWSBedrockCohereStream: () => AWSBedrockCohereStream,
@@ -32,24 +32,25 @@ __export(streams_exports, {
  AssistantResponse: () => AssistantResponse,
  CohereStream: () => CohereStream,
  DownloadError: () => DownloadError,
- EmptyResponseBodyError: () => import_provider15.EmptyResponseBodyError,
+ EmptyResponseBodyError: () => import_provider16.EmptyResponseBodyError,
  GoogleGenerativeAIStream: () => GoogleGenerativeAIStream,
  HuggingFaceStream: () => HuggingFaceStream,
  InkeepStream: () => InkeepStream,
  InvalidArgumentError: () => InvalidArgumentError,
  InvalidDataContentError: () => InvalidDataContentError,
  InvalidMessageRoleError: () => InvalidMessageRoleError,
- InvalidPromptError: () => import_provider15.InvalidPromptError,
- InvalidResponseDataError: () => import_provider15.InvalidResponseDataError,
+ InvalidPromptError: () => import_provider16.InvalidPromptError,
+ InvalidResponseDataError: () => import_provider16.InvalidResponseDataError,
  InvalidToolArgumentsError: () => InvalidToolArgumentsError,
- JSONParseError: () => import_provider15.JSONParseError,
+ JSONParseError: () => import_provider16.JSONParseError,
  LangChainAdapter: () => langchain_adapter_exports,
  LangChainStream: () => LangChainStream,
- LoadAPIKeyError: () => import_provider15.LoadAPIKeyError,
+ LoadAPIKeyError: () => import_provider16.LoadAPIKeyError,
+ MessageConversionError: () => MessageConversionError,
  MistralStream: () => MistralStream,
- NoContentGeneratedError: () => import_provider15.NoContentGeneratedError,
+ NoContentGeneratedError: () => import_provider16.NoContentGeneratedError,
  NoObjectGeneratedError: () => NoObjectGeneratedError,
- NoSuchModelError: () => import_provider15.NoSuchModelError,
+ NoSuchModelError: () => import_provider16.NoSuchModelError,
  NoSuchProviderError: () => NoSuchProviderError,
  NoSuchToolError: () => NoSuchToolError,
  OpenAIStream: () => OpenAIStream,
@@ -57,8 +58,8 @@ __export(streams_exports, {
  RetryError: () => RetryError,
  StreamData: () => StreamData2,
  StreamingTextResponse: () => StreamingTextResponse,
- TypeValidationError: () => import_provider15.TypeValidationError,
- UnsupportedFunctionalityError: () => import_provider15.UnsupportedFunctionalityError,
+ TypeValidationError: () => import_provider16.TypeValidationError,
+ UnsupportedFunctionalityError: () => import_provider16.UnsupportedFunctionalityError,
  convertToCoreMessages: () => convertToCoreMessages,
  cosineSimilarity: () => cosineSimilarity,
  createCallbacksTransformer: () => createCallbacksTransformer,
@@ -224,7 +225,7 @@ function getBaseTelemetryAttributes({
  telemetry,
  headers
  }) {
- var _a10;
+ var _a11;
  return {
  "ai.model.provider": model.provider,
  "ai.model.id": model.modelId,
@@ -234,7 +235,7 @@ function getBaseTelemetryAttributes({
  return attributes;
  }, {}),
  // add metadata as attributes:
- ...Object.entries((_a10 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a10 : {}).reduce(
+ ...Object.entries((_a11 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a11 : {}).reduce(
  (attributes, [key, value]) => {
  attributes[`ai.telemetry.metadata.${key}`] = value;
  return attributes;
@@ -259,7 +260,7 @@ var noopTracer = {
  startSpan() {
  return noopSpan;
  },
- startActiveSpan(name10, arg1, arg2, arg3) {
+ startActiveSpan(name11, arg1, arg2, arg3) {
  if (typeof arg1 === "function") {
  return arg1(noopSpan);
  }
@@ -327,13 +328,13 @@ function getTracer({ isEnabled }) {
  // core/telemetry/record-span.ts
  var import_api2 = require("@opentelemetry/api");
  function recordSpan({
- name: name10,
+ name: name11,
  tracer,
  attributes,
  fn,
  endWhenDone = true
  }) {
- return tracer.startActiveSpan(name10, { attributes }, async (span) => {
+ return tracer.startActiveSpan(name11, { attributes }, async (span) => {
  try {
  const result = await fn(span);
  if (endWhenDone) {
@@ -399,14 +400,14 @@ async function embed({
  headers,
  experimental_telemetry: telemetry
  }) {
- var _a10;
+ var _a11;
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
  settings: { maxRetries }
  });
- const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
  return recordSpan({
  name: "ai.embed",
  attributes: selectTelemetryAttributes({
@@ -439,14 +440,14 @@ async function embed({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a11;
+ var _a12;
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
  headers
  });
  const embedding2 = modelResponse.embeddings[0];
- const usage2 = (_a11 = modelResponse.usage) != null ? _a11 : { tokens: NaN };
+ const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -512,14 +513,14 @@ async function embedMany({
  headers,
  experimental_telemetry: telemetry
  }) {
- var _a10;
+ var _a11;
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
  settings: { maxRetries }
  });
- const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
  return recordSpan({
  name: "ai.embedMany",
  attributes: selectTelemetryAttributes({
@@ -557,14 +558,14 @@ async function embedMany({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a11;
+ var _a12;
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
  headers
  });
  const embeddings3 = modelResponse.embeddings;
- const usage2 = (_a11 = modelResponse.usage) != null ? _a11 : { tokens: NaN };
+ const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -616,14 +617,14 @@ async function embedMany({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a11;
+ var _a12;
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
  headers
  });
  const embeddings2 = modelResponse.embeddings;
- const usage2 = (_a11 = modelResponse.usage) != null ? _a11 : { tokens: NaN };
+ const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -725,7 +726,7 @@ async function download({
  url,
  fetchImplementation = fetch
  }) {
- var _a10;
+ var _a11;
  const urlText = url.toString();
  try {
  const response = await fetchImplementation(urlText);
@@ -738,7 +739,7 @@ async function download({
  }
  return {
  data: new Uint8Array(await response.arrayBuffer()),
- mimeType: (_a10 = response.headers.get("content-type")) != null ? _a10 : void 0
+ mimeType: (_a11 = response.headers.get("content-type")) != null ? _a11 : void 0
  };
  } catch (error) {
  if (DownloadError.isInstance(error)) {
@@ -816,8 +817,8 @@ var dataContentSchema = import_zod.z.union([
  import_zod.z.custom(
  // Buffer might not be available in some environments such as CloudFlare:
  (value) => {
- var _a10, _b;
- return (_b = (_a10 = globalThis.Buffer) == null ? void 0 : _a10.isBuffer(value)) != null ? _b : false;
+ var _a11, _b;
+ return (_b = (_a11 = globalThis.Buffer) == null ? void 0 : _a11.isBuffer(value)) != null ? _b : false;
  },
  { message: "Must be a Buffer" }
  )
@@ -945,7 +946,7 @@ function convertToLanguageModelMessage(message, downloadedImages) {
  role: "user",
  content: message.content.map(
  (part) => {
- var _a10, _b, _c;
+ var _a11, _b, _c;
  switch (part.type) {
  case "text": {
  return {
@@ -968,7 +969,7 @@ function convertToLanguageModelMessage(message, downloadedImages) {
  return {
  type: "image",
  image: downloadedImage.data,
- mimeType: (_a10 = part.mimeType) != null ? _a10 : downloadedImage.mimeType,
+ mimeType: (_a11 = part.mimeType) != null ? _a11 : downloadedImage.mimeType,
  providerMetadata: part.experimental_providerMetadata
  };
  }
@@ -1400,8 +1401,8 @@ function prepareResponseHeaders(init, {
  contentType,
  dataStreamVersion
  }) {
- var _a10;
- const headers = new Headers((_a10 = init == null ? void 0 : init.headers) != null ? _a10 : {});
+ var _a11;
+ const headers = new Headers((_a11 = init == null ? void 0 : init.headers) != null ? _a11 : {});
  if (!headers.has("Content-Type")) {
  headers.set("Content-Type", contentType);
  }
@@ -1710,7 +1711,7 @@ async function generateObject({
  experimental_telemetry: telemetry,
  ...settings
  }) {
- var _a10;
+ var _a11;
  validateObjectGenerationInput({
  output,
  mode,
@@ -1728,7 +1729,7 @@ async function generateObject({
  headers,
  settings: { ...settings, maxRetries }
  });
- const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
  return recordSpan({
  name: "ai.generateObject",
  attributes: selectTelemetryAttributes({
@@ -1889,7 +1890,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a11, _b;
+ var _a12, _b;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-tool",
@@ -1906,7 +1907,7 @@ async function generateObject({
  abortSignal,
  headers
  });
- const objectText = (_b = (_a11 = result2.toolCalls) == null ? void 0 : _a11[0]) == null ? void 0 : _b.args;
+ const objectText = (_b = (_a12 = result2.toolCalls) == null ? void 0 : _a12[0]) == null ? void 0 : _b.args;
  if (objectText === void 0) {
  throw new NoObjectGeneratedError();
  }
@@ -1994,9 +1995,9 @@ var DefaultGenerateObjectResult = class {
  this.experimental_providerMetadata = options.providerMetadata;
  }
  toJsonResponse(init) {
- var _a10;
+ var _a11;
  return new Response(JSON.stringify(this.object), {
- status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
  headers: prepareResponseHeaders(init, {
  contentType: "application/json; charset=utf-8"
  })
@@ -2046,17 +2047,17 @@ var DelayedPromise = class {
  return this.promise;
  }
  resolve(value) {
- var _a10;
+ var _a11;
  this.status = { type: "resolved", value };
  if (this.promise) {
- (_a10 = this._resolve) == null ? void 0 : _a10.call(this, value);
+ (_a11 = this._resolve) == null ? void 0 : _a11.call(this, value);
  }
  }
  reject(error) {
- var _a10;
+ var _a11;
  this.status = { type: "rejected", error };
  if (this.promise) {
- (_a10 = this._reject) == null ? void 0 : _a10.call(this, error);
+ (_a11 = this._reject) == null ? void 0 : _a11.call(this, error);
  }
  }
  };
@@ -2079,7 +2080,7 @@ async function streamObject({
  onFinish,
  ...settings
  }) {
- var _a10;
+ var _a11;
  validateObjectGenerationInput({
  output,
  mode,
@@ -2097,7 +2098,7 @@ async function streamObject({
  headers,
  settings: { ...settings, maxRetries }
  });
- const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
  const retry = retryWithExponentialBackoff({ maxRetries });
  return recordSpan({
  name: "ai.streamObject",
@@ -2486,8 +2487,8 @@ var DefaultStreamObjectResult = class {
  });
  }
  pipeTextStreamToResponse(response, init) {
- var _a10;
- response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
  "Content-Type": "text/plain; charset=utf-8",
  ...init == null ? void 0 : init.headers
  });
@@ -2509,9 +2510,9 @@ var DefaultStreamObjectResult = class {
  read();
  }
  toTextStreamResponse(init) {
- var _a10;
+ var _a11;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
  headers: prepareResponseHeaders(init, {
  contentType: "text/plain; charset=utf-8"
  })
@@ -2540,9 +2541,9 @@ function prepareToolsAndToolChoice({
  };
  }
  return {
- tools: Object.entries(tools).map(([name10, tool2]) => ({
+ tools: Object.entries(tools).map(([name11, tool2]) => ({
  type: "function",
- name: name10,
+ name: name11,
  description: tool2.description,
  parameters: (0, import_ui_utils3.asSchema)(tool2.parameters).jsonSchema
  })),
@@ -2691,14 +2692,14 @@ async function generateText({
  experimental_telemetry: telemetry,
  ...settings
  }) {
- var _a10;
+ var _a11;
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
  settings: { ...settings, maxRetries }
  });
- const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
  return recordSpan({
  name: "ai.generateText",
  attributes: selectTelemetryAttributes({
@@ -2718,7 +2719,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a11, _b, _c, _d;
+ var _a12, _b, _c, _d;
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = validatePrompt({
  system,
@@ -2804,7 +2805,7 @@ async function generateText({
  }
  })
  );
- currentToolCalls = ((_a11 = currentModelResponse.toolCalls) != null ? _a11 : []).map(
+ currentToolCalls = ((_a12 = currentModelResponse.toolCalls) != null ? _a12 : []).map(
  (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
  );
  currentToolResults = tools == null ? [] : await executeTools({
@@ -3272,14 +3273,14 @@ async function streamText({
  onFinish,
  ...settings
  }) {
- var _a10;
+ var _a11;
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
  settings: { ...settings, maxRetries }
  });
- const tracer = getTracer({ isEnabled: (_a10 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a10 : false });
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
  return recordSpan({
  name: "ai.streamText",
  attributes: selectTelemetryAttributes({
@@ -3656,8 +3657,8 @@ var DefaultStreamTextResult = class {
  return this.pipeDataStreamToResponse(response, init);
  }
  pipeDataStreamToResponse(response, init) {
- var _a10;
- response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
  "Content-Type": "text/plain; charset=utf-8",
  ...init == null ? void 0 : init.headers
  });
@@ -3679,8 +3680,8 @@ var DefaultStreamTextResult = class {
  read();
  }
  pipeTextStreamToResponse(response, init) {
- var _a10;
- response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
  "Content-Type": "text/plain; charset=utf-8",
  ...init == null ? void 0 : init.headers
  });
@@ -3705,7 +3706,7 @@ var DefaultStreamTextResult = class {
  return this.toDataStreamResponse(options);
  }
  toDataStreamResponse(options) {
- var _a10;
+ var _a11;
  const init = options == null ? void 0 : "init" in options ? options.init : {
  headers: "headers" in options ? options.headers : void 0,
  status: "status" in options ? options.status : void 0,
@@ -3715,7 +3716,7 @@ var DefaultStreamTextResult = class {
  const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
  const stream = data ? mergeStreams(data.stream, this.toDataStream({ getErrorMessage: getErrorMessage4 })) : this.toDataStream({ getErrorMessage: getErrorMessage4 });
  return new Response(stream, {
- status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
  statusText: init == null ? void 0 : init.statusText,
  headers: prepareResponseHeaders(init, {
  contentType: "text/plain; charset=utf-8",
@@ -3724,9 +3725,9 @@ var DefaultStreamTextResult = class {
  });
  }
  toTextStreamResponse(init) {
- var _a10;
+ var _a11;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
  headers: prepareResponseHeaders(init, {
  contentType: "text/plain; charset=utf-8"
  })
@@ -3737,7 +3738,7 @@ var experimental_streamText = streamText;

  // core/prompt/attachments-to-parts.ts
  function attachmentsToParts(attachments) {
- var _a10, _b, _c;
+ var _a11, _b, _c;
  const parts = [];
  for (const attachment of attachments) {
  let url;
@@ -3749,7 +3750,7 @@ function attachmentsToParts(attachments) {
  switch (url.protocol) {
  case "http:":
  case "https:": {
- if ((_a10 = attachment.contentType) == null ? void 0 : _a10.startsWith("image/")) {
+ if ((_a11 = attachment.contentType) == null ? void 0 : _a11.startsWith("image/")) {
  parts.push({ type: "image", image: url });
  }
  break;
@@ -3790,15 +3791,32 @@ function attachmentsToParts(attachments) {
  return parts;
  }

+ // core/prompt/message-conversion-error.ts
+ var import_provider12 = require("@ai-sdk/provider");
+ var name9 = "AI_MessageConversionError";
+ var marker9 = `vercel.ai.error.${name9}`;
+ var symbol9 = Symbol.for(marker9);
+ var _a9;
+ var MessageConversionError = class extends import_provider12.AISDKError {
+ constructor({
+ originalMessage,
+ message
+ }) {
+ super({ name: name9, message });
+ this[_a9] = true;
+ this.originalMessage = originalMessage;
+ }
+ static isInstance(error) {
+ return import_provider12.AISDKError.hasMarker(error, marker9);
+ }
+ };
+ _a9 = symbol9;
+
  // core/prompt/convert-to-core-messages.ts
  function convertToCoreMessages(messages) {
  const coreMessages = [];
- for (const {
- role,
- content,
- toolInvocations,
- experimental_attachments
- } of messages) {
+ for (const message of messages) {
+ const { role, content, toolInvocations, experimental_attachments } = message;
  switch (role) {
  case "system": {
  coreMessages.push({
@@ -3836,21 +3854,36 @@ function convertToCoreMessages(messages) {
  });
  coreMessages.push({
  role: "tool",
- content: toolInvocations.map(
- ({ toolCallId, toolName, args, result }) => ({
+ content: toolInvocations.map((ToolInvocation) => {
+ if (!("result" in ToolInvocation)) {
+ throw new MessageConversionError({
+ originalMessage: message,
+ message: "ToolInvocation must have a result: " + JSON.stringify(ToolInvocation)
+ });
+ }
+ const { toolCallId, toolName, args, result } = ToolInvocation;
+ return {
  type: "tool-result",
  toolCallId,
  toolName,
  args,
  result
- })
- )
+ };
+ })
  });
  break;
  }
+ case "function":
+ case "data":
+ case "tool": {
+ break;
+ }
  default: {
  const _exhaustiveCheck = role;
- throw new Error(`Unhandled role: ${_exhaustiveCheck}`);
+ throw new MessageConversionError({
+ originalMessage: message,
+ message: `Unsupported role: ${_exhaustiveCheck}`
+ });
  }
  }
  }
@@ -3858,7 +3891,7 @@ function convertToCoreMessages(messages) {
  }

  // core/registry/custom-provider.ts
- var import_provider12 = require("@ai-sdk/provider");
+ var import_provider13 = require("@ai-sdk/provider");
  function experimental_customProvider({
  languageModels,
  textEmbeddingModels,
@@ -3872,7 +3905,7 @@ function experimental_customProvider({
  if (fallbackProvider) {
  return fallbackProvider.languageModel(modelId);
  }
- throw new import_provider12.NoSuchModelError({ modelId, modelType: "languageModel" });
+ throw new import_provider13.NoSuchModelError({ modelId, modelType: "languageModel" });
  },
  textEmbeddingModel(modelId) {
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -3881,18 +3914,18 @@ function experimental_customProvider({
  if (fallbackProvider) {
  return fallbackProvider.textEmbeddingModel(modelId);
  }
- throw new import_provider12.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider13.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  }
  };
  }

  // core/registry/no-such-provider-error.ts
- var import_provider13 = require("@ai-sdk/provider");
- var name9 = "AI_NoSuchProviderError";
- var marker9 = `vercel.ai.error.${name9}`;
- var symbol9 = Symbol.for(marker9);
- var _a9;
- var NoSuchProviderError = class extends import_provider13.NoSuchModelError {
+ var import_provider14 = require("@ai-sdk/provider");
+ var name10 = "AI_NoSuchProviderError";
+ var marker10 = `vercel.ai.error.${name10}`;
+ var symbol10 = Symbol.for(marker10);
+ var _a10;
+ var NoSuchProviderError = class extends import_provider14.NoSuchModelError {
  constructor({
  modelId,
  modelType,
@@ -3900,19 +3933,19 @@ var NoSuchProviderError = class extends import_provider13.NoSuchModelError {
  availableProviders,
  message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
  }) {
- super({ errorName: name9, modelId, modelType, message });
- this[_a9] = true;
+ super({ errorName: name10, modelId, modelType, message });
+ this[_a10] = true;
  this.providerId = providerId;
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return import_provider13.AISDKError.hasMarker(error, marker9);
+ return import_provider14.AISDKError.hasMarker(error, marker10);
  }
  /**
  * @deprecated use `isInstance` instead
  */
  static isNoSuchProviderError(error) {
- return error instanceof Error && error.name === name9 && typeof error.providerId === "string" && Array.isArray(error.availableProviders);
+ return error instanceof Error && error.name === name10 && typeof error.providerId === "string" && Array.isArray(error.availableProviders);
  }
  /**
  * @deprecated Do not use this method. It will be removed in the next major version.
@@ -3929,10 +3962,10 @@ var NoSuchProviderError = class extends import_provider13.NoSuchModelError {
  };
  }
  };
- _a9 = symbol9;
+ _a10 = symbol10;

  // core/registry/provider-registry.ts
- var import_provider14 = require("@ai-sdk/provider");
+ var import_provider15 = require("@ai-sdk/provider");
  function experimental_createProviderRegistry(providers) {
  const registry = new DefaultProviderRegistry();
  for (const [id, provider] of Object.entries(providers)) {
@@ -3966,7 +3999,7 @@ var DefaultProviderRegistry = class {
  splitId(id, modelType) {
  const index = id.indexOf(":");
  if (index === -1) {
- throw new import_provider14.NoSuchModelError({
+ throw new import_provider15.NoSuchModelError({
  modelId: id,
  modelType,
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -3975,21 +4008,21 @@ var DefaultProviderRegistry = class {
  return [id.slice(0, index), id.slice(index + 1)];
  }
  languageModel(id) {
- var _a10, _b;
+ var _a11, _b;
  const [providerId, modelId] = this.splitId(id, "languageModel");
- const model = (_b = (_a10 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a10, modelId);
+ const model = (_b = (_a11 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a11, modelId);
  if (model == null) {
- throw new import_provider14.NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider15.NoSuchModelError({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
  textEmbeddingModel(id) {
- var _a10, _b, _c;
+ var _a11, _b, _c;
  const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
  const provider = this.getProvider(providerId);
- const model = (_c = (_a10 = provider.textEmbeddingModel) == null ? void 0 : _a10.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
+ const model = (_c = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
  if (model == null) {
- throw new import_provider14.NoSuchModelError({
+ throw new import_provider15.NoSuchModelError({
  modelId: id,
  modelType: "textEmbeddingModel"
  });
@@ -4029,7 +4062,7 @@ function magnitude(vector) {
  }

  // errors/index.ts
- var import_provider15 = require("@ai-sdk/provider");
+ var import_provider16 = require("@ai-sdk/provider");

  // streams/ai-stream.ts
  var import_eventsource_parser = require("eventsource-parser");
@@ -4147,8 +4180,8 @@ function readableFromAsyncIterable(iterable) {
  controller.enqueue(value);
  },
  async cancel(reason) {
- var _a10;
- await ((_a10 = it.return) == null ? void 0 : _a10.call(it, reason));
+ var _a11;
+ await ((_a11 = it.return) == null ? void 0 : _a11.call(it, reason));
  }
  });
  }
@@ -4285,7 +4318,7 @@ var import_ui_utils8 = require("@ai-sdk/ui-utils");
  function AssistantResponse({ threadId, messageId }, process2) {
  const stream = new ReadableStream({
  async start(controller) {
- var _a10;
+ var _a11;
  const textEncoder = new TextEncoder();
  const sendMessage = (message) => {
  controller.enqueue(
@@ -4303,7 +4336,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  );
  };
  const forwardStream = async (stream2) => {
- var _a11, _b;
+ var _a12, _b;
  let result = void 0;
  for await (const value of stream2) {
  switch (value.event) {
@@ -4320,7 +4353,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  break;
  }
  case "thread.message.delta": {
- const content = (_a11 = value.data.delta.content) == null ? void 0 : _a11[0];
+ const content = (_a12 = value.data.delta.content) == null ? void 0 : _a12[0];
  if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
  controller.enqueue(
  textEncoder.encode(
@@ -4356,7 +4389,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  forwardStream
  });
  } catch (error) {
- sendError((_a10 = error.message) != null ? _a10 : `${error}`);
+ sendError((_a11 = error.message) != null ? _a11 : `${error}`);
  } finally {
  controller.close();
  }
@@ -4377,9 +4410,9 @@ var experimental_AssistantResponse = AssistantResponse;

  // streams/aws-bedrock-stream.ts
  async function* asDeltaIterable(response, extractTextDeltaFromChunk) {
- var _a10, _b;
+ var _a11, _b;
  const decoder = new TextDecoder();
- for await (const chunk of (_a10 = response.body) != null ? _a10 : []) {
+ for await (const chunk of (_a11 = response.body) != null ? _a11 : []) {
  const bytes = (_b = chunk.chunk) == null ? void 0 : _b.bytes;
  if (bytes != null) {
  const chunkText = decoder.decode(bytes);
@@ -4393,8 +4426,8 @@ async function* asDeltaIterable(response, extractTextDeltaFromChunk) {
  }
  function AWSBedrockAnthropicMessagesStream(response, callbacks) {
  return AWSBedrockStream(response, callbacks, (chunk) => {
- var _a10;
- return (_a10 = chunk.delta) == null ? void 0 : _a10.text;
+ var _a11;
+ return (_a11 = chunk.delta) == null ? void 0 : _a11.text;
  });
  }
  function AWSBedrockAnthropicStream(response, callbacks) {
@@ -4441,8 +4474,8 @@ async function readAndProcessLines(reader, controller) {
  controller.close();
  }
  function createParser2(res) {
- var _a10;
- const reader = (_a10 = res.body) == null ? void 0 : _a10.getReader();
+ var _a11;
+ const reader = (_a11 = res.body) == null ? void 0 : _a11.getReader();
  return new ReadableStream({
  async start(controller) {
  if (!reader) {
@@ -4472,9 +4505,9 @@ function CohereStream(reader, callbacks) {

  // streams/google-generative-ai-stream.ts
  async function* streamable3(response) {
- var _a10, _b, _c;
+ var _a11, _b, _c;
  for await (const chunk of response.stream) {
- const parts = (_c = (_b = (_a10 = chunk.candidates) == null ? void 0 : _a10[0]) == null ? void 0 : _b.content) == null ? void 0 : _c.parts;
+ const parts = (_c = (_b = (_a11 = chunk.candidates) == null ? void 0 : _a11[0]) == null ? void 0 : _b.content) == null ? void 0 : _c.parts;
  if (parts === void 0) {
  continue;
  }
@@ -4493,13 +4526,13 @@ function createParser3(res) {
  const trimStartOfStream = trimStartOfStreamHelper();
  return new ReadableStream({
  async pull(controller) {
- var _a10, _b;
+ var _a11, _b;
  const { value, done } = await res.next();
  if (done) {
  controller.close();
  return;
  }
- const text = trimStartOfStream((_b = (_a10 = value.token) == null ? void 0 : _a10.text) != null ? _b : "");
+ const text = trimStartOfStream((_b = (_a11 = value.token) == null ? void 0 : _a11.text) != null ? _b : "");
  if (!text)
  return;
  if (value.generated_text != null && value.generated_text.length > 0) {
@@ -4524,11 +4557,11 @@ function InkeepStream(res, callbacks) {
  let chat_session_id = "";
  let records_cited;
  const inkeepEventParser = (data, options) => {
- var _a10, _b;
+ var _a11, _b;
  const { event } = options;
  if (event === "records_cited") {
  records_cited = JSON.parse(data);
- (_a10 = callbacks == null ? void 0 : callbacks.onRecordsCited) == null ? void 0 : _a10.call(callbacks, records_cited);
+ (_a11 = callbacks == null ? void 0 : callbacks.onRecordsCited) == null ? void 0 : _a11.call(callbacks, records_cited);
  }
  if (event === "message_chunk") {
  const inkeepMessageChunk = JSON.parse(data);
@@ -4541,12 +4574,12 @@ function InkeepStream(res, callbacks) {
  passThroughCallbacks = {
  ...passThroughCallbacks,
  onFinal: (completion) => {
- var _a10;
+ var _a11;
  const inkeepOnFinalMetadata = {
  chat_session_id,
  records_cited
  };
- (_a10 = callbacks == null ? void 0 : callbacks.onFinal) == null ? void 0 : _a10.call(callbacks, completion, inkeepOnFinalMetadata);
+ (_a11 = callbacks == null ? void 0 : callbacks.onFinal) == null ? void 0 : _a11.call(callbacks, completion, inkeepOnFinalMetadata);
  }
  };
  return AIStream(res, inkeepEventParser, passThroughCallbacks).pipeThrough(
@@ -4568,7 +4601,7 @@ function toDataStream(stream, callbacks) {
  return stream.pipeThrough(
  new TransformStream({
  transform: async (value, controller) => {
- var _a10;
+ var _a11;
  if (typeof value === "string") {
  controller.enqueue(value);
  return;
@@ -4576,7 +4609,7 @@ function toDataStream(stream, callbacks) {
  if ("event" in value) {
  if (value.event === "on_chat_model_stream") {
  forwardAIMessageChunk(
- (_a10 = value.data) == null ? void 0 : _a10.chunk,
+ (_a11 = value.data) == null ? void 0 : _a11.chunk,
  controller
  );
  }
@@ -4588,13 +4621,13 @@ function toDataStream(stream, callbacks) {
  ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(createStreamDataTransformer());
  }
  function toDataStreamResponse(stream, options) {
- var _a10;
+ var _a11;
  const dataStream = toDataStream(stream, options == null ? void 0 : options.callbacks);
  const data = options == null ? void 0 : options.data;
  const init = options == null ? void 0 : options.init;
  const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
  return new Response(responseStream, {
- status: (_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200,
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
  statusText: init == null ? void 0 : init.statusText,
  headers: prepareResponseHeaders(init, {
  contentType: "text/plain; charset=utf-8",
@@ -4676,9 +4709,9 @@ function LangChainStream(callbacks) {

  // streams/mistral-stream.ts
  async function* streamable4(stream) {
- var _a10, _b;
+ var _a11, _b;
  for await (const chunk of stream) {
- const content = (_b = (_a10 = chunk.choices[0]) == null ? void 0 : _a10.delta) == null ? void 0 : _b.content;
+ const content = (_b = (_a11 = chunk.choices[0]) == null ? void 0 : _a11.delta) == null ? void 0 : _b.content;
  if (content === void 0 || content === "") {
  continue;
  }
@@ -4708,10 +4741,10 @@ async function* streamable5(stream) {
  model: chunk.model,
  // not exposed by Azure API
  choices: chunk.choices.map((choice) => {
- var _a10, _b, _c, _d, _e, _f, _g;
+ var _a11, _b, _c, _d, _e, _f, _g;
  return {
  delta: {
- content: (_a10 = choice.delta) == null ? void 0 : _a10.content,
+ content: (_a11 = choice.delta) == null ? void 0 : _a11.content,
  function_call: (_b = choice.delta) == null ? void 0 : _b.functionCall,
  role: (_c = choice.delta) == null ? void 0 : _c.role,
  tool_calls: ((_e = (_d = choice.delta) == null ? void 0 : _d.toolCalls) == null ? void 0 : _e.length) ? (_g = (_f = choice.delta) == null ? void 0 : _f.toolCalls) == null ? void 0 : _g.map((toolCall, index) => ({
@@ -4736,9 +4769,9 @@ function chunkToText() {
  const trimStartOfStream = trimStartOfStreamHelper();
  let isFunctionStreamingIn;
  return (json) => {
- var _a10, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+ var _a11, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
  if (isChatCompletionChunk(json)) {
- const delta = (_a10 = json.choices[0]) == null ? void 0 : _a10.delta;
+ const delta = (_a11 = json.choices[0]) == null ? void 0 : _a11.delta;
  if ((_b = delta.function_call) == null ? void 0 : _b.name) {
  isFunctionStreamingIn = true;
  return {
@@ -5011,8 +5044,8 @@ function createFunctionCallTransformer(callbacks) {

  // streams/replicate-stream.ts
  async function ReplicateStream(res, cb, options) {
- var _a10;
- const url = (_a10 = res.urls) == null ? void 0 : _a10.stream;
+ var _a11;
+ const url = (_a11 = res.urls) == null ? void 0 : _a11.stream;
  if (!url) {
  if (res.error)
  throw new Error(res.error);
@@ -5033,8 +5066,8 @@ async function ReplicateStream(res, cb, options) {

  // streams/stream-to-response.ts
  function streamToResponse(res, response, init, data) {
- var _a10;
- response.writeHead((_a10 = init == null ? void 0 : init.status) != null ? _a10 : 200, {
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
  "Content-Type": "text/plain; charset=utf-8",
  ...init == null ? void 0 : init.headers
  });
@@ -5104,6 +5137,7 @@ var nanoid = import_provider_utils8.generateId;
  LangChainAdapter,
  LangChainStream,
  LoadAPIKeyError,
+ MessageConversionError,
  MistralStream,
  NoContentGeneratedError,
  NoObjectGeneratedError,