ai 5.0.0-alpha.5 → 5.0.0-alpha.6

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -4462,11 +4462,17 @@ function validateObjectGenerationInput({
4462
4462
  }
4463
4463
  }
4464
4464
 
4465
+ // core/prompt/resolve-language-model.ts
4466
+ import { gateway } from "@ai-sdk/gateway";
4467
+ function resolveLanguageModel(model) {
4468
+ return typeof model === "string" ? gateway.languageModel(model) : model;
4469
+ }
4470
+
4465
4471
  // core/generate-object/generate-object.ts
4466
4472
  var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
4467
4473
  async function generateObject(options) {
4468
4474
  const {
4469
- model,
4475
+ model: modelArg,
4470
4476
  output = "object",
4471
4477
  system,
4472
4478
  prompt,
@@ -4483,6 +4489,7 @@ async function generateObject(options) {
4483
4489
  } = {},
4484
4490
  ...settings
4485
4491
  } = options;
4492
+ const model = resolveLanguageModel(modelArg);
4486
4493
  const enumValues = "enum" in options ? options.enum : void 0;
4487
4494
  const {
4488
4495
  schema: inputSchema,
@@ -4942,7 +4949,7 @@ function streamObject(options) {
4942
4949
  }
4943
4950
  var DefaultStreamObjectResult = class {
4944
4951
  constructor({
4945
- model,
4952
+ model: modelArg,
4946
4953
  headers,
4947
4954
  telemetry,
4948
4955
  settings,
@@ -4967,6 +4974,7 @@ var DefaultStreamObjectResult = class {
4967
4974
  this._warnings = new DelayedPromise();
4968
4975
  this._request = new DelayedPromise();
4969
4976
  this._response = new DelayedPromise();
4977
+ const model = resolveLanguageModel(modelArg);
4970
4978
  const { maxRetries, retry } = prepareRetries({
4971
4979
  maxRetries: maxRetriesArg
4972
4980
  });
@@ -5784,7 +5792,7 @@ var originalGenerateId3 = createIdGenerator3({
5784
5792
  size: 24
5785
5793
  });
5786
5794
  async function generateText({
5787
- model,
5795
+ model: modelArg,
5788
5796
  tools,
5789
5797
  toolChoice,
5790
5798
  system,
@@ -5809,6 +5817,7 @@ async function generateText({
5809
5817
  onStepFinish,
5810
5818
  ...settings
5811
5819
  }) {
5820
+ const model = resolveLanguageModel(modelArg);
5812
5821
  const stopConditions = asArray(stopWhen);
5813
5822
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
5814
5823
  const callSettings = prepareCallSettings(settings);
@@ -5845,7 +5854,7 @@ async function generateText({
5845
5854
  }),
5846
5855
  tracer,
5847
5856
  fn: async (span) => {
5848
- var _a17, _b, _c, _d;
5857
+ var _a17, _b, _c, _d, _e;
5849
5858
  const callSettings2 = prepareCallSettings(settings);
5850
5859
  let currentModelResponse;
5851
5860
  let currentToolCalls = [];
@@ -5864,16 +5873,18 @@ async function generateText({
5864
5873
  }));
5865
5874
  const promptMessages = await convertToLanguageModelPrompt({
5866
5875
  prompt: {
5867
- system: initialPrompt.system,
5876
+ system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
5868
5877
  messages: stepInputMessages
5869
5878
  },
5870
5879
  supportedUrls: await model.supportedUrls
5871
5880
  });
5872
- const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
5881
+ const stepModel = resolveLanguageModel(
5882
+ (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
5883
+ );
5873
5884
  const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
5874
5885
  tools,
5875
- toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
5876
- activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
5886
+ toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
5887
+ activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
5877
5888
  });
5878
5889
  currentModelResponse = await retry(
5879
5890
  () => {
@@ -5916,7 +5927,7 @@ async function generateText({
5916
5927
  }),
5917
5928
  tracer,
5918
5929
  fn: async (span2) => {
5919
- var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
5930
+ var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
5920
5931
  const result = await stepModel.doGenerate({
5921
5932
  ...callSettings2,
5922
5933
  tools: stepTools,
@@ -5930,7 +5941,7 @@ async function generateText({
5930
5941
  const responseData = {
5931
5942
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
5932
5943
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
5933
- modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : stepModel.modelId,
5944
+ modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
5934
5945
  headers: (_g = result.response) == null ? void 0 : _g.headers,
5935
5946
  body: (_h = result.response) == null ? void 0 : _h.body
5936
5947
  };
@@ -6006,7 +6017,7 @@ async function generateText({
6006
6017
  usage: currentModelResponse.usage,
6007
6018
  warnings: currentModelResponse.warnings,
6008
6019
  providerMetadata: currentModelResponse.providerMetadata,
6009
- request: (_d = currentModelResponse.request) != null ? _d : {},
6020
+ request: (_e = currentModelResponse.request) != null ? _e : {},
6010
6021
  response: {
6011
6022
  ...currentModelResponse.response,
6012
6023
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -6602,7 +6613,7 @@ function streamText({
6602
6613
  ...settings
6603
6614
  }) {
6604
6615
  return new DefaultStreamTextResult({
6605
- model,
6616
+ model: resolveLanguageModel(model),
6606
6617
  telemetry,
6607
6618
  headers,
6608
6619
  settings,
@@ -6917,7 +6928,7 @@ var DefaultStreamTextResult = class {
6917
6928
  responseMessages,
6918
6929
  usage
6919
6930
  }) {
6920
- var _a17, _b, _c;
6931
+ var _a17, _b, _c, _d;
6921
6932
  stepFinish = new DelayedPromise();
6922
6933
  const initialPrompt = await standardizePrompt({
6923
6934
  system,
@@ -6935,16 +6946,18 @@ var DefaultStreamTextResult = class {
6935
6946
  }));
6936
6947
  const promptMessages = await convertToLanguageModelPrompt({
6937
6948
  prompt: {
6938
- system: initialPrompt.system,
6949
+ system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
6939
6950
  messages: stepInputMessages
6940
6951
  },
6941
6952
  supportedUrls: await model.supportedUrls
6942
6953
  });
6943
- const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
6954
+ const stepModel = resolveLanguageModel(
6955
+ (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
6956
+ );
6944
6957
  const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
6945
6958
  tools,
6946
- toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
6947
- activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
6959
+ toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
6960
+ activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
6948
6961
  });
6949
6962
  const {
6950
6963
  result: { stream: stream2, response, request },
@@ -7050,7 +7063,7 @@ var DefaultStreamTextResult = class {
7050
7063
  streamWithToolResults.pipeThrough(
7051
7064
  new TransformStream({
7052
7065
  async transform(chunk, controller) {
7053
- var _a18, _b2, _c2, _d;
7066
+ var _a18, _b2, _c2, _d2;
7054
7067
  if (chunk.type === "stream-start") {
7055
7068
  warnings = chunk.warnings;
7056
7069
  return;
@@ -7127,7 +7140,7 @@ var DefaultStreamTextResult = class {
7127
7140
  doStreamSpan.addEvent("ai.stream.finish");
7128
7141
  doStreamSpan.setAttributes({
7129
7142
  "ai.response.msToFinish": msToFinish,
7130
- "ai.response.avgOutputTokensPerSecond": 1e3 * ((_d = stepUsage.outputTokens) != null ? _d : 0) / msToFinish
7143
+ "ai.response.avgOutputTokensPerSecond": 1e3 * ((_d2 = stepUsage.outputTokens) != null ? _d2 : 0) / msToFinish
7131
7144
  });
7132
7145
  break;
7133
7146
  }
@@ -7824,7 +7837,9 @@ var doWrap = ({
7824
7837
  };
7825
7838
 
7826
7839
  // core/registry/custom-provider.ts
7827
- import { NoSuchModelError as NoSuchModelError2 } from "@ai-sdk/provider";
7840
+ import {
7841
+ NoSuchModelError as NoSuchModelError2
7842
+ } from "@ai-sdk/provider";
7828
7843
  function customProvider({
7829
7844
  languageModels,
7830
7845
  textEmbeddingModels,
@@ -7889,7 +7904,9 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
7889
7904
  _a16 = symbol16;
7890
7905
 
7891
7906
  // core/registry/provider-registry.ts
7892
- import { NoSuchModelError as NoSuchModelError4 } from "@ai-sdk/provider";
7907
+ import {
7908
+ NoSuchModelError as NoSuchModelError4
7909
+ } from "@ai-sdk/provider";
7893
7910
  function createProviderRegistry(providers, {
7894
7911
  separator = ":"
7895
7912
  } = {}) {