ai 4.1.16 → 4.1.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2341,7 +2341,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span) => {
- var _a15, _b;
+ var _a15, _b, _c, _d;
  if (mode === "auto" || mode == null) {
  mode = model.defaultObjectGenerationMode;
  }
@@ -2370,7 +2370,8 @@ async function generateObject({
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
+ modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+ // support 'this' context
  });
  const generateResult = await retry(
  () => recordSpan({
@@ -2403,7 +2404,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a16, _b2, _c, _d, _e, _f;
+ var _a16, _b2, _c2, _d2, _e, _f;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-json",
@@ -2420,7 +2421,7 @@ async function generateObject({
  });
  const responseData = {
  id: (_b2 = (_a16 = result2.response) == null ? void 0 : _a16.id) != null ? _b2 : generateId3(),
- timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+ timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
  };
  if (result2.text === void 0) {
@@ -2461,7 +2462,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  resultProviderMetadata = generateResult.providerMetadata;
- request = (_a15 = generateResult.request) != null ? _a15 : {};
+ request = (_b = generateResult.request) != null ? _b : {};
  response = generateResult.responseData;
  break;
  }
@@ -2473,7 +2474,8 @@ async function generateObject({
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
+ modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
+ // support 'this' context,
  });
  const inputFormat = standardizedPrompt.type;
  const generateResult = await retry(
@@ -2507,7 +2509,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a16, _b2, _c, _d, _e, _f, _g, _h;
+ var _a16, _b2, _c2, _d2, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-tool",
@@ -2527,7 +2529,7 @@ async function generateObject({
  });
  const objectText = (_b2 = (_a16 = result2.toolCalls) == null ? void 0 : _a16[0]) == null ? void 0 : _b2.args;
  const responseData = {
- id: (_d = (_c = result2.response) == null ? void 0 : _c.id) != null ? _d : generateId3(),
+ id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
  timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
  modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId
  };
@@ -2569,7 +2571,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  resultProviderMetadata = generateResult.providerMetadata;
- request = (_b = generateResult.request) != null ? _b : {};
+ request = (_d = generateResult.request) != null ? _d : {};
  response = generateResult.responseData;
  break;
  }
@@ -2930,6 +2932,7 @@ var DefaultStreamObjectResult = class {
  tracer,
  endWhenDone: false,
  fn: async (rootSpan) => {
+ var _a15, _b;
  if (mode === "auto" || mode == null) {
  mode = model.defaultObjectGenerationMode;
  }
@@ -2960,7 +2963,8 @@ var DefaultStreamObjectResult = class {
  prompt: await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
+ modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+ // support 'this' context
  }),
  providerMetadata: providerOptions,
  abortSignal,
@@ -3002,7 +3006,8 @@ var DefaultStreamObjectResult = class {
  prompt: await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
+ modelSupportsUrl: (_b = model.supportsUrl) == null ? void 0 : _b.bind(model)
+ // support 'this' context,
  }),
  providerMetadata: providerOptions,
  abortSignal,
@@ -3096,7 +3101,7 @@ var DefaultStreamObjectResult = class {
  const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a15, _b, _c;
+ var _a16, _b2, _c;
  if (isFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
@@ -3142,8 +3147,8 @@ var DefaultStreamObjectResult = class {
  switch (chunk.type) {
  case "response-metadata": {
  response = {
- id: (_a15 = chunk.id) != null ? _a15 : response.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+ id: (_a16 = chunk.id) != null ? _a16 : response.id,
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : response.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : response.modelId
  };
  break;
@@ -3740,7 +3745,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a16, _b, _c, _d, _e, _f;
+ var _a16, _b, _c, _d, _e, _f, _g;
  const mode = {
  type: "regular",
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3772,7 +3777,8 @@ async function generateText({
  messages: stepInputMessages
  },
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+ // support 'this' context
  });
  currentModelResponse = await retry(
  () => recordSpan({
@@ -3859,7 +3865,7 @@ async function generateText({
  })
  );
  currentToolCalls = await Promise.all(
- ((_a16 = currentModelResponse.toolCalls) != null ? _a16 : []).map(
+ ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
  (toolCall) => parseToolCall({
  toolCall,
  tools,
@@ -3894,7 +3900,7 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_b = currentModelResponse.text) != null ? _b : "";
+ const originalText = (_c = currentModelResponse.text) != null ? _c : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -3931,10 +3937,10 @@ async function generateText({
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
- request: (_c = currentModelResponse.request) != null ? _c : {},
+ request: (_d = currentModelResponse.request) != null ? _d : {},
  response: {
  ...currentModelResponse.response,
- headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers,
+ headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers,
  // deep clone msgs to avoid mutating past messages in multi-step:
  messages: structuredClone(responseMessages)
  },
@@ -3978,10 +3984,10 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_e = currentModelResponse.request) != null ? _e : {},
+ request: (_f = currentModelResponse.request) != null ? _f : {},
  response: {
  ...currentModelResponse.response,
- headers: (_f = currentModelResponse.rawResponse) == null ? void 0 : _f.headers,
+ headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
  messages: responseMessages
  },
  logprobs: currentModelResponse.logprobs,
@@ -4923,6 +4929,7 @@ var DefaultStreamTextResult = class {
  hasLeadingWhitespace,
  messageId
  }) {
+ var _a16;
  const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
  const stepInputMessages = [
  ...initialPrompt.messages,
@@ -4935,7 +4942,8 @@ var DefaultStreamTextResult = class {
  messages: stepInputMessages
  },
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+ // support 'this' context
  });
  const mode = {
  type: "regular",
@@ -4965,8 +4973,8 @@ var DefaultStreamTextResult = class {
  "ai.prompt.tools": {
  // convert the language model level tools:
  input: () => {
- var _a16;
- return (_a16 = mode.tools) == null ? void 0 : _a16.map((tool2) => JSON.stringify(tool2));
+ var _a17;
+ return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
  }
  },
  "ai.prompt.toolChoice": {
@@ -5052,7 +5060,7 @@ var DefaultStreamTextResult = class {
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a16, _b, _c;
+ var _a17, _b, _c;
  if (stepFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  stepFirstChunk = false;
@@ -5115,7 +5123,7 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a16 = chunk.id) != null ? _a16 : stepResponse.id,
+ id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
  timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
  };
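
Aside from the hunk headers shifting, every functional change above is the same one-line fix applied at each call site in generateObject, DefaultStreamObjectResult (streamObject), generateText, and DefaultStreamTextResult (streamText): modelSupportsUrl previously received the detached method model.supportsUrl, and now receives the compiled form of model.supportsUrl?.bind(model), so provider implementations that read instance state through "this" keep working (hence the "// support 'this' context" comments). The _a15/_c2-style churn is just the bundler renumbering its hoisted temporaries after the new ones were added. Below is a minimal TypeScript sketch of the failure mode and the fix; the Model class and its fields are hypothetical stand-ins, not the SDK's actual provider code:

// Minimal sketch (hypothetical Model class, not the SDK's provider code)
// of the bug that the .bind(model) change fixes.
class Model {
  private supportedProtocols = ["https:"];

  // A method that depends on `this` for instance state.
  supportsUrl(url: URL): boolean {
    return this.supportedProtocols.includes(url.protocol);
  }
}

const model = new Model();
const url = new URL("https://example.com/image.png");

// Before the fix: passing the method by reference detaches it from its
// receiver, so `this` is undefined inside supportsUrl and the call throws.
const detached = model.supportsUrl;
// detached(url); // TypeError: reading 'supportedProtocols' of undefined

// After the fix: optional chaining plus bind keeps `this` pointing at the
// model. This is exactly what the compiled output above expands to:
//   (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
const bound = model.supportsUrl?.bind(model);
console.log(bound?.(url)); // true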