ai 4.1.16 → 4.1.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
1
1
  # ai
2
2
 
3
+ ## 4.1.18
4
+
5
+ ### Patch Changes
6
+
7
+ - 6a1acfe: fix (ai/core): revert '@internal' tag on function definitions due to build impacts
8
+
9
+ ## 4.1.17
10
+
11
+ ### Patch Changes
12
+
13
+ - 5af8cdb: fix (ai/core): support this reference in model.supportsUrl implementations
14
+
3
15
  ## 4.1.16
4
16
 
5
17
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -508,6 +508,12 @@ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, prov
508
508
  Only applicable for HTTP-based providers.
509
509
  */
510
510
  headers?: Record<string, string>;
511
+ /**
512
+ * Internal. For test use only. May change without notice.
513
+ */
514
+ _internal?: {
515
+ currentDate?: () => Date;
516
+ };
511
517
  }): Promise<GenerateImageResult>;
512
518
 
513
519
  type CallSettings = {
@@ -1030,6 +1036,13 @@ functionality that can be fully encapsulated in the provider.
1030
1036
  @deprecated Use `providerOptions` instead.
1031
1037
  */
1032
1038
  experimental_providerMetadata?: ProviderMetadata;
1039
+ /**
1040
+ * Internal. For test use only. May change without notice.
1041
+ */
1042
+ _internal?: {
1043
+ generateId?: () => string;
1044
+ currentDate?: () => Date;
1045
+ };
1033
1046
  }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
1034
1047
  /**
1035
1048
  Generate a value from an enum (limited list of string values) using a language model.
@@ -1310,6 +1323,14 @@ functionality that can be fully encapsulated in the provider.
1310
1323
  Callback that is called when the LLM response and the final object validation are finished.
1311
1324
  */
1312
1325
  onFinish?: OnFinishCallback<OBJECT>;
1326
+ /**
1327
+ * Internal. For test use only. May change without notice.
1328
+ */
1329
+ _internal?: {
1330
+ generateId?: () => string;
1331
+ currentDate?: () => Date;
1332
+ now?: () => number;
1333
+ };
1313
1334
  }): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
1314
1335
  /**
1315
1336
  Generate an array with structured, typed elements for a given prompt and element schema using a language model.
@@ -1977,6 +1998,13 @@ A function that attempts to repair a tool call that failed to parse.
1977
1998
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
1978
1999
  */
1979
2000
  onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
2001
+ /**
2002
+ * Internal. For test use only. May change without notice.
2003
+ */
2004
+ _internal?: {
2005
+ generateId?: IDGenerator;
2006
+ currentDate?: () => Date;
2007
+ };
1980
2008
  }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
1981
2009
 
1982
2010
  /**
@@ -2234,6 +2262,12 @@ type TextStreamPart<TOOLS extends ToolSet> = {
2234
2262
  declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
2235
2263
  delayInMs?: number | null;
2236
2264
  chunking?: 'word' | 'line' | RegExp;
2265
+ /**
2266
+ * Internal. For test use only. May change without notice.
2267
+ */
2268
+ _internal?: {
2269
+ delay?: (delayInMs: number | null) => Promise<void>;
2270
+ };
2237
2271
  }): (options: {
2238
2272
  tools: TOOLS;
2239
2273
  }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
@@ -2392,6 +2426,14 @@ Details for all steps.
2392
2426
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
2393
2427
  */
2394
2428
  onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
2429
+ /**
2430
+ Internal. For test use only. May change without notice.
2431
+ */
2432
+ _internal?: {
2433
+ now?: () => number;
2434
+ generateId?: IDGenerator;
2435
+ currentDate?: () => Date;
2436
+ };
2395
2437
  }): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
2396
2438
 
2397
2439
  /**
@@ -2567,6 +2609,9 @@ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDela
2567
2609
  chunks: T[];
2568
2610
  initialDelayInMs?: number | null;
2569
2611
  chunkDelayInMs?: number | null;
2612
+ _internal?: {
2613
+ delay?: (ms: number | null) => Promise<void>;
2614
+ };
2570
2615
  }): ReadableStream<T>;
2571
2616
 
2572
2617
  declare const symbol$a: unique symbol;
package/dist/index.d.ts CHANGED
@@ -508,6 +508,12 @@ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, prov
508
508
  Only applicable for HTTP-based providers.
509
509
  */
510
510
  headers?: Record<string, string>;
511
+ /**
512
+ * Internal. For test use only. May change without notice.
513
+ */
514
+ _internal?: {
515
+ currentDate?: () => Date;
516
+ };
511
517
  }): Promise<GenerateImageResult>;
512
518
 
513
519
  type CallSettings = {
@@ -1030,6 +1036,13 @@ functionality that can be fully encapsulated in the provider.
1030
1036
  @deprecated Use `providerOptions` instead.
1031
1037
  */
1032
1038
  experimental_providerMetadata?: ProviderMetadata;
1039
+ /**
1040
+ * Internal. For test use only. May change without notice.
1041
+ */
1042
+ _internal?: {
1043
+ generateId?: () => string;
1044
+ currentDate?: () => Date;
1045
+ };
1033
1046
  }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
1034
1047
  /**
1035
1048
  Generate a value from an enum (limited list of string values) using a language model.
@@ -1310,6 +1323,14 @@ functionality that can be fully encapsulated in the provider.
1310
1323
  Callback that is called when the LLM response and the final object validation are finished.
1311
1324
  */
1312
1325
  onFinish?: OnFinishCallback<OBJECT>;
1326
+ /**
1327
+ * Internal. For test use only. May change without notice.
1328
+ */
1329
+ _internal?: {
1330
+ generateId?: () => string;
1331
+ currentDate?: () => Date;
1332
+ now?: () => number;
1333
+ };
1313
1334
  }): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
1314
1335
  /**
1315
1336
  Generate an array with structured, typed elements for a given prompt and element schema using a language model.
@@ -1977,6 +1998,13 @@ A function that attempts to repair a tool call that failed to parse.
1977
1998
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
1978
1999
  */
1979
2000
  onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
2001
+ /**
2002
+ * Internal. For test use only. May change without notice.
2003
+ */
2004
+ _internal?: {
2005
+ generateId?: IDGenerator;
2006
+ currentDate?: () => Date;
2007
+ };
1980
2008
  }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
1981
2009
 
1982
2010
  /**
@@ -2234,6 +2262,12 @@ type TextStreamPart<TOOLS extends ToolSet> = {
2234
2262
  declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
2235
2263
  delayInMs?: number | null;
2236
2264
  chunking?: 'word' | 'line' | RegExp;
2265
+ /**
2266
+ * Internal. For test use only. May change without notice.
2267
+ */
2268
+ _internal?: {
2269
+ delay?: (delayInMs: number | null) => Promise<void>;
2270
+ };
2237
2271
  }): (options: {
2238
2272
  tools: TOOLS;
2239
2273
  }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
@@ -2392,6 +2426,14 @@ Details for all steps.
2392
2426
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
2393
2427
  */
2394
2428
  onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
2429
+ /**
2430
+ Internal. For test use only. May change without notice.
2431
+ */
2432
+ _internal?: {
2433
+ now?: () => number;
2434
+ generateId?: IDGenerator;
2435
+ currentDate?: () => Date;
2436
+ };
2395
2437
  }): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
2396
2438
 
2397
2439
  /**
@@ -2567,6 +2609,9 @@ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDela
2567
2609
  chunks: T[];
2568
2610
  initialDelayInMs?: number | null;
2569
2611
  chunkDelayInMs?: number | null;
2612
+ _internal?: {
2613
+ delay?: (ms: number | null) => Promise<void>;
2614
+ };
2570
2615
  }): ReadableStream<T>;
2571
2616
 
2572
2617
  declare const symbol$a: unique symbol;
package/dist/index.js CHANGED
@@ -2408,7 +2408,7 @@ async function generateObject({
2408
2408
  }),
2409
2409
  tracer,
2410
2410
  fn: async (span) => {
2411
- var _a15, _b;
2411
+ var _a15, _b, _c, _d;
2412
2412
  if (mode === "auto" || mode == null) {
2413
2413
  mode = model.defaultObjectGenerationMode;
2414
2414
  }
@@ -2437,7 +2437,8 @@ async function generateObject({
2437
2437
  const promptMessages = await convertToLanguageModelPrompt({
2438
2438
  prompt: standardizedPrompt,
2439
2439
  modelSupportsImageUrls: model.supportsImageUrls,
2440
- modelSupportsUrl: model.supportsUrl
2440
+ modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
2441
+ // support 'this' context
2441
2442
  });
2442
2443
  const generateResult = await retry(
2443
2444
  () => recordSpan({
@@ -2470,7 +2471,7 @@ async function generateObject({
2470
2471
  }),
2471
2472
  tracer,
2472
2473
  fn: async (span2) => {
2473
- var _a16, _b2, _c, _d, _e, _f;
2474
+ var _a16, _b2, _c2, _d2, _e, _f;
2474
2475
  const result2 = await model.doGenerate({
2475
2476
  mode: {
2476
2477
  type: "object-json",
@@ -2487,7 +2488,7 @@ async function generateObject({
2487
2488
  });
2488
2489
  const responseData = {
2489
2490
  id: (_b2 = (_a16 = result2.response) == null ? void 0 : _a16.id) != null ? _b2 : generateId3(),
2490
- timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
2491
+ timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
2491
2492
  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
2492
2493
  };
2493
2494
  if (result2.text === void 0) {
@@ -2528,7 +2529,7 @@ async function generateObject({
2528
2529
  rawResponse = generateResult.rawResponse;
2529
2530
  logprobs = generateResult.logprobs;
2530
2531
  resultProviderMetadata = generateResult.providerMetadata;
2531
- request = (_a15 = generateResult.request) != null ? _a15 : {};
2532
+ request = (_b = generateResult.request) != null ? _b : {};
2532
2533
  response = generateResult.responseData;
2533
2534
  break;
2534
2535
  }
@@ -2540,7 +2541,8 @@ async function generateObject({
2540
2541
  const promptMessages = await convertToLanguageModelPrompt({
2541
2542
  prompt: standardizedPrompt,
2542
2543
  modelSupportsImageUrls: model.supportsImageUrls,
2543
- modelSupportsUrl: model.supportsUrl
2544
+ modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
2545
+ // support 'this' context
2544
2546
  });
2545
2547
  const inputFormat = standardizedPrompt.type;
2546
2548
  const generateResult = await retry(
@@ -2574,7 +2576,7 @@ async function generateObject({
2574
2576
  }),
2575
2577
  tracer,
2576
2578
  fn: async (span2) => {
2577
- var _a16, _b2, _c, _d, _e, _f, _g, _h;
2579
+ var _a16, _b2, _c2, _d2, _e, _f, _g, _h;
2578
2580
  const result2 = await model.doGenerate({
2579
2581
  mode: {
2580
2582
  type: "object-tool",
@@ -2594,7 +2596,7 @@ async function generateObject({
2594
2596
  });
2595
2597
  const objectText = (_b2 = (_a16 = result2.toolCalls) == null ? void 0 : _a16[0]) == null ? void 0 : _b2.args;
2596
2598
  const responseData = {
2597
- id: (_d = (_c = result2.response) == null ? void 0 : _c.id) != null ? _d : generateId3(),
2599
+ id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
2598
2600
  timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
2599
2601
  modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId
2600
2602
  };
@@ -2636,7 +2638,7 @@ async function generateObject({
2636
2638
  rawResponse = generateResult.rawResponse;
2637
2639
  logprobs = generateResult.logprobs;
2638
2640
  resultProviderMetadata = generateResult.providerMetadata;
2639
- request = (_b = generateResult.request) != null ? _b : {};
2641
+ request = (_d = generateResult.request) != null ? _d : {};
2640
2642
  response = generateResult.responseData;
2641
2643
  break;
2642
2644
  }
@@ -2994,6 +2996,7 @@ var DefaultStreamObjectResult = class {
2994
2996
  tracer,
2995
2997
  endWhenDone: false,
2996
2998
  fn: async (rootSpan) => {
2999
+ var _a15, _b;
2997
3000
  if (mode === "auto" || mode == null) {
2998
3001
  mode = model.defaultObjectGenerationMode;
2999
3002
  }
@@ -3024,7 +3027,8 @@ var DefaultStreamObjectResult = class {
3024
3027
  prompt: await convertToLanguageModelPrompt({
3025
3028
  prompt: standardizedPrompt,
3026
3029
  modelSupportsImageUrls: model.supportsImageUrls,
3027
- modelSupportsUrl: model.supportsUrl
3030
+ modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
3031
+ // support 'this' context
3028
3032
  }),
3029
3033
  providerMetadata: providerOptions,
3030
3034
  abortSignal,
@@ -3066,7 +3070,8 @@ var DefaultStreamObjectResult = class {
3066
3070
  prompt: await convertToLanguageModelPrompt({
3067
3071
  prompt: standardizedPrompt,
3068
3072
  modelSupportsImageUrls: model.supportsImageUrls,
3069
- modelSupportsUrl: model.supportsUrl
3073
+ modelSupportsUrl: (_b = model.supportsUrl) == null ? void 0 : _b.bind(model)
3074
+ // support 'this' context
3070
3075
  }),
3071
3076
  providerMetadata: providerOptions,
3072
3077
  abortSignal,
@@ -3160,7 +3165,7 @@ var DefaultStreamObjectResult = class {
3160
3165
  const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
3161
3166
  new TransformStream({
3162
3167
  async transform(chunk, controller) {
3163
- var _a15, _b, _c;
3168
+ var _a16, _b2, _c;
3164
3169
  if (isFirstChunk) {
3165
3170
  const msToFirstChunk = now2() - startTimestampMs;
3166
3171
  isFirstChunk = false;
@@ -3206,8 +3211,8 @@ var DefaultStreamObjectResult = class {
3206
3211
  switch (chunk.type) {
3207
3212
  case "response-metadata": {
3208
3213
  response = {
3209
- id: (_a15 = chunk.id) != null ? _a15 : response.id,
3210
- timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
3214
+ id: (_a16 = chunk.id) != null ? _a16 : response.id,
3215
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : response.timestamp,
3211
3216
  modelId: (_c = chunk.modelId) != null ? _c : response.modelId
3212
3217
  };
3213
3218
  break;
@@ -3804,7 +3809,7 @@ async function generateText({
3804
3809
  }),
3805
3810
  tracer,
3806
3811
  fn: async (span) => {
3807
- var _a16, _b, _c, _d, _e, _f;
3812
+ var _a16, _b, _c, _d, _e, _f, _g;
3808
3813
  const mode = {
3809
3814
  type: "regular",
3810
3815
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3836,7 +3841,8 @@ async function generateText({
3836
3841
  messages: stepInputMessages
3837
3842
  },
3838
3843
  modelSupportsImageUrls: model.supportsImageUrls,
3839
- modelSupportsUrl: model.supportsUrl
3844
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
3845
+ // support 'this' context
3840
3846
  });
3841
3847
  currentModelResponse = await retry(
3842
3848
  () => recordSpan({
@@ -3923,7 +3929,7 @@ async function generateText({
3923
3929
  })
3924
3930
  );
3925
3931
  currentToolCalls = await Promise.all(
3926
- ((_a16 = currentModelResponse.toolCalls) != null ? _a16 : []).map(
3932
+ ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
3927
3933
  (toolCall) => parseToolCall({
3928
3934
  toolCall,
3929
3935
  tools,
@@ -3958,7 +3964,7 @@ async function generateText({
3958
3964
  nextStepType = "tool-result";
3959
3965
  }
3960
3966
  }
3961
- const originalText = (_b = currentModelResponse.text) != null ? _b : "";
3967
+ const originalText = (_c = currentModelResponse.text) != null ? _c : "";
3962
3968
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
3963
3969
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
3964
3970
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -3995,10 +4001,10 @@ async function generateText({
3995
4001
  usage: currentUsage,
3996
4002
  warnings: currentModelResponse.warnings,
3997
4003
  logprobs: currentModelResponse.logprobs,
3998
- request: (_c = currentModelResponse.request) != null ? _c : {},
4004
+ request: (_d = currentModelResponse.request) != null ? _d : {},
3999
4005
  response: {
4000
4006
  ...currentModelResponse.response,
4001
- headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers,
4007
+ headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers,
4002
4008
  // deep clone msgs to avoid mutating past messages in multi-step:
4003
4009
  messages: structuredClone(responseMessages)
4004
4010
  },
@@ -4042,10 +4048,10 @@ async function generateText({
4042
4048
  finishReason: currentModelResponse.finishReason,
4043
4049
  usage,
4044
4050
  warnings: currentModelResponse.warnings,
4045
- request: (_e = currentModelResponse.request) != null ? _e : {},
4051
+ request: (_f = currentModelResponse.request) != null ? _f : {},
4046
4052
  response: {
4047
4053
  ...currentModelResponse.response,
4048
- headers: (_f = currentModelResponse.rawResponse) == null ? void 0 : _f.headers,
4054
+ headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
4049
4055
  messages: responseMessages
4050
4056
  },
4051
4057
  logprobs: currentModelResponse.logprobs,
@@ -4972,6 +4978,7 @@ var DefaultStreamTextResult = class {
4972
4978
  hasLeadingWhitespace,
4973
4979
  messageId
4974
4980
  }) {
4981
+ var _a16;
4975
4982
  const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
4976
4983
  const stepInputMessages = [
4977
4984
  ...initialPrompt.messages,
@@ -4984,7 +4991,8 @@ var DefaultStreamTextResult = class {
4984
4991
  messages: stepInputMessages
4985
4992
  },
4986
4993
  modelSupportsImageUrls: model.supportsImageUrls,
4987
- modelSupportsUrl: model.supportsUrl
4994
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
4995
+ // support 'this' context
4988
4996
  });
4989
4997
  const mode = {
4990
4998
  type: "regular",
@@ -5014,8 +5022,8 @@ var DefaultStreamTextResult = class {
5014
5022
  "ai.prompt.tools": {
5015
5023
  // convert the language model level tools:
5016
5024
  input: () => {
5017
- var _a16;
5018
- return (_a16 = mode.tools) == null ? void 0 : _a16.map((tool2) => JSON.stringify(tool2));
5025
+ var _a17;
5026
+ return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
5019
5027
  }
5020
5028
  },
5021
5029
  "ai.prompt.toolChoice": {
@@ -5101,7 +5109,7 @@ var DefaultStreamTextResult = class {
5101
5109
  transformedStream.pipeThrough(
5102
5110
  new TransformStream({
5103
5111
  async transform(chunk, controller) {
5104
- var _a16, _b, _c;
5112
+ var _a17, _b, _c;
5105
5113
  if (stepFirstChunk) {
5106
5114
  const msToFirstChunk = now2() - startTimestampMs;
5107
5115
  stepFirstChunk = false;
@@ -5164,7 +5172,7 @@ var DefaultStreamTextResult = class {
5164
5172
  }
5165
5173
  case "response-metadata": {
5166
5174
  stepResponse = {
5167
- id: (_a16 = chunk.id) != null ? _a16 : stepResponse.id,
5175
+ id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
5168
5176
  timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
5169
5177
  modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
5170
5178
  };