ai 4.3.9 → 4.3.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # ai

+ ## 4.3.10
+
+ ### Patch Changes
+
+ - 0432959: feat (ai): add experimental prepareStep callback to generateText
+
  ## 4.3.9

  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -2463,7 +2463,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -2518,6 +2518,28 @@ Optional specification for parsing structured outputs from the LLM response.
  */
  experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
  /**
+ Optional function that you can use to provide different settings for a step.
+
+ @param options - The options for the step.
+ @param options.steps - The steps that have been executed so far.
+ @param options.stepNumber - The number of the step that is being executed.
+ @param options.maxSteps - The maximum number of steps.
+ @param options.model - The model that is being used.
+
+ @returns An object that contains the settings for the step.
+ If you return undefined (or for undefined settings), the settings from the outer level will be used.
+ */
+ experimental_prepareStep?: (options: {
+ steps: Array<StepResult<TOOLS>>;
+ stepNumber: number;
+ maxSteps: number;
+ model: LanguageModel;
+ }) => PromiseLike<{
+ model?: LanguageModel;
+ toolChoice?: ToolChoice<TOOLS>;
+ experimental_activeTools?: Array<keyof TOOLS>;
+ } | undefined>;
+ /**
  A function that attempts to repair a tool call that failed to parse.
  */
  experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
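
The new `experimental_prepareStep` callback runs before each step of a multi-step `generateText` call and can override the model, the tool choice, and the active tools for that single step. Below is a minimal usage sketch, not part of the published package: it assumes the `@ai-sdk/openai` provider and `zod` are installed, and the `search` tool and model IDs are purely illustrative.

```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const result = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model ID
  maxSteps: 5,
  tools: {
    // hypothetical tool, for illustration only
    search: tool({
      description: 'Search the knowledge base',
      parameters: z.object({ query: z.string() }),
      execute: async ({ query }) => `results for ${query}`,
    }),
  },
  // Called before each step; any field left undefined falls back to
  // the outer-level settings above.
  experimental_prepareStep: async ({ stepNumber, maxSteps }) => {
    if (stepNumber === 0) {
      // Force the first step to call the search tool.
      return { toolChoice: { type: 'tool', toolName: 'search' } };
    }
    if (stepNumber === maxSteps - 1) {
      // Use a different model for the final step (illustrative choice).
      return { model: openai('gpt-4o') };
    }
    return undefined; // keep the outer model, toolChoice, and activeTools
  },
  prompt: 'Find and summarize the latest release notes.',
});
```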
package/dist/index.d.ts CHANGED
@@ -2463,7 +2463,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -2518,6 +2518,28 @@ Optional specification for parsing structured outputs from the LLM response.
  */
  experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
  /**
+ Optional function that you can use to provide different settings for a step.
+
+ @param options - The options for the step.
+ @param options.steps - The steps that have been executed so far.
+ @param options.stepNumber - The number of the step that is being executed.
+ @param options.maxSteps - The maximum number of steps.
+ @param options.model - The model that is being used.
+
+ @returns An object that contains the settings for the step.
+ If you return undefined (or for undefined settings), the settings from the outer level will be used.
+ */
+ experimental_prepareStep?: (options: {
+ steps: Array<StepResult<TOOLS>>;
+ stepNumber: number;
+ maxSteps: number;
+ model: LanguageModel;
+ }) => PromiseLike<{
+ model?: LanguageModel;
+ toolChoice?: ToolChoice<TOOLS>;
+ experimental_activeTools?: Array<keyof TOOLS>;
+ } | undefined>;
+ /**
  A function that attempts to repair a tool call that failed to parse.
  */
  experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
package/dist/index.js CHANGED
@@ -4175,6 +4175,7 @@ async function generateText({
  experimental_providerMetadata,
  providerOptions = experimental_providerMetadata,
  experimental_activeTools: activeTools,
+ experimental_prepareStep: prepareStep,
  experimental_repairToolCall: repairToolCall,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
@@ -4217,6 +4218,9 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": model.provider,
+ "ai.model.id": model.modelId,
  // specific settings that only make sense on the outer level:
  "ai.prompt": {
  input: () => JSON.stringify({ system, prompt, messages })
@@ -4226,11 +4230,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
- const mode = {
- type: "regular",
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
+ var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
  const callSettings = prepareCallSettings(settings);
  let currentModelResponse;
  let currentToolCalls = [];
@@ -4253,16 +4253,33 @@ async function generateText({
  ...initialPrompt.messages,
  ...responseMessages
  ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps,
+ maxSteps,
+ stepNumber: stepCount
+ }));
+ const stepToolChoice = (_a18 = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _a18 : toolChoice;
+ const stepActiveTools = (_b = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _b : activeTools;
+ const stepModel = (_c = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _c : model;
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
  type: promptFormat,
  system: initialPrompt.system,
  messages: stepInputMessages
  },
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
+ modelSupportsImageUrls: stepModel.supportsImageUrls,
+ modelSupportsUrl: (_d = stepModel.supportsUrl) == null ? void 0 : _d.bind(stepModel)
  // support 'this' context
  });
+ const mode = {
+ type: "regular",
+ ...prepareToolsAndToolChoice({
+ tools,
+ toolChoice: stepToolChoice,
+ activeTools: stepActiveTools
+ })
+ };
  currentModelResponse = await retry(
  () => recordSpan({
  name: "ai.generateText.doGenerate",
@@ -4274,6 +4291,10 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
  "ai.prompt.format": { input: () => promptFormat },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
@@ -4289,8 +4310,8 @@ async function generateText({
  input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -4303,7 +4324,7 @@ async function generateText({
  tracer,
  fn: async (span2) => {
  var _a19, _b2, _c2, _d2, _e2, _f2;
- const result = await model.doGenerate({
+ const result = await stepModel.doGenerate({
  mode,
  ...callSettings,
  inputFormat: promptFormat,
@@ -4316,7 +4337,7 @@ async function generateText({
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
+ modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId
  };
  span2.setAttributes(
  selectTelemetryAttributes({
@@ -4348,7 +4369,7 @@ async function generateText({
  })
  );
  currentToolCalls = await Promise.all(
- ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
+ ((_e = currentModelResponse.toolCalls) != null ? _e : []).map(
  (toolCall) => parseToolCall({
  toolCall,
  tools,
@@ -4383,7 +4404,7 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+ const originalText = (_f = currentModelResponse.text) != null ? _f : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -4391,7 +4412,7 @@ async function generateText({
  currentReasoningDetails = asReasoningDetails(
  currentModelResponse.reasoning
  );
- sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
+ sources.push(...(_g = currentModelResponse.sources) != null ? _g : []);
  if (stepType === "continue") {
  const lastMessage = responseMessages[responseMessages.length - 1];
  if (typeof lastMessage.content === "string") {
@@ -4423,18 +4444,18 @@ async function generateText({
  reasoning: asReasoningText(currentReasoningDetails),
  reasoningDetails: currentReasoningDetails,
  files: asFiles(currentModelResponse.files),
- sources: (_e = currentModelResponse.sources) != null ? _e : [],
+ sources: (_h = currentModelResponse.sources) != null ? _h : [],
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
- request: (_f = currentModelResponse.request) != null ? _f : {},
+ request: (_i = currentModelResponse.request) != null ? _i : {},
  response: {
  ...currentModelResponse.response,
- headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
- body: (_h = currentModelResponse.rawResponse) == null ? void 0 : _h.body,
+ headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
+ body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
  // deep clone msgs to avoid mutating past messages in multi-step:
  messages: structuredClone(responseMessages)
  },
@@ -4486,11 +4507,11 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_i = currentModelResponse.request) != null ? _i : {},
+ request: (_l = currentModelResponse.request) != null ? _l : {},
  response: {
  ...currentModelResponse.response,
- headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
- body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
+ headers: (_m = currentModelResponse.rawResponse) == null ? void 0 : _m.headers,
+ body: (_n = currentModelResponse.rawResponse) == null ? void 0 : _n.body,
  messages: responseMessages
  },
  logprobs: currentModelResponse.logprobs,
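
In the compiled output above, the callback's result is merged field by field with the outer settings before each step. De-minified, the resolution logic is equivalent to the following sketch (a readable paraphrase, not the shipped source):

```ts
// Run the callback (if provided) before the step; each returned field
// overrides the outer setting for this step only, and undefined fields
// fall back to the outer-level values.
const prepareStepResult = await prepareStep?.({
  model,
  steps,
  maxSteps,
  stepNumber: stepCount,
});

const stepToolChoice = prepareStepResult?.toolChoice ?? toolChoice;
const stepActiveTools = prepareStepResult?.experimental_activeTools ?? activeTools;
const stepModel = prepareStepResult?.model ?? model;
```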