ai 5.0.0-canary.13 → 5.0.0-canary.14

This diff compares publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -2215,8 +2215,8 @@ async function embedMany({
     }),
     tracer,
     fn: async (span) => {
-      const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
-      if (maxEmbeddingsPerCall == null) {
+      const maxEmbeddingsPerCall = await model.maxEmbeddingsPerCall;
+      if (maxEmbeddingsPerCall == null || maxEmbeddingsPerCall === Infinity) {
         const { embeddings: embeddings2, usage, response } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
@@ -3705,7 +3705,6 @@ async function standardizePrompt({
       });
     }
     return {
-      type: "prompt",
       system: prompt.system,
       messages: [
         {
@@ -3744,7 +3743,6 @@ async function standardizePrompt({
       });
     }
     return {
-      type: "messages",
       messages,
       system: prompt.system
     };
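
Note: both branches of standardizePrompt stop returning the type discriminator ("prompt" / "messages"), so callers receive one uniform shape. A simplified sketch of the shape change (not the SDK's exact types):

// Before: { type: "prompt" | "messages"; system?: string; messages: [...] }
// After: no discriminator; both input styles normalize to one shape.
type StandardizedPrompt = {
  system?: string;
  messages: Array<unknown>; // ModelMessage[] in the SDK; simplified here
};
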
@@ -4140,29 +4138,31 @@ function validateObjectGenerationInput({
 
 // core/generate-object/generate-object.ts
 var originalGenerateId = (0, import_provider_utils13.createIdGenerator)({ prefix: "aiobj", size: 24 });
-async function generateObject({
-  model,
-  enum: enumValues,
-  // rename bc enum is reserved by typescript
-  schema: inputSchema,
-  schemaName,
-  schemaDescription,
-  output = "object",
-  system,
-  prompt,
-  messages,
-  maxRetries: maxRetriesArg,
-  abortSignal,
-  headers,
-  experimental_repairText: repairText,
-  experimental_telemetry: telemetry,
-  providerOptions,
-  _internal: {
-    generateId: generateId3 = originalGenerateId,
-    currentDate = () => /* @__PURE__ */ new Date()
-  } = {},
-  ...settings
-}) {
+async function generateObject(options) {
+  const {
+    model,
+    output = "object",
+    system,
+    prompt,
+    messages,
+    maxRetries: maxRetriesArg,
+    abortSignal,
+    headers,
+    experimental_repairText: repairText,
+    experimental_telemetry: telemetry,
+    providerOptions,
+    _internal: {
+      generateId: generateId3 = originalGenerateId,
+      currentDate = () => /* @__PURE__ */ new Date()
+    } = {},
+    ...settings
+  } = options;
+  const enumValues = "enum" in options ? options.enum : void 0;
+  const {
+    schema: inputSchema,
+    schemaDescription,
+    schemaName
+  } = "schema" in options ? options : {};
   validateObjectGenerationInput({
     output,
     schema: inputSchema,
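
Note: generateObject now takes a single options object and probes it with "enum" in options / "schema" in options instead of destructuring enum and schema in the parameter list, which keeps the reserved word enum out of the signature. Call sites are unchanged; a sketch of both variants (model is a placeholder for any provider language model):

import { generateObject } from "ai";
import { z } from "zod";

declare const model: any; // placeholder: supply a provider language model

// schema variant: schema/schemaName/schemaDescription are read only
// when a "schema" key is present on the options object.
const { object: city } = await generateObject({
  model,
  schema: z.object({ name: z.string(), population: z.number() }),
  prompt: "Describe the largest city in France.",
});

// enum variant: options.enum is read via the "enum" in options check.
const { object: genre } = await generateObject({
  model,
  output: "enum",
  enum: ["action", "comedy", "drama"],
  prompt: "Classify: a detective chases a thief across Paris rooftops.",
});
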
@@ -4233,9 +4233,6 @@ async function generateObject({
         telemetry
       }),
       ...baseTelemetryAttributes,
-      "ai.prompt.format": {
-        input: () => standardizedPrompt.type
-      },
       "ai.prompt.messages": {
         input: () => JSON.stringify(promptMessages)
       },
@@ -4261,7 +4258,6 @@ async function generateObject({
       description: schemaDescription
     },
     ...prepareCallSettings(settings),
-    inputFormat: standardizedPrompt.type,
     prompt: promptMessages,
     providerOptions,
     abortSignal,
@@ -4557,29 +4553,32 @@ function now() {
 
 // core/generate-object/stream-object.ts
 var originalGenerateId2 = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
-function streamObject({
-  model,
-  schema: inputSchema,
-  schemaName,
-  schemaDescription,
-  output = "object",
-  system,
-  prompt,
-  messages,
-  maxRetries,
-  abortSignal,
-  headers,
-  experimental_telemetry: telemetry,
-  providerOptions,
-  onError,
-  onFinish,
-  _internal: {
-    generateId: generateId3 = originalGenerateId2,
-    currentDate = () => /* @__PURE__ */ new Date(),
-    now: now2 = now
-  } = {},
-  ...settings
-}) {
+function streamObject(options) {
+  const {
+    model,
+    output = "object",
+    system,
+    prompt,
+    messages,
+    maxRetries,
+    abortSignal,
+    headers,
+    experimental_telemetry: telemetry,
+    providerOptions,
+    onError,
+    onFinish,
+    _internal: {
+      generateId: generateId3 = originalGenerateId2,
+      currentDate = () => /* @__PURE__ */ new Date(),
+      now: now2 = now
+    } = {},
+    ...settings
+  } = options;
+  const {
+    schema: inputSchema,
+    schemaDescription,
+    schemaName
+  } = "schema" in options ? options : {};
   validateObjectGenerationInput({
     output,
     schema: inputSchema,
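
Note: streamObject gets the same options-object restructuring, but only extracts the schema fields ("schema" in options); there is no enum extraction here. Usage is unchanged, e.g. (model is a placeholder):

import { streamObject } from "ai";
import { z } from "zod";

declare const model: any; // placeholder: supply a provider language model

const { partialObjectStream } = streamObject({
  model,
  schema: z.object({ title: z.string() }),
  prompt: "Suggest a blog post title about TypeScript.",
});

for await (const partial of partialObjectStream) {
  console.log(partial); // incrementally completed partial objects
}
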
@@ -4692,7 +4691,6 @@ var DefaultStreamObjectResult = class {
       description: schemaDescription
     },
     ...prepareCallSettings(settings),
-    inputFormat: standardizedPrompt.type,
     prompt: await convertToLanguageModelPrompt({
       prompt: standardizedPrompt,
       supportedUrls: await model.getSupportedUrls()
@@ -4730,9 +4728,6 @@ var DefaultStreamObjectResult = class {
         telemetry
       }),
       ...baseTelemetryAttributes,
-      "ai.prompt.format": {
-        input: () => callOptions.inputFormat
-      },
       "ai.prompt.messages": {
         input: () => JSON.stringify(callOptions.prompt)
       },
@@ -5408,6 +5403,7 @@ async function generateText({
   experimental_telemetry: telemetry,
   providerOptions,
   experimental_activeTools: activeTools,
+  experimental_prepareStep: prepareStep,
   experimental_repairToolCall: repairToolCall,
   _internal: {
     generateId: generateId3 = originalGenerateId3,
@@ -5446,6 +5442,9 @@ async function generateText({
         telemetry
       }),
       ...baseTelemetryAttributes,
+      // model:
+      "ai.model.provider": model.provider,
+      "ai.model.id": model.modelId,
       // specific settings that only make sense on the outer level:
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
@@ -5455,10 +5454,8 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a17, _b, _c;
-      const toolsAndToolChoice = {
-        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
-      };
+      var _a17, _b, _c, _d, _e, _f;
+      const callSettings2 = prepareCallSettings(settings);
       let currentModelResponse;
       let currentToolCalls = [];
       let currentToolResults = [];
@@ -5475,19 +5472,29 @@ async function generateText({
       };
       let stepType = "initial";
       do {
-        const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
        const stepInputMessages = [
           ...initialPrompt.messages,
           ...responseMessages
         ];
+        const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+          model,
+          steps,
+          maxSteps,
+          stepNumber: stepCount
+        }));
         const promptMessages = await convertToLanguageModelPrompt({
           prompt: {
-            type: promptFormat,
            system: initialPrompt.system,
            messages: stepInputMessages
          },
           supportedUrls: await model.getSupportedUrls()
         });
+        const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+        const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+          tools,
+          toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+          activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
+        });
         currentModelResponse = await retry(
           () => {
             var _a18;
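
Note: this hunk wires up the new experimental_prepareStep hook. Before each step it is awaited with { model, steps, maxSteps, stepNumber } and may override the model, toolChoice, or experimental_activeTools for that step only, falling back to the call-level values. A hedged usage sketch (model variables are placeholders):

import { generateText } from "ai";

declare const cheapModel: any; // placeholders: supply provider models
declare const strongModel: any;

const result = await generateText({
  model: cheapModel,
  maxSteps: 5,
  // called before every step; return undefined to keep the defaults
  experimental_prepareStep: async ({ stepNumber }) => {
    // e.g. escalate to a stronger model after the first step:
    return stepNumber === 0 ? undefined : { model: strongModel };
  },
  prompt: "...",
});
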
@@ -5501,23 +5508,23 @@ async function generateText({
               telemetry
             }),
             ...baseTelemetryAttributes,
-            "ai.prompt.format": { input: () => promptFormat },
+            // model:
+            "ai.model.provider": stepModel.provider,
+            "ai.model.id": stepModel.modelId,
+            // prompt:
             "ai.prompt.messages": {
               input: () => JSON.stringify(promptMessages)
             },
             "ai.prompt.tools": {
               // convert the language model level tools:
-              input: () => {
-                var _a19;
-                return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
-              }
+              input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
             },
             "ai.prompt.toolChoice": {
-              input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+              input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
             },
             // standardized gen-ai llm span attributes:
-            "gen_ai.system": model.provider,
-            "gen_ai.request.model": model.modelId,
+            "gen_ai.system": stepModel.provider,
+            "gen_ai.request.model": stepModel.modelId,
             "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
             "gen_ai.request.max_tokens": settings.maxOutputTokens,
             "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -5529,11 +5536,11 @@ async function generateText({
           }),
           tracer,
           fn: async (span2) => {
-            var _a19, _b2, _c2, _d, _e, _f, _g, _h;
-            const result = await model.doGenerate({
-              ...callSettings,
-              ...toolsAndToolChoice,
-              inputFormat: promptFormat,
+            var _a19, _b2, _c2, _d2, _e2, _f2, _g, _h;
+            const result = await stepModel.doGenerate({
+              ...callSettings2,
+              tools: stepTools,
+              toolChoice: stepToolChoice,
               responseFormat: output == null ? void 0 : output.responseFormat,
               prompt: promptMessages,
               providerOptions,
@@ -5542,8 +5549,8 @@ async function generateText({
             });
             const responseData = {
               id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-              timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
-              modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+              timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId,
               headers: (_g = result.response) == null ? void 0 : _g.headers,
               body: (_h = result.response) == null ? void 0 : _h.body
             };
@@ -5619,7 +5626,7 @@ async function generateText({
             nextStepType = "tool-result";
           }
         }
-        const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
+        const originalText = (_d = extractContentText(currentModelResponse.content)) != null ? _d : "";
         const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
         text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
         const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5672,7 +5679,7 @@ async function generateText({
           finishReason: currentModelResponse.finishReason,
           usage: currentUsage,
           warnings: currentModelResponse.warnings,
-          request: (_b = currentModelResponse.request) != null ? _b : {},
+          request: (_e = currentModelResponse.request) != null ? _e : {},
           response: {
             ...currentModelResponse.response,
             // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5725,7 +5732,7 @@ async function generateText({
         finishReason: currentModelResponse.finishReason,
         usage,
         warnings: currentModelResponse.warnings,
-        request: (_c = currentModelResponse.request) != null ? _c : {},
+        request: (_f = currentModelResponse.request) != null ? _f : {},
         response: {
           ...currentModelResponse.response,
           messages: responseMessages
@@ -6795,14 +6802,12 @@ var DefaultStreamTextResult = class {
           prompt: { system, prompt, messages },
           tools
         });
-        const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
         const stepInputMessages = [
           ...initialPrompt.messages,
           ...responseMessages
         ];
         const promptMessages = await convertToLanguageModelPrompt({
           prompt: {
-            type: promptFormat,
             system: initialPrompt.system,
             messages: stepInputMessages
           },
@@ -6826,9 +6831,6 @@ var DefaultStreamTextResult = class {
             telemetry
           }),
           ...baseTelemetryAttributes,
-          "ai.prompt.format": {
-            input: () => promptFormat
-          },
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
           },
@@ -6866,7 +6868,6 @@ var DefaultStreamTextResult = class {
           result: await model.doStream({
             ...callSettings,
             ...toolsAndToolChoice,
-            inputFormat: promptFormat,
             responseFormat: output == null ? void 0 : output.responseFormat,
             prompt: promptMessages,
             providerOptions,
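
Note: taken together, these hunks remove the inputFormat field from the language-model call options of generateObject, streamObject, generateText, and streamText, along with the matching ai.prompt.format telemetry attribute. An abridged sketch of the resulting doGenerate/doStream options, limited to the fields visible in this diff (the real LanguageModelV2 call-options type has more):

type CallOptionsSketch = {
  // inputFormat: "prompt" | "messages"  <- removed in canary.14
  tools?: unknown[];
  toolChoice?: unknown;
  responseFormat?: unknown;
  prompt: unknown[]; // promptMessages
  providerOptions?: Record<string, unknown>;
  abortSignal?: AbortSignal;
  headers?: Record<string, string | undefined>;
};
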