ai 3.2.25 → 3.2.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -723,7 +723,8 @@ async function generateObject({
     ...prepareCallSettings(settings),
     inputFormat: validatedPrompt.type,
     prompt: convertToLanguageModelPrompt(validatedPrompt),
-    abortSignal
+    abortSignal,
+    headers
   })
 );
 if (generateResult.text === void 0) {
@@ -757,7 +758,8 @@ async function generateObject({
     ...prepareCallSettings(settings),
     inputFormat: validatedPrompt.type,
     prompt: convertToLanguageModelPrompt(validatedPrompt),
-    abortSignal
+    abortSignal,
+    headers
   })
 );
 const functionArgs = (_b = (_a = generateResult.toolCalls) == null ? void 0 : _a[0]) == null ? void 0 : _b.args;
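
Both generateObject hunks make the same change: the caller-supplied headers option is now forwarded along with abortSignal to the underlying model call. A minimal usage sketch of what that enables; the provider package, model id, schema, and header name below are illustrative assumptions, not taken from this diff:

import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

// Hypothetical call: with this release the custom header below is
// forwarded to the provider request, because generateObject now passes
// `headers` through to the model's doGenerate call.
const { object } = await generateObject({
  model: openai('gpt-4o'),
  schema: z.object({ city: z.string(), country: z.string() }),
  prompt: 'Where is the Eiffel Tower?',
  headers: { 'x-trace-id': 'example-trace-123' }, // assumed header name
});

console.log(object);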
@@ -1455,6 +1457,11 @@ async function generateText({
   let roundtripCount = 0;
   const responseMessages = [];
   const roundtrips = [];
+  const usage = {
+    completionTokens: 0,
+    promptTokens: 0,
+    totalTokens: 0
+  };
   do {
     const currentInputFormat = roundtripCount === 0 ? validatedPrompt.type : "messages";
     currentModelResponse = await retry(
@@ -1494,12 +1501,18 @@ async function generateText({
       tools,
       tracer
     });
+    const currentUsage = calculateCompletionTokenUsage(
+      currentModelResponse.usage
+    );
+    usage.completionTokens += currentUsage.completionTokens;
+    usage.promptTokens += currentUsage.promptTokens;
+    usage.totalTokens += currentUsage.totalTokens;
     roundtrips.push({
       text: (_b = currentModelResponse.text) != null ? _b : "",
       toolCalls: currentToolCalls,
       toolResults: currentToolResults,
       finishReason: currentModelResponse.finishReason,
-      usage: calculateCompletionTokenUsage(currentModelResponse.usage),
+      usage: currentUsage,
       warnings: currentModelResponse.warnings,
       logprobs: currentModelResponse.logprobs
     });
@@ -1533,7 +1546,7 @@ async function generateText({
     toolCalls: currentToolCalls,
     toolResults: currentToolResults,
     finishReason: currentModelResponse.finishReason,
-    usage: calculateCompletionTokenUsage(currentModelResponse.usage),
+    usage,
     warnings: currentModelResponse.warnings,
     rawResponse: currentModelResponse.rawResponse,
     logprobs: currentModelResponse.logprobs,
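
Taken together, the generateText hunks accumulate token usage across all tool roundtrips instead of reporting only the final model call, while each roundtrip keeps its own per-call usage. A hedged sketch of what that looks like from the caller's side; the provider, tool definition, and values are illustrative assumptions, not part of the diff:

import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const result = await generateText({
  model: openai('gpt-4o'),
  maxToolRoundtrips: 3, // allow follow-up calls after tool results
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }), // stub result
    }),
  },
  prompt: 'What is the weather in Paris?',
});

// With this release, result.usage is the sum over all roundtrips,
// while each entry in result.roundtrips still carries its per-call usage.
console.log(result.usage.totalTokens);
console.log(result.roundtrips.map((r) => r.usage.totalTokens));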