@posthog/ai 6.0.1 → 6.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1319,17 +1319,17 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
         const latency = (Date.now() - startTime) / 1000;
         const providerMetadata = result.providerMetadata;
         const additionalTokenValues = {
-          ...(providerMetadata?.openai?.reasoningTokens ? {
-            reasoningTokens: providerMetadata.openai.reasoningTokens
-          } : {}),
-          ...(providerMetadata?.openai?.cachedPromptTokens ? {
-            cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens
-          } : {}),
           ...(providerMetadata?.anthropic ? {
-            cacheReadInputTokens: providerMetadata.anthropic.cacheReadInputTokens,
             cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
           } : {})
         };
+        const usage = {
+          inputTokens: result.usage.inputTokens,
+          outputTokens: result.usage.outputTokens,
+          reasoningTokens: result.usage.reasoningTokens,
+          cacheReadInputTokens: result.usage.cachedInputTokens,
+          ...additionalTokenValues
+        };
         await sendEventToPosthog({
           client: phClient,
           distinctId: options.posthogDistinctId,
@@ -1342,11 +1342,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           baseURL,
           params: mergedParams,
           httpStatus: 200,
-          usage: {
-            inputTokens: result.usage.inputTokens,
-            outputTokens: result.usage.outputTokens,
-            ...additionalTokenValues
-          },
+          usage,
           tools: availableTools,
           captureImmediate: options.posthogCaptureImmediate
         });
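For reference, the consolidated non-streaming mapping above boils down to the following sketch. The types and the buildUsage helper are local stand-ins for illustration, not exports of @posthog/ai; the field names match the diff (AI SDK v5 reports reasoningTokens and cachedInputTokens on the normalized result.usage, so only Anthropic's cache-creation count still has to be read from provider metadata).

    // Local stand-in types; the real code reads these off the AI SDK v5 result.
    type SdkUsage = {
      inputTokens?: number;
      outputTokens?: number;
      reasoningTokens?: number;
      cachedInputTokens?: number;
    };

    type ProviderMetadata = {
      anthropic?: { cacheCreationInputTokens?: number };
    };

    // Hypothetical helper mirroring the inline construction in the diff.
    function buildUsage(usage: SdkUsage, providerMetadata?: ProviderMetadata) {
      return {
        inputTokens: usage.inputTokens,
        outputTokens: usage.outputTokens,
        reasoningTokens: usage.reasoningTokens,
        cacheReadInputTokens: usage.cachedInputTokens,
        ...(providerMetadata?.anthropic
          ? { cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens }
          : {}),
      };
    }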
@@ -1408,22 +1404,19 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
             reasoningText += chunk.delta; // New in v5
           }
           if (chunk.type === 'finish') {
+            const providerMetadata = chunk.providerMetadata;
+            const additionalTokenValues = {
+              ...(providerMetadata?.anthropic ? {
+                cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
+              } : {})
+            };
             usage = {
               inputTokens: chunk.usage?.inputTokens,
-              outputTokens: chunk.usage?.outputTokens
+              outputTokens: chunk.usage?.outputTokens,
+              reasoningTokens: chunk.usage?.reasoningTokens,
+              cacheReadInputTokens: chunk.usage?.cachedInputTokens,
+              ...additionalTokenValues
             };
-            if (chunk.providerMetadata?.openai?.reasoningTokens) {
-              usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
-            }
-            if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
-              usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens;
-            }
-            if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
-              usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens;
-            }
-            if (chunk.providerMetadata?.anthropic?.cacheCreationInputTokens) {
-              usage.cacheCreationInputTokens = chunk.providerMetadata.anthropic.cacheCreationInputTokens;
-            }
           }
           controller.enqueue(chunk);
         },
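The streaming path applies the same consolidation once the finish chunk arrives. With the hypothetical buildUsage helper sketched above, the finish branch reduces to roughly:

    // Sketch only: chunk is the AI SDK v5 stream part with type === 'finish'.
    if (chunk.type === 'finish') {
      usage = buildUsage(chunk.usage ?? {}, chunk.providerMetadata);
    }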
@@ -1722,7 +1715,9 @@ class WrappedModels {
           httpStatus: 200,
           usage: {
             inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
-            outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0
+            outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
+            reasoningTokens: response.usageMetadata?.thoughtsTokenCount ?? 0,
+            cacheReadInputTokens: response.usageMetadata?.cachedContentTokenCount ?? 0
           },
           tools: availableTools,
           captureImmediate: posthogCaptureImmediate
@@ -1778,7 +1773,9 @@ class WrappedModels {
         if (chunk.usageMetadata) {
           usage = {
             inputTokens: chunk.usageMetadata.promptTokenCount ?? 0,
-            outputTokens: chunk.usageMetadata.candidatesTokenCount ?? 0
+            outputTokens: chunk.usageMetadata.candidatesTokenCount ?? 0,
+            reasoningTokens: chunk.usageMetadata.thoughtsTokenCount ?? 0,
+            cacheReadInputTokens: chunk.usageMetadata.cachedContentTokenCount ?? 0
           };
         }
         yield chunk;
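The two WrappedModels hunks make the same mapping in both the non-streaming and streaming Gemini paths. A minimal sketch, with GeminiUsageMetadata as a local stand-in for the SDK's usageMetadata shape and mapGeminiUsage a hypothetical helper, not part of the package:

    type GeminiUsageMetadata = {
      promptTokenCount?: number;
      candidatesTokenCount?: number;
      thoughtsTokenCount?: number;
      cachedContentTokenCount?: number;
    };

    function mapGeminiUsage(usageMetadata?: GeminiUsageMetadata) {
      return {
        inputTokens: usageMetadata?.promptTokenCount ?? 0,
        outputTokens: usageMetadata?.candidatesTokenCount ?? 0,
        // New in 6.1.0: thinking and cached-content token counts are captured too.
        reasoningTokens: usageMetadata?.thoughtsTokenCount ?? 0,
        cacheReadInputTokens: usageMetadata?.cachedContentTokenCount ?? 0,
      };
    }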