@posthog/ai 3.3.1 → 3.3.2

package/lib/index.esm.js CHANGED
@@ -94,7 +94,7 @@ const sendEventToPosthog = ({
  $ai_total_cost_usd: inputCostUSD + outputCostUSD
  };
  }
- let additionalTokenValues = {
+ const additionalTokenValues = {
  ...(usage.reasoningTokens ? {
  $ai_reasoning_tokens: usage.reasoningTokens
  } : {}),
@@ -532,7 +532,7 @@ const mapVercelPrompt = prompt => {
  });
  };
  const mapVercelOutput = result => {
- let output = {
+ const output = {
  ...(result.text ? {
  text: result.text
  } : {}),
@@ -593,10 +593,10 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
  const provider = options.posthogProviderOverride ?? extractProvider(model);
  const baseURL = ''; // cannot currently get baseURL from vercel
- let content = mapVercelOutput(result);
+ const content = mapVercelOutput(result);
  // let tools = result.toolCalls
- let providerMetadata = result.providerMetadata;
- let additionalTokenValues = {
+ const providerMetadata = result.providerMetadata;
+ const additionalTokenValues = {
  ...(providerMetadata?.openai?.reasoningTokens ? {
  reasoningTokens: providerMetadata.openai.reasoningTokens
  } : {}),
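
The `additionalTokenValues` spread in this hunk only adds a key when the Vercel AI SDK's provider metadata actually carries an OpenAI reasoning-token count. A minimal sketch of the pattern with an illustrative `result` object (only the `providerMetadata.openai.reasoningTokens` path comes from the diff; the values are made up):

// Illustrative result shape; only the providerMetadata path is taken from the diff.
const result = {
  providerMetadata: {
    openai: { reasoningTokens: 128 }
  }
};
// Conditional spread: the key is included only when the count is present and non-zero.
const additionalTokenValues = {
  ...(result.providerMetadata?.openai?.reasoningTokens ? {
    reasoningTokens: result.providerMetadata.openai.reasoningTokens
  } : {})
};
console.log(additionalTokenValues); // { reasoningTokens: 128 }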
@@ -1667,9 +1667,16 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  eventProperties['$ai_is_error'] = true;
  } else {
  // Handle token usage
- const [inputTokens, outputTokens] = this.parseUsage(output);
+ const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
  eventProperties['$ai_input_tokens'] = inputTokens;
  eventProperties['$ai_output_tokens'] = outputTokens;
+ // Add additional token data to properties
+ if (additionalTokenData.cacheReadInputTokens) {
+   eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens;
+ }
+ if (additionalTokenData.reasoningTokens) {
+   eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
+ }
  // Handle generations/completions
  let completions;
  if (output.generations && Array.isArray(output.generations)) {
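
`parseUsage` now returns a three-element tuple, and the handler guards each optional count before attaching it, so events never carry zero or missing token fields. A minimal sketch of that flow (the tuple values are hypothetical; only the property names come from the diff):

// Hypothetical tuple, matching the shape parseUsage now returns.
const [inputTokens, outputTokens, additionalTokenData] =
  [1200, 350, { cacheReadInputTokens: 800, reasoningTokens: 64 }];

const eventProperties = {};
eventProperties['$ai_input_tokens'] = inputTokens;
eventProperties['$ai_output_tokens'] = outputTokens;
// Truthiness guards skip zero/undefined counts, as in the hunk above.
if (additionalTokenData.cacheReadInputTokens) {
  eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens;
}
if (additionalTokenData.reasoningTokens) {
  eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
}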
@@ -1708,6 +1715,8 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  for (const arg of args) {
  if (arg && typeof arg === 'object' && 'name' in arg) {
  return arg.name;
+ } else if (arg && typeof arg === 'object' && 'runName' in arg) {
+   return arg.runName;
  }
  }
  }
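
This hunk adds a fallback: when a callback argument has no `name` field, the handler now also accepts a `runName` field, the option LangChain uses to label runs. A self-contained sketch of the lookup, with an illustrative args array:

// Sketch of the lookup with the new fallback; the args values are illustrative.
function getNameFromArgs(args) {
  for (const arg of args) {
    if (arg && typeof arg === 'object' && 'name' in arg) {
      return arg.name;
    } else if (arg && typeof arg === 'object' && 'runName' in arg) {
      // New in 3.3.2: fall back to runName when no explicit name exists.
      return arg.runName;
    }
  }
}
console.log(getNameFromArgs([{ runName: 'my-chain' }])); // 'my-chain'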
@@ -1781,10 +1790,28 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  input: 0,
  output: 0
  });
- return [parsedUsage.input, parsedUsage.output];
+ // Extract additional token details like cached tokens and reasoning tokens
+ const additionalTokenData = {};
+ // Check for cached tokens in various formats
+ if (usage.prompt_tokens_details?.cached_tokens != null) {
+   additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens;
+ } else if (usage.input_token_details?.cache_read != null) {
+   additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read;
+ } else if (usage.cachedPromptTokens != null) {
+   additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens;
+ }
+ // Check for reasoning tokens in various formats
+ if (usage.completion_tokens_details?.reasoning_tokens != null) {
+   additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens;
+ } else if (usage.output_token_details?.reasoning != null) {
+   additionalTokenData.reasoningTokens = usage.output_token_details.reasoning;
+ } else if (usage.reasoningTokens != null) {
+   additionalTokenData.reasoningTokens = usage.reasoningTokens;
+ }
+ return [parsedUsage.input, parsedUsage.output, additionalTokenData];
  }
  parseUsage(response) {
- let llmUsage = [0, 0];
+ let llmUsage = [0, 0, {}];
  const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage'];
  if (response.llmOutput != null) {
  const key = llmUsageKeys.find(k => response.llmOutput?.[k] != null);
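
`_parseUsageModel` now probes three usage shapes for the same two signals: OpenAI-style snake_case (`prompt_tokens_details.cached_tokens`, `completion_tokens_details.reasoning_tokens`), LangChain-style token details (`input_token_details.cache_read`, `output_token_details.reasoning`), and camelCase fields (`cachedPromptTokens`, `reasoningTokens`). A condensed sketch of the same precedence chain, with illustrative payloads; nullish coalescing replaces the diff's if/else ladder but keeps its order:

// Illustrative payloads for the three shapes the diff handles.
const openaiUsage = {
  prompt_tokens_details: { cached_tokens: 800 },
  completion_tokens_details: { reasoning_tokens: 64 }
};
const langchainUsage = {
  input_token_details: { cache_read: 800 },
  output_token_details: { reasoning: 64 }
};
const camelCaseUsage = { cachedPromptTokens: 800, reasoningTokens: 64 };

function extractAdditionalTokenData(usage) {
  const additionalTokenData = {};
  // Same precedence as the diff: snake_case first, then token_details, then camelCase.
  const cacheRead = usage.prompt_tokens_details?.cached_tokens
    ?? usage.input_token_details?.cache_read
    ?? usage.cachedPromptTokens;
  if (cacheRead != null) additionalTokenData.cacheReadInputTokens = cacheRead;
  const reasoning = usage.completion_tokens_details?.reasoning_tokens
    ?? usage.output_token_details?.reasoning
    ?? usage.reasoningTokens;
  if (reasoning != null) additionalTokenData.reasoningTokens = reasoning;
  return additionalTokenData;
}

// Each prints { cacheReadInputTokens: 800, reasoningTokens: 64 }.
console.log(extractAdditionalTokenData(openaiUsage));
console.log(extractAdditionalTokenData(langchainUsage));
console.log(extractAdditionalTokenData(camelCaseUsage));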
@@ -1796,6 +1823,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
  for (const generation of response.generations) {
  for (const genChunk of generation) {
+ // Check other paths for usage information
  if (genChunk.generationInfo?.usage_metadata) {
  llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata);
  return llmUsage;