@posthog/ai 3.3.0 → 3.3.2

This diff shows the contents of publicly available package versions as released to their respective public registries. It is provided for informational purposes only and reflects the changes between those published versions.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,11 @@
+ # 3.3.2 - 2025-03-25
+
+ - fix: langchain name mapping
+
+ # 3.3.1 - 2025-03-13
+
+ - fix: fix vercel output mapping and token caching
+
  # 3.3.0 - 2025-03-08

  - feat: add reasoning and cache tokens to openai and anthropic
package/lib/index.cjs.js CHANGED
@@ -121,7 +121,7 @@ const sendEventToPosthog = ({
  $ai_total_cost_usd: inputCostUSD + outputCostUSD
  };
  }
- let additionalTokenValues = {
+ const additionalTokenValues = {
  ...(usage.reasoningTokens ? {
  $ai_reasoning_tokens: usage.reasoningTokens
  } : {}),
@@ -558,6 +558,45 @@ const mapVercelPrompt = prompt => {
  };
  });
  };
+ const mapVercelOutput = result => {
+ const output = {
+ ...(result.text ? {
+ text: result.text
+ } : {}),
+ ...(result.object ? {
+ object: result.object
+ } : {}),
+ ...(result.reasoning ? {
+ reasoning: result.reasoning
+ } : {}),
+ ...(result.response ? {
+ response: result.response
+ } : {}),
+ ...(result.finishReason ? {
+ finishReason: result.finishReason
+ } : {}),
+ ...(result.usage ? {
+ usage: result.usage
+ } : {}),
+ ...(result.warnings ? {
+ warnings: result.warnings
+ } : {}),
+ ...(result.providerMetadata ? {
+ toolCalls: result.providerMetadata
+ } : {})
+ };
+ // if text and no object or reasoning, return text
+ if (output.text && !output.object && !output.reasoning) {
+ return [{
+ content: output.text,
+ role: 'assistant'
+ }];
+ }
+ return [{
+ content: JSON.stringify(output),
+ role: 'assistant'
+ }];
+ };
  const extractProvider = model => {
  // vercel provider is in the format of provider.endpoint
  const provider = model.provider.toLowerCase();
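The mapVercelOutput helper above is the core of the 3.3.1 "vercel output mapping" fix: instead of sending result.text || JSON.stringify(result) (see the next hunk), the middleware builds a filtered snapshot of the result and always returns an assistant-style message array. A simplified sketch of that behavior, for orientation only (the real helper also filters the result down to the known fields shown above and is internal to the bundle, not exported; the sample inputs below are made up):

    // Simplified sketch: plain text (with no structured object or reasoning)
    // is passed through as the message content; everything else is serialized
    // so structured output, usage, and warnings still land on the event.
    const sketchMapVercelOutput = (result) => {
      if (result.text && !result.object && !result.reasoning) {
        return [{ content: result.text, role: 'assistant' }];
      }
      return [{ content: JSON.stringify(result), role: 'assistant' }];
    };

    console.log(sketchMapVercelOutput({ text: 'Hello!' }));
    // -> [ { content: 'Hello!', role: 'assistant' } ]
    console.log(sketchMapVercelOutput({ object: { city: 'Berlin' }, finishReason: 'stop' }));
    // -> [ { content: '{"object":{"city":"Berlin"},"finishReason":"stop"}', role: 'assistant' } ]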
@@ -581,14 +620,14 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
  const provider = options.posthogProviderOverride ?? extractProvider(model);
  const baseURL = ''; // cannot currently get baseURL from vercel
- let content = result.text || JSON.stringify(result);
+ const content = mapVercelOutput(result);
  // let tools = result.toolCalls
- let providerMetadata = result.providerMetadata;
- let additionalTokenValues = {
+ const providerMetadata = result.providerMetadata;
+ const additionalTokenValues = {
  ...(providerMetadata?.openai?.reasoningTokens ? {
  reasoningTokens: providerMetadata.openai.reasoningTokens
  } : {}),
- ...(providerMetadata?.openai?.cachedPromptToken ? {
+ ...(providerMetadata?.openai?.cachedPromptTokens ? {
  cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens
  } : {}),
  ...(providerMetadata?.anthropic ? {
@@ -674,8 +713,8 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  if (chunk.providerMetadata?.openai?.reasoningTokens) {
  usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
  }
- if (chunk.providerMetadata?.openai?.cachedPromptToken) {
- usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptToken;
+ if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
+ usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens;
  }
  if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
  usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens;
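Both of the preceding hunks fix the same typo: the OpenAI provider metadata key is cachedPromptTokens (plural), so the old cachedPromptToken check never matched and cached prompt tokens were dropped from the event. A hedged illustration of the lookup (field names as in the diff; the numbers are invented):

    // Hypothetical providerMetadata as surfaced on a Vercel AI SDK result or stream chunk:
    const providerMetadata = {
      openai: {
        reasoningTokens: 64,      // already handled in 3.3.0
        cachedPromptTokens: 1024, // only picked up once the key is spelled correctly
      },
    };

    const usage = {};
    if (providerMetadata?.openai?.cachedPromptTokens) {
      usage.cacheReadInputTokens = providerMetadata.openai.cachedPromptTokens;
    }
    console.log(usage); // { cacheReadInputTokens: 1024 }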
@@ -1655,9 +1694,16 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  eventProperties['$ai_is_error'] = true;
  } else {
  // Handle token usage
- const [inputTokens, outputTokens] = this.parseUsage(output);
+ const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
  eventProperties['$ai_input_tokens'] = inputTokens;
  eventProperties['$ai_output_tokens'] = outputTokens;
+ // Add additional token data to properties
+ if (additionalTokenData.cacheReadInputTokens) {
+ eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens;
+ }
+ if (additionalTokenData.reasoningTokens) {
+ eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
+ }
  // Handle generations/completions
  let completions;
  if (output.generations && Array.isArray(output.generations)) {
@@ -1696,6 +1742,8 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  for (const arg of args) {
  if (arg && typeof arg === 'object' && 'name' in arg) {
  return arg.name;
+ } else if (arg && typeof arg === 'object' && 'runName' in arg) {
+ return arg.runName;
  }
  }
  }
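This is the 3.3.2 "langchain name mapping" fix: when none of the callback arguments carries a name, the handler now falls back to runName, which LangChain forwards from the run config, so a runName supplied there can be picked up when no explicit name is present. A self-contained sketch of the fallback (the function name here is hypothetical; in the package this logic lives inside the callback handler):

    // Scan the callback arguments for an explicit `name`, then for a
    // config-level `runName`, mirroring the branch added above.
    const pickRunName = (...args) => {
      for (const arg of args) {
        if (arg && typeof arg === 'object' && 'name' in arg) return arg.name;
        if (arg && typeof arg === 'object' && 'runName' in arg) return arg.runName;
      }
      return undefined;
    };

    console.log(pickRunName({ runName: 'release-notes-chain' })); // 'release-notes-chain'
    console.log(pickRunName({ name: 'ChatOpenAI' }, { runName: 'x' })); // 'ChatOpenAI'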
@@ -1769,10 +1817,28 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  input: 0,
  output: 0
  });
- return [parsedUsage.input, parsedUsage.output];
+ // Extract additional token details like cached tokens and reasoning tokens
+ const additionalTokenData = {};
+ // Check for cached tokens in various formats
+ if (usage.prompt_tokens_details?.cached_tokens != null) {
+ additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens;
+ } else if (usage.input_token_details?.cache_read != null) {
+ additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read;
+ } else if (usage.cachedPromptTokens != null) {
+ additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens;
+ }
+ // Check for reasoning tokens in various formats
+ if (usage.completion_tokens_details?.reasoning_tokens != null) {
+ additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens;
+ } else if (usage.output_token_details?.reasoning != null) {
+ additionalTokenData.reasoningTokens = usage.output_token_details.reasoning;
+ } else if (usage.reasoningTokens != null) {
+ additionalTokenData.reasoningTokens = usage.reasoningTokens;
+ }
+ return [parsedUsage.input, parsedUsage.output, additionalTokenData];
  }
  parseUsage(response) {
- let llmUsage = [0, 0];
+ let llmUsage = [0, 0, {}];
  const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage'];
  if (response.llmOutput != null) {
  const key = llmUsageKeys.find(k => response.llmOutput?.[k] != null);
@@ -1784,6 +1850,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
  for (const generation of response.generations) {
  for (const genChunk of generation) {
+ // Check other paths for usage information
  if (genChunk.generationInfo?.usage_metadata) {
  llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata);
  return llmUsage;
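Taken together with the earlier handler change, _parseUsageModel now returns a third element carrying cache and reasoning token counts, probing the usage shapes the handler may encounter: OpenAI-style prompt_tokens_details / completion_tokens_details, LangChain-style input_token_details / output_token_details, and flat cachedPromptTokens / reasoningTokens fields. A sketch of how an OpenAI-style usage payload reduces (the payload below is a made-up example):

    // Hypothetical usage object as it might appear under llmOutput.token_usage:
    const usage = {
      prompt_tokens: 1200,
      completion_tokens: 300,
      prompt_tokens_details: { cached_tokens: 1024 },
      completion_tokens_details: { reasoning_tokens: 128 },
    };

    // Following the branches added above:
    const additionalTokenData = {};
    if (usage.prompt_tokens_details?.cached_tokens != null) {
      additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens;
    }
    if (usage.completion_tokens_details?.reasoning_tokens != null) {
      additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens;
    }
    console.log(additionalTokenData);
    // { cacheReadInputTokens: 1024, reasoningTokens: 128 }
    // ...which the callback handler then emits as $ai_cache_read_tokens and $ai_reasoning_tokens.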