@posthog/ai 3.3.1 → 3.3.2

package/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ # 3.3.2 - 2025-03-25
+
+ - fix: langchain name mapping
+
  # 3.3.1 - 2025-03-13

  - fix: fix vercel output mapping and token caching
package/lib/index.cjs.js CHANGED
@@ -121,7 +121,7 @@ const sendEventToPosthog = ({
        $ai_total_cost_usd: inputCostUSD + outputCostUSD
      };
    }
-   let additionalTokenValues = {
+   const additionalTokenValues = {
      ...(usage.reasoningTokens ? {
        $ai_reasoning_tokens: usage.reasoningTokens
      } : {}),
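Note: the `let` → `const` changes in this and the next two hunks are safe because each object is built in a single expression with conditional spreads and never reassigned. A minimal standalone sketch of the pattern (the `usage` input is invented for illustration):

```js
// Invented usage payload; only fields that are present get spread in.
const usage = { reasoningTokens: 128, cacheReadInputTokens: undefined };

const additionalTokenValues = {
  ...(usage.reasoningTokens ? { $ai_reasoning_tokens: usage.reasoningTokens } : {}),
  ...(usage.cacheReadInputTokens ? { $ai_cache_read_tokens: usage.cacheReadInputTokens } : {}),
};

console.log(additionalTokenValues); // { $ai_reasoning_tokens: 128 }
```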
@@ -559,7 +559,7 @@ const mapVercelPrompt = prompt => {
  });
};
const mapVercelOutput = result => {
- let output = {
+ const output = {
    ...(result.text ? {
      text: result.text
    } : {}),
@@ -620,10 +620,10 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
      const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
      const provider = options.posthogProviderOverride ?? extractProvider(model);
      const baseURL = ''; // cannot currently get baseURL from vercel
-     let content = mapVercelOutput(result);
+     const content = mapVercelOutput(result);
      // let tools = result.toolCalls
-     let providerMetadata = result.providerMetadata;
-     let additionalTokenValues = {
+     const providerMetadata = result.providerMetadata;
+     const additionalTokenValues = {
        ...(providerMetadata?.openai?.reasoningTokens ? {
          reasoningTokens: providerMetadata.openai.reasoningTokens
        } : {}),
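Note: this middleware pulls OpenAI-specific counters off the Vercel result's `providerMetadata` via optional chaining, so providers that send no metadata fall through harmlessly. A rough sketch of the shape the code expects (field values invented):

```js
// Invented result shape; only the providerMetadata path matters here.
const result = {
  text: 'done',
  providerMetadata: {
    openai: { reasoningTokens: 256 },
  },
};

// Safe for providers that omit metadata entirely.
const reasoningTokens = result.providerMetadata?.openai?.reasoningTokens ?? 0;
console.log(reasoningTokens); // 256
```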
@@ -1694,9 +1694,16 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
      eventProperties['$ai_is_error'] = true;
    } else {
      // Handle token usage
-     const [inputTokens, outputTokens] = this.parseUsage(output);
+     const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
      eventProperties['$ai_input_tokens'] = inputTokens;
      eventProperties['$ai_output_tokens'] = outputTokens;
+     // Add additional token data to properties
+     if (additionalTokenData.cacheReadInputTokens) {
+       eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens;
+     }
+     if (additionalTokenData.reasoningTokens) {
+       eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
+     }
      // Handle generations/completions
      let completions;
      if (output.generations && Array.isArray(output.generations)) {
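Note: `parseUsage` now returns a third element, and the handler maps its fields onto the `$ai_cache_read_tokens` and `$ai_reasoning_tokens` event properties. A sketch of the effect, with a stand-in return value in place of a real LLM output:

```js
// Stand-in for what this.parseUsage(output) might return after this change.
const [inputTokens, outputTokens, additionalTokenData] =
  [1200, 350, { cacheReadInputTokens: 800, reasoningTokens: 64 }];

const eventProperties = {
  $ai_input_tokens: inputTokens,
  $ai_output_tokens: outputTokens,
};
if (additionalTokenData.cacheReadInputTokens) {
  eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens;
}
if (additionalTokenData.reasoningTokens) {
  eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
}
// eventProperties: { $ai_input_tokens: 1200, $ai_output_tokens: 350,
//                    $ai_cache_read_tokens: 800, $ai_reasoning_tokens: 64 }
```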
@@ -1735,6 +1742,8 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
    for (const arg of args) {
      if (arg && typeof arg === 'object' && 'name' in arg) {
        return arg.name;
+     } else if (arg && typeof arg === 'object' && 'runName' in arg) {
+       return arg.runName;
      }
    }
  }
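Note: this is the "langchain name mapping" fix from the changelog. LangChain run configs can carry the display name under `runName` (e.g. `chain.invoke(input, { runName: 'summarize-step' })`), which the old scan missed because it only checked `name`. A minimal sketch, using a hypothetical `getNameFromArgs` helper standing in for the handler's private method:

```js
// Hypothetical helper mirroring the handler's scan over callback args.
function getNameFromArgs(...args) {
  for (const arg of args) {
    if (arg && typeof arg === 'object' && 'name' in arg) {
      return arg.name;
    } else if (arg && typeof arg === 'object' && 'runName' in arg) {
      return arg.runName; // new fallback for LangChain's runName config key
    }
  }
  return undefined;
}

console.log(getNameFromArgs({ runName: 'summarize-step' })); // 'summarize-step'
console.log(getNameFromArgs({ name: 'my-chain' }));          // 'my-chain'
```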
@@ -1808,10 +1817,28 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
      input: 0,
      output: 0
    });
-   return [parsedUsage.input, parsedUsage.output];
+   // Extract additional token details like cached tokens and reasoning tokens
+   const additionalTokenData = {};
+   // Check for cached tokens in various formats
+   if (usage.prompt_tokens_details?.cached_tokens != null) {
+     additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens;
+   } else if (usage.input_token_details?.cache_read != null) {
+     additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read;
+   } else if (usage.cachedPromptTokens != null) {
+     additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens;
+   }
+   // Check for reasoning tokens in various formats
+   if (usage.completion_tokens_details?.reasoning_tokens != null) {
+     additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens;
+   } else if (usage.output_token_details?.reasoning != null) {
+     additionalTokenData.reasoningTokens = usage.output_token_details.reasoning;
+   } else if (usage.reasoningTokens != null) {
+     additionalTokenData.reasoningTokens = usage.reasoningTokens;
+   }
+   return [parsedUsage.input, parsedUsage.output, additionalTokenData];
  }
  parseUsage(response) {
-   let llmUsage = [0, 0];
+   let llmUsage = [0, 0, {}];
    const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage'];
    if (response.llmOutput != null) {
      const key = llmUsageKeys.find(k => response.llmOutput?.[k] != null);
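Note: the new `_parseUsageModel` branches normalize cache and reasoning counters across the usage formats different providers emit: OpenAI-style `prompt_tokens_details.cached_tokens`, LangChain-style `input_token_details.cache_read`, and camelCase `cachedPromptTokens`. A sketch with a hypothetical helper mirroring the fallback order (payloads invented):

```js
// Three provider spellings that should map to the same field.
const openaiStyle = { prompt_tokens_details: { cached_tokens: 800 } };
const langchainStyle = { input_token_details: { cache_read: 800 } };
const camelCaseStyle = { cachedPromptTokens: 800 };

// Hypothetical helper mirroring the fallback order in _parseUsageModel.
function extractCacheReadTokens(usage) {
  if (usage.prompt_tokens_details?.cached_tokens != null) {
    return usage.prompt_tokens_details.cached_tokens;
  } else if (usage.input_token_details?.cache_read != null) {
    return usage.input_token_details.cache_read;
  } else if (usage.cachedPromptTokens != null) {
    return usage.cachedPromptTokens;
  }
  return undefined;
}

for (const u of [openaiStyle, langchainStyle, camelCaseStyle]) {
  console.log(extractCacheReadTokens(u)); // 800 each time
}
```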
@@ -1823,6 +1850,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
    if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
      for (const generation of response.generations) {
        for (const genChunk of generation) {
+         // Check other paths for usage information
          if (genChunk.generationInfo?.usage_metadata) {
            llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata);
            return llmUsage;
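Note: this fallback only runs when `llmOutput` carried no usable counts (`llmUsage[0] === 0 && llmUsage[1] === 0`), walking the generations for per-chunk metadata instead. A rough sketch of a response shape that exercises this path (structure inferred from the visible code, values invented):

```js
// Response whose usage lives on the generation chunk, not on llmOutput.
const response = {
  llmOutput: null,
  generations: [[
    {
      text: 'Hello!',
      generationInfo: {
        usage_metadata: { input_tokens: 12, output_tokens: 3 },
      },
    },
  ]],
};
// parseUsage finds genChunk.generationInfo.usage_metadata on the inner
// chunk and hands it to _parseUsageModel.
```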