@posthog/ai 3.3.0 → 3.3.1

package/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+# 3.3.1 - 2025-03-13
+
+- fix: fix vercel output mapping and token caching
+
 # 3.3.0 - 2025-03-08
 
 - feat: add reasoning and cache tokens to openai and anthropic
package/lib/index.cjs.js CHANGED
@@ -558,6 +558,45 @@ const mapVercelPrompt = prompt => {
     };
   });
 };
+const mapVercelOutput = result => {
+  let output = {
+    ...(result.text ? {
+      text: result.text
+    } : {}),
+    ...(result.object ? {
+      object: result.object
+    } : {}),
+    ...(result.reasoning ? {
+      reasoning: result.reasoning
+    } : {}),
+    ...(result.response ? {
+      response: result.response
+    } : {}),
+    ...(result.finishReason ? {
+      finishReason: result.finishReason
+    } : {}),
+    ...(result.usage ? {
+      usage: result.usage
+    } : {}),
+    ...(result.warnings ? {
+      warnings: result.warnings
+    } : {}),
+    ...(result.providerMetadata ? {
+      toolCalls: result.providerMetadata
+    } : {})
+  };
+  // if text and no object or reasoning, return text
+  if (output.text && !output.object && !output.reasoning) {
+    return [{
+      content: output.text,
+      role: 'assistant'
+    }];
+  }
+  return [{
+    content: JSON.stringify(output),
+    role: 'assistant'
+  }];
+};
 const extractProvider = model => {
   // vercel provider is in the format of provider.endpoint
   const provider = model.provider.toLowerCase();
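For context, a minimal sketch of how the new `mapVercelOutput` behaves (the sample `result` objects are hypothetical, not taken from the package): a plain text result passes through as the message content, while anything structured is JSON-stringified. Note that `providerMetadata`, when present, is stored under a `toolCalls` key.

```js
// Hypothetical Vercel AI SDK results, for illustration only.
const textResult = { text: 'Hello!', finishReason: 'stop' };
const objectResult = { object: { city: 'Berlin' }, usage: { totalTokens: 12 } };

// text present, no object/reasoning -> raw text becomes the content
mapVercelOutput(textResult);
// => [{ content: 'Hello!', role: 'assistant' }]

// structured output -> the whole mapped object is stringified
mapVercelOutput(objectResult);
// => [{ content: '{"object":{"city":"Berlin"},"usage":{"totalTokens":12}}', role: 'assistant' }]
```

This replaces the 3.3.0 fallback of `result.text || JSON.stringify(result)`, which dumped the entire raw result whenever `text` was empty.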
@@ -581,14 +620,14 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
   const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
   const provider = options.posthogProviderOverride ?? extractProvider(model);
   const baseURL = ''; // cannot currently get baseURL from vercel
-  let content = result.text || JSON.stringify(result);
+  let content = mapVercelOutput(result);
   // let tools = result.toolCalls
   let providerMetadata = result.providerMetadata;
   let additionalTokenValues = {
     ...(providerMetadata?.openai?.reasoningTokens ? {
       reasoningTokens: providerMetadata.openai.reasoningTokens
     } : {}),
-    ...(providerMetadata?.openai?.cachedPromptToken ? {
+    ...(providerMetadata?.openai?.cachedPromptTokens ? {
       cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens
     } : {}),
     ...(providerMetadata?.anthropic ? {
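The 3.3.0 guard misspelled the field as `cachedPromptToken` (singular), so it never matched the metadata the code reads and `cacheReadInputTokens` was silently dropped. A minimal sketch of the fixed path (the metadata values are made up; the field names match what the code reads):

```js
// Illustrative provider metadata from an OpenAI call via the Vercel AI SDK.
const providerMetadata = {
  openai: { reasoningTokens: 64, cachedPromptTokens: 128 }
};

const additionalTokenValues = {
  ...(providerMetadata?.openai?.reasoningTokens
    ? { reasoningTokens: providerMetadata.openai.reasoningTokens }
    : {}),
  // 3.3.1 guards on the plural key, so the cached-token count survives
  ...(providerMetadata?.openai?.cachedPromptTokens
    ? { cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens }
    : {})
};
// => { reasoningTokens: 64, cacheReadInputTokens: 128 }
```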
@@ -674,8 +713,8 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
       if (chunk.providerMetadata?.openai?.reasoningTokens) {
         usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
       }
-      if (chunk.providerMetadata?.openai?.cachedPromptToken) {
-        usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptToken;
+      if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
+        usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens;
       }
       if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
         usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens;
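The same typo existed on the streaming path, where 3.3.0 used the singular `cachedPromptToken` in both the guard and the read. A sketch of the fixed chunk handling (the chunk object is hypothetical):

```js
// Hypothetical finish chunk from a streamed call.
const chunk = {
  providerMetadata: {
    openai: { reasoningTokens: 32, cachedPromptTokens: 256 }
  }
};

const usage = {};
if (chunk.providerMetadata?.openai?.reasoningTokens) {
  usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
}
// fixed: plural key in both the guard and the read
if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
  usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens;
}
// usage => { reasoningTokens: 32, cacheReadInputTokens: 256 }
```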