@posthog/ai 3.3.0 → 3.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -0
- package/lib/index.cjs.js +77 -10
- package/lib/index.cjs.js.map +1 -1
- package/lib/index.esm.js +77 -10
- package/lib/index.esm.js.map +1 -1
- package/package.json +1 -1
- package/src/langchain/callbacks.ts +39 -7
- package/src/utils.ts +1 -1
- package/src/vercel/middleware.ts +24 -6
package/lib/index.esm.js
CHANGED
```diff
@@ -94,7 +94,7 @@ const sendEventToPosthog = ({
       $ai_total_cost_usd: inputCostUSD + outputCostUSD
     };
   }
-
+  const additionalTokenValues = {
     ...(usage.reasoningTokens ? {
       $ai_reasoning_tokens: usage.reasoningTokens
     } : {}),
```
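The hunk above threads optional reasoning-token counts into the `$ai_*` event properties via a conditional spread, so absent counters never appear as `undefined` keys. A minimal sketch of the pattern, with an assumed `usage` shape (not the package's actual types):

```ts
// Sketch only: `Usage` is a hypothetical shape for illustration.
type Usage = { inputTokens: number; outputTokens: number; reasoningTokens?: number };

function buildEventProperties(usage: Usage): Record<string, number> {
  return {
    $ai_input_tokens: usage.inputTokens,
    $ai_output_tokens: usage.outputTokens,
    // Spread {} when the counter is missing so the key is simply omitted.
    ...(usage.reasoningTokens ? { $ai_reasoning_tokens: usage.reasoningTokens } : {}),
  };
}
```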
```diff
@@ -531,6 +531,45 @@ const mapVercelPrompt = prompt => {
     };
   });
 };
+const mapVercelOutput = result => {
+  const output = {
+    ...(result.text ? {
+      text: result.text
+    } : {}),
+    ...(result.object ? {
+      object: result.object
+    } : {}),
+    ...(result.reasoning ? {
+      reasoning: result.reasoning
+    } : {}),
+    ...(result.response ? {
+      response: result.response
+    } : {}),
+    ...(result.finishReason ? {
+      finishReason: result.finishReason
+    } : {}),
+    ...(result.usage ? {
+      usage: result.usage
+    } : {}),
+    ...(result.warnings ? {
+      warnings: result.warnings
+    } : {}),
+    ...(result.providerMetadata ? {
+      toolCalls: result.providerMetadata
+    } : {})
+  };
+  // if text and no object or reasoning, return text
+  if (output.text && !output.object && !output.reasoning) {
+    return [{
+      content: output.text,
+      role: 'assistant'
+    }];
+  }
+  return [{
+    content: JSON.stringify(output),
+    role: 'assistant'
+  }];
+};
 const extractProvider = model => {
   // vercel provider is in the format of provider.endpoint
   const provider = model.provider.toLowerCase();
```
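The new `mapVercelOutput` normalizes a Vercel AI SDK result into PostHog's assistant-message shape: plain text becomes the message content directly, while results carrying structured output or reasoning are JSON-stringified wholesale. Hedged examples of both branches (input values invented):

```ts
// Hypothetical results fed through mapVercelOutput:
const textResult = { text: 'Hello!' };
// -> [{ content: 'Hello!', role: 'assistant' }]

const objectResult = { object: { city: 'Berlin' }, finishReason: 'stop' };
// -> [{ content: '{"object":{"city":"Berlin"},"finishReason":"stop"}', role: 'assistant' }]
```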
```diff
@@ -554,14 +593,14 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
       const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
       const provider = options.posthogProviderOverride ?? extractProvider(model);
       const baseURL = ''; // cannot currently get baseURL from vercel
-
+      const content = mapVercelOutput(result);
       // let tools = result.toolCalls
-
-
+      const providerMetadata = result.providerMetadata;
+      const additionalTokenValues = {
         ...(providerMetadata?.openai?.reasoningTokens ? {
           reasoningTokens: providerMetadata.openai.reasoningTokens
         } : {}),
-        ...(providerMetadata?.openai?.
+        ...(providerMetadata?.openai?.cachedPromptTokens ? {
           cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens
         } : {}),
         ...(providerMetadata?.anthropic ? {
```
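Both this hunk and the streaming one below read OpenAI's cached-token count from `providerMetadata.openai.cachedPromptTokens` and surface it as `cacheReadInputTokens`, alongside the existing Anthropic path. A sketch of the extraction, assuming the metadata keys visible in the diff (not an official SDK type):

```ts
// Sketch: metadata shape assumed from the diff above.
interface ProviderMetadata {
  openai?: { reasoningTokens?: number; cachedPromptTokens?: number };
  anthropic?: { cacheReadInputTokens?: number };
}

function extraTokenValues(meta?: ProviderMetadata) {
  return {
    ...(meta?.openai?.reasoningTokens ? { reasoningTokens: meta.openai.reasoningTokens } : {}),
    ...(meta?.openai?.cachedPromptTokens ? { cacheReadInputTokens: meta.openai.cachedPromptTokens } : {}),
    ...(meta?.anthropic?.cacheReadInputTokens ? { cacheReadInputTokens: meta.anthropic.cacheReadInputTokens } : {}),
  };
}
```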
```diff
@@ -647,8 +686,8 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           if (chunk.providerMetadata?.openai?.reasoningTokens) {
             usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
           }
-          if (chunk.providerMetadata?.openai?.
-            usage.cacheReadInputTokens = chunk.providerMetadata.openai.
+          if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
+            usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens;
           }
           if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
             usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens;
```
```diff
@@ -1628,9 +1667,16 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       eventProperties['$ai_is_error'] = true;
     } else {
       // Handle token usage
-      const [inputTokens, outputTokens] = this.parseUsage(output);
+      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
       eventProperties['$ai_input_tokens'] = inputTokens;
       eventProperties['$ai_output_tokens'] = outputTokens;
+      // Add additional token data to properties
+      if (additionalTokenData.cacheReadInputTokens) {
+        eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens;
+      }
+      if (additionalTokenData.reasoningTokens) {
+        eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
+      }
       // Handle generations/completions
       let completions;
       if (output.generations && Array.isArray(output.generations)) {
```
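`parseUsage` now returns a three-element tuple; the third element carries optional cache and reasoning counters that map onto `$ai_cache_read_tokens` and `$ai_reasoning_tokens` only when present. A consuming-side sketch (the tuple values are stand-ins for a real `parseUsage` call):

```ts
type AdditionalTokenData = { cacheReadInputTokens?: number; reasoningTokens?: number };

// Stand-in for: const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
const [inputTokens, outputTokens, additionalTokenData]: [number, number, AdditionalTokenData] =
  [1200, 350, { cacheReadInputTokens: 1024 }];

const eventProperties: Record<string, number> = {
  $ai_input_tokens: inputTokens,
  $ai_output_tokens: outputTokens,
};
if (additionalTokenData.cacheReadInputTokens) {
  eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens;
}
if (additionalTokenData.reasoningTokens) {
  eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
}
```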
```diff
@@ -1669,6 +1715,8 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     for (const arg of args) {
       if (arg && typeof arg === 'object' && 'name' in arg) {
         return arg.name;
+      } else if (arg && typeof arg === 'object' && 'runName' in arg) {
+        return arg.runName;
       }
     }
   }
```
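The new branch lets the handler fall back to LangChain's `runName` config field when naming a run. For example, a caller can label an invocation explicitly (assuming `chain` is any LangChain runnable):

```ts
// The handler above can now use 'generate-summary' as the run name
// when no explicit `name` argument is present.
await chain.invoke({ topic: 'observability' }, { runName: 'generate-summary' });
```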
```diff
@@ -1742,10 +1790,28 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       input: 0,
       output: 0
     });
-
+    // Extract additional token details like cached tokens and reasoning tokens
+    const additionalTokenData = {};
+    // Check for cached tokens in various formats
+    if (usage.prompt_tokens_details?.cached_tokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens;
+    } else if (usage.input_token_details?.cache_read != null) {
+      additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read;
+    } else if (usage.cachedPromptTokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens;
+    }
+    // Check for reasoning tokens in various formats
+    if (usage.completion_tokens_details?.reasoning_tokens != null) {
+      additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens;
+    } else if (usage.output_token_details?.reasoning != null) {
+      additionalTokenData.reasoningTokens = usage.output_token_details.reasoning;
+    } else if (usage.reasoningTokens != null) {
+      additionalTokenData.reasoningTokens = usage.reasoningTokens;
+    }
+    return [parsedUsage.input, parsedUsage.output, additionalTokenData];
   }
   parseUsage(response) {
-    let llmUsage = [0, 0];
+    let llmUsage = [0, 0, {}];
     const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage'];
     if (response.llmOutput != null) {
       const key = llmUsageKeys.find(k => response.llmOutput?.[k] != null);
```
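`_parseUsageModel` now probes three spellings of the same counters: OpenAI's snake_case `*_tokens_details`, LangChain's `input_token_details`/`output_token_details`, and the camelCase keys used by the Vercel middleware above. Illustrative payloads (values invented) that all resolve to the same `additionalTokenData`:

```ts
// Each hypothetical usage payload yields
// { cacheReadInputTokens: 1024, reasoningTokens: 256 } under the checks above.
const openaiStyle = {
  prompt_tokens_details: { cached_tokens: 1024 },
  completion_tokens_details: { reasoning_tokens: 256 },
};
const langchainStyle = {
  input_token_details: { cache_read: 1024 },
  output_token_details: { reasoning: 256 },
};
const vercelStyle = { cachedPromptTokens: 1024, reasoningTokens: 256 };
```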
```diff
@@ -1757,6 +1823,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
       for (const generation of response.generations) {
         for (const genChunk of generation) {
+          // Check other paths for usage information
           if (genChunk.generationInfo?.usage_metadata) {
             llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata);
             return llmUsage;
```