@posthog/ai 7.3.1 → 7.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.cjs +7 -3
- package/dist/anthropic/index.cjs.map +1 -1
- package/dist/anthropic/index.mjs +1 -1
- package/dist/gemini/index.cjs +1 -1
- package/dist/gemini/index.mjs +1 -1
- package/dist/index.cjs +29 -9
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +21 -5
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +25 -7
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.mjs +22 -5
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +1 -1
- package/dist/openai/index.mjs +1 -1
- package/dist/vercel/index.cjs +1 -1
- package/dist/vercel/index.mjs +1 -1
- package/package.json +3 -3
package/dist/langchain/index.mjs
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import 'buffer';
|
|
2
2
|
import * as uuid from 'uuid';
|
|
3
3
|
|
|
4
|
-
var version = "7.3.1";
|
|
4
|
+
var version = "7.3.2";
|
|
5
5
|
|
|
6
6
|
// Type guards for safer type checking
|
|
7
7
|
|
|
@@ -950,7 +950,10 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
|
|
|
950
950
|
|
|
951
951
|
// Add additional token data to properties
|
|
952
952
|
if (additionalTokenData.cacheReadInputTokens) {
|
|
953
|
-
eventProperties['$ai_cache_read_input_tokens'] = additionalTokenData.cacheReadInputTokens;
|
|
953
|
+
eventProperties['$ai_cache_read_input_tokens'] = additionalTokenData.cacheReadInputTokens;
|
|
954
|
+
}
|
|
955
|
+
if (additionalTokenData.cacheWriteInputTokens) {
|
|
956
|
+
eventProperties['$ai_cache_creation_input_tokens'] = additionalTokenData.cacheWriteInputTokens;
|
|
954
957
|
}
|
|
955
958
|
if (additionalTokenData.reasoningTokens) {
|
|
956
959
|
eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens;
|
|
@@ -1118,6 +1121,15 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
|
|
|
1118
1121
|
additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read;
|
|
1119
1122
|
} else if (usage.cachedPromptTokens != null) {
|
|
1120
1123
|
additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens;
|
|
1124
|
+
} else if (usage.cache_read_input_tokens != null) {
|
|
1125
|
+
additionalTokenData.cacheReadInputTokens = usage.cache_read_input_tokens;
|
|
1126
|
+
}
|
|
1127
|
+
|
|
1128
|
+
// Check for cache write/creation tokens in various formats
|
|
1129
|
+
if (usage.cache_creation_input_tokens != null) {
|
|
1130
|
+
additionalTokenData.cacheWriteInputTokens = usage.cache_creation_input_tokens;
|
|
1131
|
+
} else if (usage.input_token_details?.cache_creation != null) {
|
|
1132
|
+
additionalTokenData.cacheWriteInputTokens = usage.input_token_details.cache_creation;
|
|
1121
1133
|
}
|
|
1122
1134
|
|
|
1123
1135
|
// Check for reasoning tokens in various formats
|
|
@@ -1167,8 +1179,10 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
|
|
|
1167
1179
|
additionalTokenData.webSearchCount = webSearchCount;
|
|
1168
1180
|
}
|
|
1169
1181
|
|
|
1170
|
-
// For Anthropic providers, LangChain reports input_tokens as the sum of input and cache tokens.
|
|
1182
|
+
// For Anthropic providers, LangChain reports input_tokens as the sum of all input tokens.
|
|
1171
1183
|
// Our cost calculation expects them to be separate for Anthropic, so we subtract cache tokens.
|
|
1184
|
+
// Both cache_read and cache_write tokens should be subtracted since Anthropic's raw API
|
|
1185
|
+
// reports input_tokens as tokens NOT read from or used to create a cache.
|
|
1172
1186
|
// For other providers (OpenAI, etc.), input_tokens already excludes cache tokens as expected.
|
|
1173
1187
|
// Match logic consistent with plugin-server: exact match on provider OR substring match on model
|
|
1174
1188
|
let isAnthropic = false;
|
|
@@ -1177,8 +1191,11 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
|
|
|
1177
1191
|
} else if (model && model.toLowerCase().includes('anthropic')) {
|
|
1178
1192
|
isAnthropic = true;
|
|
1179
1193
|
}
|
|
1180
|
-
if (isAnthropic && parsedUsage.input && additionalTokenData.cacheReadInputTokens) {
|
|
1181
|
-
|
|
1194
|
+
if (isAnthropic && parsedUsage.input) {
|
|
1195
|
+
const cacheTokens = (additionalTokenData.cacheReadInputTokens || 0) + (additionalTokenData.cacheWriteInputTokens || 0);
|
|
1196
|
+
if (cacheTokens > 0) {
|
|
1197
|
+
parsedUsage.input = Math.max(parsedUsage.input - cacheTokens, 0);
|
|
1198
|
+
}
|
|
1182
1199
|
}
|
|
1183
1200
|
return [parsedUsage.input, parsedUsage.output, additionalTokenData];
|
|
1184
1201
|
}
|