@posthog/ai 7.1.0 → 7.1.1
This diff reflects the changes between publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
- package/LICENSE +56 -2
- package/dist/anthropic/index.cjs +1 -1
- package/dist/anthropic/index.mjs +1 -1
- package/dist/gemini/index.cjs +1 -1
- package/dist/gemini/index.mjs +1 -1
- package/dist/index.cjs +18 -10
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +18 -10
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +18 -10
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.mjs +18 -10
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +1 -1
- package/dist/openai/index.mjs +1 -1
- package/dist/vercel/index.cjs +1 -1
- package/dist/vercel/index.mjs +1 -1
- package/package.json +5 -4
package/dist/index.mjs (CHANGED)
@@ -6,7 +6,7 @@ import { wrapLanguageModel } from 'ai';
 import AnthropicOriginal from '@anthropic-ai/sdk';
 import { GoogleGenAI } from '@google/genai';
 
-var version = "7.1.0";
+var version = "7.1.1";
 
 // Type guards for safer type checking
 const isString = value => {
@@ -3711,7 +3711,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
 eventProperties['$ai_is_error'] = true;
 } else {
 // Handle token usage
-const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
+const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output, run.provider, run.model);
 eventProperties['$ai_input_tokens'] = inputTokens;
 eventProperties['$ai_output_tokens'] = outputTokens;
 // Add additional token data to properties
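The only change in this hunk is at the call site: parseUsage now receives the run's provider and model alongside the output. A minimal stub of the widened signature (hypothetical code, not the package's internals) shows why the extra arguments matter: the usage parser can now branch on which provider produced the response.

```js
// Hypothetical stub of the new signature; provider and model are
// optional hints and may be undefined for some runs.
function parseUsage(response, provider, model) {
  const usage = response.llmOutput?.usage ?? {};
  // With provider/model available, provider-specific corrections
  // (like the Anthropic cache-token adjustment below) become possible.
  return [usage.input_tokens ?? 0, usage.output_tokens ?? 0, { provider, model }];
}

const output = { llmOutput: { usage: { input_tokens: 12, output_tokens: 34 } } };
console.log(parseUsage(output, 'anthropic', 'claude-3-5-sonnet'));
// -> [12, 34, { provider: 'anthropic', model: 'claude-3-5-sonnet' }]
```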
@@ -3858,7 +3858,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
 // Sanitize the message content to redact base64 images
 return sanitizeLangChain(messageDict);
 }
-_parseUsageModel(usage) {
+_parseUsageModel(usage, provider, model) {
 const conversionList = [['promptTokens', 'input'], ['completionTokens', 'output'], ['input_tokens', 'input'], ['output_tokens', 'output'], ['prompt_token_count', 'input'], ['candidates_token_count', 'output'], ['inputTokenCount', 'input'], ['outputTokenCount', 'output'], ['input_token_count', 'input'], ['generated_token_count', 'output']];
 const parsedUsage = conversionList.reduce((acc, [modelKey, typeKey]) => {
 const value = usage[modelKey];
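For context, _parseUsageModel folds the many provider-specific usage key names above into a single input/output pair. A condensed, standalone version of that reduce (simplified accumulator, indicative provider attributions; illustration only, not the shipped code):

```js
// Same key table as the diff; the accumulator here is simplified.
const conversionList = [
  ['promptTokens', 'input'], ['completionTokens', 'output'],             // OpenAI via LangChain
  ['input_tokens', 'input'], ['output_tokens', 'output'],                // Anthropic
  ['prompt_token_count', 'input'], ['candidates_token_count', 'output'], // Gemini
  ['inputTokenCount', 'input'], ['outputTokenCount', 'output'],          // Bedrock
  ['input_token_count', 'input'], ['generated_token_count', 'output'],
];

function normalizeUsage(usage) {
  return conversionList.reduce((acc, [modelKey, typeKey]) => {
    const value = usage[modelKey];
    if (value != null) acc[typeKey] = value; // later matching keys win
    return acc;
  }, { input: 0, output: 0 });
}

console.log(normalizeUsage({ promptTokens: 10, completionTokens: 5 })); // { input: 10, output: 5 }
console.log(normalizeUsage({ input_tokens: 7, output_tokens: 3 }));     // { input: 7, output: 3 }
```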
@@ -3925,20 +3925,28 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
 if (webSearchCount !== undefined) {
 additionalTokenData.webSearchCount = webSearchCount;
 }
-//
-// Our cost calculation expects them to be separate
-if (parsedUsage.input && additionalTokenData.cacheReadInputTokens) {
+// For Anthropic providers, LangChain reports input_tokens as the sum of input and cache read tokens.
+// Our cost calculation expects them to be separate for Anthropic, so we subtract cache tokens.
+// For other providers (OpenAI, etc.), input_tokens already excludes cache tokens as expected.
+// Match logic consistent with plugin-server: exact match on provider OR substring match on model
+let isAnthropic = false;
+if (provider && provider.toLowerCase() === 'anthropic') {
+isAnthropic = true;
+} else if (model && model.toLowerCase().includes('anthropic')) {
+isAnthropic = true;
+}
+if (isAnthropic && parsedUsage.input && additionalTokenData.cacheReadInputTokens) {
 parsedUsage.input = Math.max(parsedUsage.input - additionalTokenData.cacheReadInputTokens, 0);
 }
 return [parsedUsage.input, parsedUsage.output, additionalTokenData];
 }
-parseUsage(response) {
+parseUsage(response, provider, model) {
 let llmUsage = [0, 0, {}];
 const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage'];
 if (response.llmOutput != null) {
 const key = llmUsageKeys.find(k => response.llmOutput?.[k] != null);
 if (key) {
-llmUsage = this._parseUsageModel(response.llmOutput[key]);
+llmUsage = this._parseUsageModel(response.llmOutput[key], provider, model);
 }
 }
 // If top-level usage info was not found, try checking the generations.
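This is the substantive fix. As the new comments state, LangChain reports Anthropic's input_tokens as input plus cache-read tokens, while other providers already exclude cached tokens, so the subtraction is now gated on an Anthropic match: exact on provider, substring on model. A standalone sketch of the gate with a worked example (the model ids are illustrative):

```js
// Standalone mirror of the gating + subtraction logic from the diff.
function adjustInputTokens(input, cacheReadInputTokens, provider, model) {
  const isAnthropic =
    Boolean(provider && provider.toLowerCase() === 'anthropic') ||
    Boolean(model && model.toLowerCase().includes('anthropic'));
  if (isAnthropic && input && cacheReadInputTokens) {
    // Clamp at zero so a malformed payload can't yield negative tokens.
    return Math.max(input - cacheReadInputTokens, 0);
  }
  return input;
}

// Anthropic: 1000 reported input tokens include 800 cache reads,
// so only 200 count as fresh input for cost calculation.
console.log(adjustInputTokens(1000, 800, 'anthropic', 'claude-sonnet-4')); // 200

// Substring match on the model id also triggers the adjustment,
// e.g. a Bedrock-style id with no explicit provider set.
console.log(adjustInputTokens(1000, 800, undefined, 'us.anthropic.claude-sonnet-4')); // 200

// OpenAI-style providers pass through unchanged.
console.log(adjustInputTokens(1000, 800, 'openai', 'gpt-4o')); // 1000
```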
@@ -3947,14 +3955,14 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
 for (const genChunk of generation) {
 // Check other paths for usage information
 if (genChunk.generationInfo?.usage_metadata) {
-llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata);
+llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata, provider, model);
 return llmUsage;
 }
 const messageChunk = genChunk.generationInfo ?? {};
 const responseMetadata = messageChunk.response_metadata ?? {};
 const chunkUsage = responseMetadata['usage'] ?? responseMetadata['amazon-bedrock-invocationMetrics'] ?? messageChunk.usage_metadata;
 if (chunkUsage) {
-llmUsage = this._parseUsageModel(chunkUsage);
+llmUsage = this._parseUsageModel(chunkUsage, provider, model);
 return llmUsage;
 }
 }
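The remaining rewrites are the same parameter threading applied to parseUsage's fallback paths, which probe (in order) top-level llmOutput, each generation chunk's generationInfo.usage_metadata, and then response_metadata (a provider 'usage' object or Bedrock's invocation metrics). Fabricated response shapes that would satisfy each path, using only field names taken from the diff, for orientation:

```js
// 1. Top-level llmOutput, found via the token_usage/usage/tokenUsage probe.
const viaLlmOutput = {
  llmOutput: { token_usage: { promptTokens: 10, completionTokens: 5 } },
  generations: [],
};

// 2. Per-chunk generationInfo.usage_metadata, the first generations fallback.
const viaGenerationInfo = {
  llmOutput: null,
  generations: [[{ generationInfo: { usage_metadata: { input_tokens: 10, output_tokens: 5 } } }]],
};

// 3. response_metadata: either a provider 'usage' object or Bedrock's
// 'amazon-bedrock-invocationMetrics'.
const viaResponseMetadata = {
  llmOutput: null,
  generations: [[{
    generationInfo: {
      response_metadata: {
        'amazon-bedrock-invocationMetrics': { inputTokenCount: 10, outputTokenCount: 5 },
      },
    },
  }]],
};
```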