@posthog/ai 7.0.0 → 7.1.1
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/LICENSE +56 -2
- package/dist/anthropic/index.cjs +2 -2
- package/dist/anthropic/index.cjs.map +1 -1
- package/dist/anthropic/index.mjs +2 -2
- package/dist/anthropic/index.mjs.map +1 -1
- package/dist/gemini/index.cjs +2 -2
- package/dist/gemini/index.cjs.map +1 -1
- package/dist/gemini/index.mjs +2 -2
- package/dist/gemini/index.mjs.map +1 -1
- package/dist/index.cjs +155 -11
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +19 -0
- package/dist/index.mjs +155 -11
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +19 -11
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.mjs +19 -11
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +155 -2
- package/dist/openai/index.cjs.map +1 -1
- package/dist/openai/index.d.ts +20 -1
- package/dist/openai/index.mjs +154 -3
- package/dist/openai/index.mjs.map +1 -1
- package/dist/vercel/index.cjs +2 -2
- package/dist/vercel/index.cjs.map +1 -1
- package/dist/vercel/index.mjs +2 -2
- package/dist/vercel/index.mjs.map +1 -1
- package/package.json +6 -5
package/dist/langchain/index.mjs
CHANGED
```diff
@@ -1,7 +1,7 @@
 import 'buffer';
 import * as uuid from 'uuid';
 
-var version = "7.0.0";
+var version = "7.1.1";
 
 // Type guards for safer type checking
 
```
```diff
@@ -42,7 +42,7 @@ const getModelParams = params => {
     return {};
   }
   const modelParams = {};
-  const paramKeys = ['temperature', 'max_tokens', 'max_completion_tokens', 'top_p', 'frequency_penalty', 'presence_penalty', 'n', 'stop', 'stream', 'streaming'];
+  const paramKeys = ['temperature', 'max_tokens', 'max_completion_tokens', 'top_p', 'frequency_penalty', 'presence_penalty', 'n', 'stop', 'stream', 'streaming', 'language', 'response_format', 'timestamp_granularities'];
   for (const key of paramKeys) {
     if (key in params && params[key] !== undefined) {
       modelParams[key] = params[key];
```
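Note: the three added keys ('language', 'response_format', 'timestamp_granularities') match OpenAI-style audio transcription parameters, so getModelParams now retains them when a caller supplies them. A minimal sketch of the pass-through behavior (abridged key list; the sample request is illustrative, not taken from the package):

```js
// Abridged re-statement of the loop above: only keys that are present and defined are copied.
const pickModelParams = (params) => {
  const modelParams = {};
  const paramKeys = ['temperature', 'max_tokens', 'language', 'response_format', 'timestamp_granularities'];
  for (const key of paramKeys) {
    if (key in params && params[key] !== undefined) {
      modelParams[key] = params[key];
    }
  }
  return modelParams;
};

// A transcription-style request now keeps its audio parameters in the captured model params:
pickModelParams({ language: 'en', response_format: 'verbose_json', timestamp_granularities: ['word'] });
// => { language: 'en', response_format: 'verbose_json', timestamp_granularities: ['word'] }
```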
```diff
@@ -933,7 +933,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       eventProperties['$ai_is_error'] = true;
     } else {
       // Handle token usage
-      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
+      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output, run.provider, run.model);
       eventProperties['$ai_input_tokens'] = inputTokens;
       eventProperties['$ai_output_tokens'] = outputTokens;
 
```
```diff
@@ -1083,7 +1083,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     // Sanitize the message content to redact base64 images
     return sanitizeLangChain(messageDict);
   }
-  _parseUsageModel(usage) {
+  _parseUsageModel(usage, provider, model) {
     const conversionList = [['promptTokens', 'input'], ['completionTokens', 'output'], ['input_tokens', 'input'], ['output_tokens', 'output'], ['prompt_token_count', 'input'], ['candidates_token_count', 'output'], ['inputTokenCount', 'input'], ['outputTokenCount', 'output'], ['input_token_count', 'input'], ['generated_token_count', 'output']];
     const parsedUsage = conversionList.reduce((acc, [modelKey, typeKey]) => {
       const value = usage[modelKey];
```
```diff
@@ -1156,20 +1156,28 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       additionalTokenData.webSearchCount = webSearchCount;
     }
 
-    //
-    // Our cost calculation expects them to be separate
-    if (parsedUsage.input && additionalTokenData.cacheReadInputTokens) {
+    // For Anthropic providers, LangChain reports input_tokens as the sum of input and cache read tokens.
+    // Our cost calculation expects them to be separate for Anthropic, so we subtract cache tokens.
+    // For other providers (OpenAI, etc.), input_tokens already excludes cache tokens as expected.
+    // Match logic consistent with plugin-server: exact match on provider OR substring match on model
+    let isAnthropic = false;
+    if (provider && provider.toLowerCase() === 'anthropic') {
+      isAnthropic = true;
+    } else if (model && model.toLowerCase().includes('anthropic')) {
+      isAnthropic = true;
+    }
+    if (isAnthropic && parsedUsage.input && additionalTokenData.cacheReadInputTokens) {
       parsedUsage.input = Math.max(parsedUsage.input - additionalTokenData.cacheReadInputTokens, 0);
     }
     return [parsedUsage.input, parsedUsage.output, additionalTokenData];
   }
-  parseUsage(response) {
+  parseUsage(response, provider, model) {
     let llmUsage = [0, 0, {}];
     const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage'];
     if (response.llmOutput != null) {
       const key = llmUsageKeys.find(k => response.llmOutput?.[k] != null);
       if (key) {
-        llmUsage = this._parseUsageModel(response.llmOutput[key]);
+        llmUsage = this._parseUsageModel(response.llmOutput[key], provider, model);
       }
     }
 
```
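An illustration of the new Anthropic branch with made-up numbers (values are not from the package):

```js
// Illustrative values only: an Anthropic run where LangChain reports
// input_tokens = 1000 and cache_read_input_tokens = 800.
const reportedInput = 1000;
const cacheReadInputTokens = 800;
const adjustedInput = Math.max(reportedInput - cacheReadInputTokens, 0);
// adjustedInput === 200, so $ai_input_tokens records only the non-cached input tokens.
// For any other provider (e.g. 'openai'), the reported input_tokens is left unchanged.
```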
```diff
@@ -1179,14 +1187,14 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     for (const genChunk of generation) {
       // Check other paths for usage information
       if (genChunk.generationInfo?.usage_metadata) {
-        llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata);
+        llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata, provider, model);
         return llmUsage;
       }
       const messageChunk = genChunk.generationInfo ?? {};
       const responseMetadata = messageChunk.response_metadata ?? {};
       const chunkUsage = responseMetadata['usage'] ?? responseMetadata['amazon-bedrock-invocationMetrics'] ?? messageChunk.usage_metadata;
       if (chunkUsage) {
-        llmUsage = this._parseUsageModel(chunkUsage);
+        llmUsage = this._parseUsageModel(chunkUsage, provider, model);
         return llmUsage;
       }
     }
```
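For context, a minimal sketch of how the callback handler is typically attached so that provider and model information reaches the new parseUsage(response, provider, model) path. The import path and constructor options are assumptions based on the package's documented LangChain integration; check the README for the exact API:

```js
import { PostHog } from 'posthog-node';
import { LangChainCallbackHandler } from '@posthog/ai/langchain'; // assumed subpath export (dist/langchain)
import { ChatAnthropic } from '@langchain/anthropic';

const phClient = new PostHog('<ph_project_api_key>', { host: 'https://us.i.posthog.com' });
// Assumed constructor shape; consult the package README for the exact options.
const callbackHandler = new LangChainCallbackHandler({ client: phClient });

const chat = new ChatAnthropic({ model: 'claude-3-5-sonnet-latest' });
// On an Anthropic run, the handler now subtracts cache-read tokens from $ai_input_tokens
// before the generation event is captured.
await chat.invoke('Hello', { callbacks: [callbackHandler] });
await phClient.shutdown();
```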