@posthog/ai 7.0.0 → 7.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -22,7 +22,7 @@ function _interopNamespaceDefault(e) {
 
 var uuid__namespace = /*#__PURE__*/_interopNamespaceDefault(uuid);
 
-var version = "7.0.0";
+var version = "7.1.1";
 
 // Type guards for safer type checking
 
@@ -63,7 +63,7 @@ const getModelParams = params => {
     return {};
   }
   const modelParams = {};
-  const paramKeys = ['temperature', 'max_tokens', 'max_completion_tokens', 'top_p', 'frequency_penalty', 'presence_penalty', 'n', 'stop', 'stream', 'streaming'];
+  const paramKeys = ['temperature', 'max_tokens', 'max_completion_tokens', 'top_p', 'frequency_penalty', 'presence_penalty', 'n', 'stop', 'stream', 'streaming', 'language', 'response_format', 'timestamp_granularities'];
   for (const key of paramKeys) {
     if (key in params && params[key] !== undefined) {
       modelParams[key] = params[key];
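
The expanded paramKeys list captures transcription-style options (language, response_format, timestamp_granularities) alongside the existing chat parameters. A minimal sketch of what getModelParams would return for a hypothetical transcription request, assuming the function shown in the hunk above; the model name and option values are illustrative, not taken from the package:

// Illustrative input only; 'whisper-1' and the option values are made up.
const params = {
  model: 'whisper-1',                 // not in paramKeys, so not copied
  language: 'en',                     // captured as of 7.1.1
  response_format: 'verbose_json',    // captured as of 7.1.1
  timestamp_granularities: ['word'],  // captured as of 7.1.1
  temperature: 0
};
// Expected result:
// { language: 'en', response_format: 'verbose_json', timestamp_granularities: ['word'], temperature: 0 }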
@@ -954,7 +954,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       eventProperties['$ai_is_error'] = true;
     } else {
       // Handle token usage
-      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output);
+      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output, run.provider, run.model);
       eventProperties['$ai_input_tokens'] = inputTokens;
       eventProperties['$ai_output_tokens'] = outputTokens;
 
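
The call site now threads the run's provider and model into usage parsing so that provider-specific rules can be applied downstream (see the Anthropic handling in the @@ -1177 hunk below). A rough sketch of the data flow, with hypothetical run values:

// Hypothetical run values for illustration only.
const run = { provider: 'anthropic', model: 'claude-3-5-sonnet' };
// 7.0.0: this.parseUsage(output)
// 7.1.1: this.parseUsage(output, run.provider, run.model)
// When provider and model are absent, parseUsage keeps the provider-agnostic behaviour.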
@@ -1104,7 +1104,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     // Sanitize the message content to redact base64 images
     return sanitizeLangChain(messageDict);
   }
-  _parseUsageModel(usage) {
+  _parseUsageModel(usage, provider, model) {
     const conversionList = [['promptTokens', 'input'], ['completionTokens', 'output'], ['input_tokens', 'input'], ['output_tokens', 'output'], ['prompt_token_count', 'input'], ['candidates_token_count', 'output'], ['inputTokenCount', 'input'], ['outputTokenCount', 'output'], ['input_token_count', 'input'], ['generated_token_count', 'output']];
     const parsedUsage = conversionList.reduce((acc, [modelKey, typeKey]) => {
       const value = usage[modelKey];
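
conversionList maps the various provider- and framework-specific usage key names onto a normalized input/output pair. A worked example assuming a usage object with Anthropic-style snake_case keys; the numbers are made up:

// Illustrative usage payload; not taken from the package's tests.
const usage = { input_tokens: 1200, output_tokens: 300 };
// The reduce over conversionList matches ['input_tokens', 'input'] and ['output_tokens', 'output'],
// giving roughly: parsedUsage = { input: 1200, output: 300 }.
// LangChain's camelCase tokenUsage (promptTokens/completionTokens) and the
// *_token_count / *TokenCount variants flow through the same table.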
@@ -1177,20 +1177,28 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       additionalTokenData.webSearchCount = webSearchCount;
     }
 
-    // In LangChain, input_tokens is the sum of input and cache read tokens.
-    // Our cost calculation expects them to be separate, for Anthropic.
-    if (parsedUsage.input && additionalTokenData.cacheReadInputTokens) {
+    // For Anthropic providers, LangChain reports input_tokens as the sum of input and cache read tokens.
+    // Our cost calculation expects them to be separate for Anthropic, so we subtract cache tokens.
+    // For other providers (OpenAI, etc.), input_tokens already excludes cache tokens as expected.
+    // Match logic consistent with plugin-server: exact match on provider OR substring match on model
+    let isAnthropic = false;
+    if (provider && provider.toLowerCase() === 'anthropic') {
+      isAnthropic = true;
+    } else if (model && model.toLowerCase().includes('anthropic')) {
+      isAnthropic = true;
+    }
+    if (isAnthropic && parsedUsage.input && additionalTokenData.cacheReadInputTokens) {
       parsedUsage.input = Math.max(parsedUsage.input - additionalTokenData.cacheReadInputTokens, 0);
     }
     return [parsedUsage.input, parsedUsage.output, additionalTokenData];
   }
-  parseUsage(response) {
+  parseUsage(response, provider, model) {
     let llmUsage = [0, 0, {}];
     const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage'];
     if (response.llmOutput != null) {
       const key = llmUsageKeys.find(k => response.llmOutput?.[k] != null);
       if (key) {
-        llmUsage = this._parseUsageModel(response.llmOutput[key]);
+        llmUsage = this._parseUsageModel(response.llmOutput[key], provider, model);
       }
     }
 
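
This is the behavioural change in 7.1.1: cache read tokens are subtracted from input tokens only when the run looks like Anthropic (exact match on provider, or 'anthropic' appearing in the model name), rather than unconditionally as in 7.0.0. A worked example with made-up numbers; adjustedInputTokens is a hypothetical helper that mirrors the logic above and is not part of the package's API:

// Hypothetical helper mirroring the 7.1.1 logic, for illustration only.
function adjustedInputTokens(provider, model, inputTokens, cacheReadInputTokens) {
  const isAnthropic =
    (provider && provider.toLowerCase() === 'anthropic') ||
    (model && model.toLowerCase().includes('anthropic'));
  return isAnthropic && inputTokens && cacheReadInputTokens
    ? Math.max(inputTokens - cacheReadInputTokens, 0)
    : inputTokens;
}
adjustedInputTokens('anthropic', undefined, 1000, 800);                    // 200: cache reads subtracted
adjustedInputTokens('openai', 'gpt-4o', 1000, 800);                        // 1000: left as reported
adjustedInputTokens(undefined, 'anthropic.claude-3-5-sonnet', 1000, 800);  // 200: substring match on model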
@@ -1200,14 +1208,14 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       for (const genChunk of generation) {
         // Check other paths for usage information
         if (genChunk.generationInfo?.usage_metadata) {
-          llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata);
+          llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata, provider, model);
           return llmUsage;
         }
         const messageChunk = genChunk.generationInfo ?? {};
         const responseMetadata = messageChunk.response_metadata ?? {};
         const chunkUsage = responseMetadata['usage'] ?? responseMetadata['amazon-bedrock-invocationMetrics'] ?? messageChunk.usage_metadata;
         if (chunkUsage) {
-          llmUsage = this._parseUsageModel(chunkUsage);
+          llmUsage = this._parseUsageModel(chunkUsage, provider, model);
           return llmUsage;
         }
       }
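
When llmOutput carries no usage information, parseUsage falls back to walking the generations: it checks generationInfo.usage_metadata first, then response_metadata['usage'], then the amazon-bedrock-invocationMetrics block, then usage_metadata again as a last resort, and every path now forwards provider and model. A sketch of one such fallback, with a shape inferred from the lookup order above and made-up numbers:

// Illustrative generation chunk; field values are invented.
const genChunk = {
  generationInfo: {
    response_metadata: {
      'amazon-bedrock-invocationMetrics': { inputTokenCount: 50, outputTokenCount: 20 }
    }
  }
};
// With no generationInfo.usage_metadata and no response_metadata.usage, the Bedrock metrics are
// picked up and passed to _parseUsageModel(chunkUsage, provider, model), where inputTokenCount /
// outputTokenCount map to input/output via conversionList.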