@posthog/ai 3.3.0 → 3.3.2

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@posthog/ai",
-  "version": "3.3.0",
+  "version": "3.3.2",
   "description": "PostHog Node.js AI integrations",
   "repository": {
     "type": "git",
@@ -82,7 +82,6 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     parentRunId?: string,
     tags?: string[],
     metadata?: Record<string, unknown>,
-
     runType?: string,
     runName?: string
   ): void {
@@ -432,10 +431,18 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
       eventProperties['$ai_is_error'] = true
     } else {
       // Handle token usage
-      const [inputTokens, outputTokens] = this.parseUsage(output)
+      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output)
       eventProperties['$ai_input_tokens'] = inputTokens
       eventProperties['$ai_output_tokens'] = outputTokens

+      // Add additional token data to properties
+      if (additionalTokenData.cacheReadInputTokens) {
+        eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens
+      }
+      if (additionalTokenData.reasoningTokens) {
+        eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens
+      }
+
       // Handle generations/completions
       let completions
       if (output.generations && Array.isArray(output.generations)) {
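
For context, a minimal sketch (not part of the diff, values invented) of how the extra tuple element surfaces on the captured event once both new branches fire:

// Sketch only: assumes parseUsage() reported cached and reasoning token counts.
const inputTokens = 1200
const outputTokens = 350
const additionalTokenData: Record<string, any> = { cacheReadInputTokens: 1024, reasoningTokens: 128 }

const eventProperties: Record<string, unknown> = {
  $ai_input_tokens: inputTokens,   // 1200
  $ai_output_tokens: outputTokens, // 350
}
if (additionalTokenData.cacheReadInputTokens) {
  eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens // 1024
}
if (additionalTokenData.reasoningTokens) {
  eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens // 128
}
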
@@ -471,14 +478,17 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     }
   }

-  private _getLangchainRunName(serialized: any, ...args: any[]): string | undefined {
+  private _getLangchainRunName(serialized: any, ...args: any): string | undefined {
     if (args && args.length > 0) {
       for (const arg of args) {
         if (arg && typeof arg === 'object' && 'name' in arg) {
           return arg.name
+        } else if (arg && typeof arg === 'object' && 'runName' in arg) {
+          return arg.runName
         }
       }
     }
+
     if (serialized && serialized.name) {
       return serialized.name
     }
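
A short, test-style illustration (not from the diff) of the resulting name resolution order; the loosely typed instance and the bracket access to the private helper are only for demonstration:

// Assumes `handler` is a LangChainCallbackHandler instance; typed loosely so
// the sketch stands alone.
declare const handler: { [key: string]: (...args: any[]) => string | undefined }

handler['_getLangchainRunName']({ name: 'SerializedChain' }, { runName: 'my_run' })
// -> 'my_run': an extra options arg carrying `runName` is now picked up

handler['_getLangchainRunName']({ name: 'SerializedChain' }, {})
// -> 'SerializedChain': falls back to `serialized.name`, as before
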
@@ -520,7 +530,7 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     return messageDict
   }

-  private _parseUsageModel(usage: any): [number, number] {
+  private _parseUsageModel(usage: any): [number, number, Record<string, any>] {
     const conversionList: Array<[string, 'input' | 'output']> = [
       ['promptTokens', 'input'],
       ['completionTokens', 'output'],
@@ -548,11 +558,32 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
       { input: 0, output: 0 }
     )

-    return [parsedUsage.input, parsedUsage.output]
+    // Extract additional token details like cached tokens and reasoning tokens
+    const additionalTokenData: Record<string, any> = {}
+
+    // Check for cached tokens in various formats
+    if (usage.prompt_tokens_details?.cached_tokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens
+    } else if (usage.input_token_details?.cache_read != null) {
+      additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read
+    } else if (usage.cachedPromptTokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens
+    }
+
+    // Check for reasoning tokens in various formats
+    if (usage.completion_tokens_details?.reasoning_tokens != null) {
+      additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens
+    } else if (usage.output_token_details?.reasoning != null) {
+      additionalTokenData.reasoningTokens = usage.output_token_details.reasoning
+    } else if (usage.reasoningTokens != null) {
+      additionalTokenData.reasoningTokens = usage.reasoningTokens
+    }
+
+    return [parsedUsage.input, parsedUsage.output, additionalTokenData]
   }

-  private parseUsage(response: LLMResult): [number, number] {
-    let llmUsage: [number, number] = [0, 0]
+  private parseUsage(response: LLMResult): [number, number, Record<string, any>] {
+    let llmUsage: [number, number, Record<string, any>] = [0, 0, {}]
     const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage']

     if (response.llmOutput != null) {
@@ -566,6 +597,7 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
       for (const generation of response.generations) {
         for (const genChunk of generation) {
+          // Check other paths for usage information
           if (genChunk.generationInfo?.usage_metadata) {
             llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata)
             return llmUsage
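
To illustrate the shapes the new checks are written against, two hedged example payloads (field names taken from the conditions above, values invented; the first two tuple elements still depend on the conversion list, which the earlier hunk only shows part of):

// OpenAI-style usage block
const openAIUsage = {
  prompt_tokens_details: { cached_tokens: 1024 },
  completion_tokens_details: { reasoning_tokens: 128 },
}

// What looks like LangChain's UsageMetadata format
const langchainUsage = {
  input_token_details: { cache_read: 1024 },
  output_token_details: { reasoning: 128 },
}

// For either shape, the third tuple element returned by _parseUsageModel would be:
// { cacheReadInputTokens: 1024, reasoningTokens: 128 }
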
package/src/utils.ts CHANGED
@@ -167,7 +167,7 @@ export const sendEventToPosthog = ({
     }
   }

-  let additionalTokenValues = {
+  const additionalTokenValues = {
     ...(usage.reasoningTokens ? { $ai_reasoning_tokens: usage.reasoningTokens } : {}),
     ...(usage.cacheReadInputTokens ? { $ai_cache_read_input_tokens: usage.cacheReadInputTokens } : {}),
     ...(usage.cacheCreationInputTokens ? { $ai_cache_creation_input_tokens: usage.cacheCreationInputTokens } : {}),
@@ -112,6 +112,24 @@ const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
   })
 }

+const mapVercelOutput = (result: any): PostHogInput[] => {
+  const output = {
+    ...(result.text ? { text: result.text } : {}),
+    ...(result.object ? { object: result.object } : {}),
+    ...(result.reasoning ? { reasoning: result.reasoning } : {}),
+    ...(result.response ? { response: result.response } : {}),
+    ...(result.finishReason ? { finishReason: result.finishReason } : {}),
+    ...(result.usage ? { usage: result.usage } : {}),
+    ...(result.warnings ? { warnings: result.warnings } : {}),
+    ...(result.providerMetadata ? { toolCalls: result.providerMetadata } : {}),
+  }
+  // if text and no object or reasoning, return text
+  if (output.text && !output.object && !output.reasoning) {
+    return [{ content: output.text, role: 'assistant' }]
+  }
+  return [{ content: JSON.stringify(output), role: 'assistant' }]
+}
+
 const extractProvider = (model: LanguageModelV1): string => {
   // vercel provider is in the format of provider.endpoint
   const provider = model.provider.toLowerCase()
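
A hedged usage sketch for the helper added above (result shapes assumed from the Vercel AI SDK, values invented):

// Plain text generations keep the simple message shape:
mapVercelOutput({ text: 'Hello!', finishReason: 'stop' })
// -> [{ content: 'Hello!', role: 'assistant' }]

// A result carrying structured fields instead of text is serialised to JSON:
mapVercelOutput({ object: { city: 'Berlin' }, finishReason: 'stop' })
// -> [{ content: '{"object":{"city":"Berlin"},"finishReason":"stop"}', role: 'assistant' }]
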
@@ -138,14 +156,14 @@ export const createInstrumentationMiddleware = (
         options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId)
       const provider = options.posthogProviderOverride ?? extractProvider(model)
       const baseURL = '' // cannot currently get baseURL from vercel
-      let content = result.text || JSON.stringify(result)
+      const content = mapVercelOutput(result)
       // let tools = result.toolCalls
-      let providerMetadata = result.providerMetadata
-      let additionalTokenValues = {
+      const providerMetadata = result.providerMetadata
+      const additionalTokenValues = {
         ...(providerMetadata?.openai?.reasoningTokens
           ? { reasoningTokens: providerMetadata.openai.reasoningTokens }
           : {}),
-        ...(providerMetadata?.openai?.cachedPromptToken
+        ...(providerMetadata?.openai?.cachedPromptTokens
           ? { cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens }
           : {}),
         ...(providerMetadata?.anthropic
@@ -233,8 +251,8 @@ export const createInstrumentationMiddleware = (
           if (chunk.providerMetadata?.openai?.reasoningTokens) {
             usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens
           }
-          if (chunk.providerMetadata?.openai?.cachedPromptToken) {
-            usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptToken
+          if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
+            usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens
           }
           if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
             usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens
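
The streaming path gets the same key fix; an invented finish-chunk sketch of the metadata this branch now matches (the plural `cachedPromptTokens` key is assumed from the fix itself):

// Values invented: with the corrected key, the cached count now lands on
// usage.cacheReadInputTokens instead of being skipped by the old singular
// `cachedPromptToken` lookup.
const chunk = {
  providerMetadata: { openai: { reasoningTokens: 64, cachedPromptTokens: 512 } },
}
const usage: { reasoningTokens?: number; cacheReadInputTokens?: number } = {}
if (chunk.providerMetadata?.openai?.reasoningTokens) {
  usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens
}
if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
  usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens
}
// usage -> { reasoningTokens: 64, cacheReadInputTokens: 512 }
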