@posthog/ai 3.2.1 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -49,10 +49,14 @@ export type SendEventToPosthogParams = {
     usage?: {
         inputTokens?: number;
         outputTokens?: number;
+        reasoningTokens?: any;
+        cacheReadInputTokens?: any;
+        cacheCreationInputTokens?: any;
     };
     params: (ChatCompletionCreateParamsBase | MessageCreateParams) & MonitoringParams;
     isError?: boolean;
     error?: string;
+    tools?: any;
 };
-export declare const sendEventToPosthog: ({ client, distinctId, traceId, model, provider, input, output, latency, baseURL, params, httpStatus, usage, isError, error, }: SendEventToPosthogParams) => void;
+export declare const sendEventToPosthog: ({ client, distinctId, traceId, model, provider, input, output, latency, baseURL, params, httpStatus, usage, isError, error, tools, }: SendEventToPosthogParams) => void;
 export {};
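This type is the internal bridge between each provider wrapper and PostHog capture. A minimal sketch of a call with the new fields populated; `phClient`, `requestParams`, and every value here are placeholders, not code from the package:

```ts
// Illustrative only: the parameter names come from the type above,
// but the client, params object, and all values are made up.
sendEventToPosthog({
  client: phClient,                // a PostHog Node client (assumed in scope)
  distinctId: 'user_123',
  traceId: 'trace_abc',
  model: 'claude-3-5-sonnet-latest',
  provider: 'anthropic',
  input: [{ role: 'user', content: 'Hello' }],
  output: [{ role: 'assistant', content: 'Hi there' }],
  latency: 0.42,
  baseURL: 'https://api.anthropic.com',
  params: requestParams,           // the original create() params (assumed in scope)
  httpStatus: 200,
  usage: {
    inputTokens: 20,
    outputTokens: 10,
    cacheReadInputTokens: 5,       // new in 3.3.0
    cacheCreationInputTokens: 0,   // new in 3.3.0
  },
  tools: requestParams.tools,      // new in 3.3.0, emitted as $ai_tools
})
```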
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@posthog/ai",
-  "version": "3.2.1",
+  "version": "3.3.0",
   "description": "PostHog Node.js AI integrations",
   "repository": {
     "type": "git",
@@ -70,9 +70,16 @@ export class WrappedMessages extends AnthropicOriginal.Messages {
     if (anthropicParams.stream) {
       return parentPromise.then((value) => {
         let accumulatedContent = ''
-        const usage: { inputTokens: number; outputTokens: number } = {
+        const usage: {
+          inputTokens: number
+          outputTokens: number
+          cacheCreationInputTokens?: number
+          cacheReadInputTokens?: number
+        } = {
           inputTokens: 0,
           outputTokens: 0,
+          cacheCreationInputTokens: 0,
+          cacheReadInputTokens: 0,
         }
         if ('tee' in value) {
           const [stream1, stream2] = value.tee()
@@ -87,6 +94,8 @@ export class WrappedMessages extends AnthropicOriginal.Messages {
             }
             if (chunk.type == 'message_start') {
              usage.inputTokens = chunk.message.usage.input_tokens ?? 0
+             usage.cacheCreationInputTokens = chunk.message.usage.cache_creation_input_tokens ?? 0
+             usage.cacheReadInputTokens = chunk.message.usage.cache_read_input_tokens ?? 0
             }
             if ('usage' in chunk) {
              usage.outputTokens = chunk.usage.output_tokens ?? 0
@@ -156,6 +165,8 @@ export class WrappedMessages extends AnthropicOriginal.Messages {
         usage: {
           inputTokens: result.usage.input_tokens ?? 0,
           outputTokens: result.usage.output_tokens ?? 0,
+          cacheCreationInputTokens: result.usage.cache_creation_input_tokens ?? 0,
+          cacheReadInputTokens: result.usage.cache_read_input_tokens ?? 0,
         },
       })
     }
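The new reads above target Anthropic's prompt-caching usage fields, on both `message_start` stream chunks and non-streaming results. A sketch of the usage block shape being consumed; field names are the ones read in the diff, the counts are made up:

```ts
// Shape assumed from the fields read above; values are illustrative.
const exampleAnthropicUsage = {
  input_tokens: 20,
  output_tokens: 10,
  cache_creation_input_tokens: 512, // prompt tokens written to the cache on this request
  cache_read_input_tokens: 256,     // prompt tokens served from the cache
}
```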
@@ -28,6 +28,8 @@ interface GenerationMetadata extends SpanMetadata {
   modelParams?: Record<string, any>
   /** The base URL—for example, the API base used */
   baseUrl?: string
+  /** The tools used in the generation */
+  tools?: Record<string, any>
 }
 
 /** A run may either be a Span or a Generation */
@@ -420,6 +422,10 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
       $ai_base_url: run.baseUrl,
     }
 
+    if (run.tools) {
+      eventProperties['$ai_tools'] = withPrivacyMode(this.client, this.privacyMode, run.tools)
+    }
+
     if (output instanceof Error) {
       eventProperties['$ai_http_status'] = (output as any).status || 500
       eventProperties['$ai_error'] = output.toString()
@@ -86,7 +86,12 @@ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
     if (openAIParams.stream) {
       return parentPromise.then((value) => {
         let accumulatedContent = ''
-        let usage: { inputTokens: number; outputTokens: number } = {
+        let usage: {
+          inputTokens: number
+          outputTokens: number
+          reasoningTokens?: number
+          cacheReadInputTokens?: number
+        } = {
           inputTokens: 0,
           outputTokens: 0,
         }
@@ -105,6 +110,8 @@ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
            usage = {
              inputTokens: chunk.usage.prompt_tokens ?? 0,
              outputTokens: chunk.usage.completion_tokens ?? 0,
+             reasoningTokens: chunk.usage.completion_tokens_details?.reasoning_tokens ?? 0,
+             cacheReadInputTokens: chunk.usage.prompt_tokens_details?.cached_tokens ?? 0,
            }
          }
        }
@@ -176,6 +183,8 @@ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
         usage: {
           inputTokens: result.usage?.prompt_tokens ?? 0,
           outputTokens: result.usage?.completion_tokens ?? 0,
+          reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
+          cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0,
         },
       })
     }
@@ -88,11 +88,18 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
       return parentPromise.then((value) => {
         if ('tee' in value) {
           const [stream1, stream2] = value.tee()
-          // Use one stream for tracking
           ;(async () => {
             try {
               let accumulatedContent = ''
-              let usage = { inputTokens: 0, outputTokens: 0 }
+              let usage: {
+                inputTokens?: number
+                outputTokens?: number
+                reasoningTokens?: number
+                cacheReadInputTokens?: number
+              } = {
+                inputTokens: 0,
+                outputTokens: 0,
+              }
 
               for await (const chunk of stream1) {
                 const delta = chunk?.choices?.[0]?.delta?.content ?? ''
@@ -101,6 +108,8 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
                  usage = {
                    inputTokens: chunk.usage.prompt_tokens ?? 0,
                    outputTokens: chunk.usage.completion_tokens ?? 0,
+                   reasoningTokens: chunk.usage.completion_tokens_details?.reasoning_tokens ?? 0,
+                   cacheReadInputTokens: chunk.usage.prompt_tokens_details?.cached_tokens ?? 0,
                  }
                }
              }
@@ -165,6 +174,8 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
         usage: {
           inputTokens: result.usage?.prompt_tokens ?? 0,
           outputTokens: result.usage?.completion_tokens ?? 0,
+          reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
+          cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0,
         },
       })
     }
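Both the Azure and plain OpenAI wrappers now read the optional detail objects on the usage payload, which is why the diff uses optional chaining throughout. Roughly the shape being consumed; field names are the ones read above, counts are made up:

```ts
// Shape assumed from the fields read above; values are illustrative.
const exampleOpenAIUsage = {
  prompt_tokens: 20,
  completion_tokens: 50,
  total_tokens: 70,
  completion_tokens_details: { reasoning_tokens: 32 }, // populated by reasoning models
  prompt_tokens_details: { cached_tokens: 5 },         // prompt tokens served from the cache
}
```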
package/src/utils.ts CHANGED
@@ -118,10 +118,17 @@ export type SendEventToPosthogParams = {
   latency: number
   baseURL: string
   httpStatus: number
-  usage?: { inputTokens?: number; outputTokens?: number }
+  usage?: {
+    inputTokens?: number
+    outputTokens?: number
+    reasoningTokens?: any
+    cacheReadInputTokens?: any
+    cacheCreationInputTokens?: any
+  }
   params: (ChatCompletionCreateParamsBase | MessageCreateParams) & MonitoringParams
   isError?: boolean
   error?: string
+  tools?: any
 }
 
 export const sendEventToPosthog = ({
@@ -139,6 +146,7 @@ export const sendEventToPosthog = ({
   usage = {},
   isError = false,
   error,
+  tools,
 }: SendEventToPosthogParams): void => {
   if (client.capture) {
     let errorData = {}
@@ -159,6 +167,12 @@ export const sendEventToPosthog = ({
       }
     }
 
+    let additionalTokenValues = {
+      ...(usage.reasoningTokens ? { $ai_reasoning_tokens: usage.reasoningTokens } : {}),
+      ...(usage.cacheReadInputTokens ? { $ai_cache_read_input_tokens: usage.cacheReadInputTokens } : {}),
+      ...(usage.cacheCreationInputTokens ? { $ai_cache_creation_input_tokens: usage.cacheCreationInputTokens } : {}),
+    }
+
     client.capture({
       distinctId: distinctId ?? traceId,
       event: '$ai_generation',
@@ -171,11 +185,13 @@ export const sendEventToPosthog = ({
         $ai_http_status: httpStatus,
         $ai_input_tokens: usage.inputTokens ?? 0,
         $ai_output_tokens: usage.outputTokens ?? 0,
+        ...additionalTokenValues,
         $ai_latency: latency,
         $ai_trace_id: traceId,
         $ai_base_url: baseURL,
         ...params.posthogProperties,
         ...(distinctId ? {} : { $process_person_profile: false }),
+        ...(tools ? { $ai_tools: tools } : {}),
         ...errorData,
         ...costOverrideData,
       },
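Taken together, the utils.ts changes mean a captured `$ai_generation` event gains the new properties only when the corresponding values are present. A sketch of the resulting payload, limited to keys visible in this diff and with placeholder values:

```ts
// Placeholder payload; only keys visible in this diff are shown, values are made up.
client.capture({
  distinctId: 'user_123',
  event: '$ai_generation',
  properties: {
    $ai_http_status: 200,
    $ai_input_tokens: 20,
    $ai_output_tokens: 10,
    $ai_reasoning_tokens: 15,       // only when usage.reasoningTokens is truthy
    $ai_cache_read_input_tokens: 5, // only when usage.cacheReadInputTokens is truthy
    $ai_latency: 0.42,
    $ai_trace_id: 'trace_abc',
    $ai_base_url: 'https://api.openai.com/v1',
    $ai_tools: [{ type: 'function', function: { name: 'get_weather' } }], // only when tools were passed
  },
})
```

Note that because `additionalTokenValues` uses truthiness checks, a genuine count of 0 is omitted from the event rather than reported as 0.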
@@ -27,8 +27,13 @@ interface CreateInstrumentationMiddlewareOptions {
 }
 
 interface PostHogInput {
-  content: string
   role: string
+  type?: string
+  content?:
+    | string
+    | {
+        [key: string]: any
+      }
 }
 
 const mapVercelParams = (params: any): Record<string, any> => {
@@ -45,18 +50,60 @@ const mapVercelParams = (params: any): Record<string, any> => {
 
 const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
   return prompt.map((p) => {
-    let content = ''
+    let content = {}
     if (Array.isArray(p.content)) {
-      content = p.content
-        .map((c) => {
-          if (c.type === 'text') {
-            return c.text
+      content = p.content.map((c) => {
+        if (c.type === 'text') {
+          return {
+            type: 'text',
+            content: c.text,
           }
-          return ''
-        })
-        .join('')
+        } else if (c.type === 'image') {
+          return {
+            type: 'image',
+            content: {
+              // if image is a url use it, or use "none supported"
+              image: c.image instanceof URL ? c.image.toString() : 'raw images not supported',
+              mimeType: c.mimeType,
+            },
+          }
+        } else if (c.type === 'file') {
+          return {
+            type: 'file',
+            content: {
+              file: c.data instanceof URL ? c.data.toString() : 'raw files not supported',
+              mimeType: c.mimeType,
+            },
+          }
+        } else if (c.type === 'tool-call') {
+          return {
+            type: 'tool-call',
+            content: {
+              toolCallId: c.toolCallId,
+              toolName: c.toolName,
+              args: c.args,
+            },
+          }
+        } else if (c.type === 'tool-result') {
+          return {
+            type: 'tool-result',
+            content: {
+              toolCallId: c.toolCallId,
+              toolName: c.toolName,
+              result: c.result,
+              isError: c.isError,
+            },
+          }
+        }
+        return {
+          content: '',
+        }
+      })
     } else {
-      content = p.content
+      content = {
+        type: 'text',
+        text: p.content,
+      }
     }
     return {
       role: p.role,
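With the widened `PostHogInput` type, `mapVercelPrompt` now preserves multimodal and tool parts instead of flattening everything to a string. A hypothetical input/output pair, not taken from the package's tests:

```ts
// Hypothetical example: a Vercel AI SDK prompt with a text part and an image URL part,
// and the PostHogInput[] the new mapping would produce for it.
const prompt: LanguageModelV1Prompt = [
  {
    role: 'user',
    content: [
      { type: 'text', text: 'What is in this picture?' },
      { type: 'image', image: new URL('https://example.com/cat.png'), mimeType: 'image/png' },
    ],
  },
]

const mapped = mapVercelPrompt(prompt)
// [
//   {
//     role: 'user',
//     content: [
//       { type: 'text', content: 'What is in this picture?' },
//       { type: 'image', content: { image: 'https://example.com/cat.png', mimeType: 'image/png' } },
//     ],
//   },
// ]
```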
@@ -91,10 +138,22 @@ export const createInstrumentationMiddleware = (
           options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId)
         const provider = options.posthogProviderOverride ?? extractProvider(model)
         const baseURL = '' // cannot currently get baseURL from vercel
-        let content = result.text
-        if (!content) {
-          // support generate Object
-          content = result.toolCalls?.[0].args || JSON.stringify(result)
+        let content = result.text || JSON.stringify(result)
+        // let tools = result.toolCalls
+        let providerMetadata = result.providerMetadata
+        let additionalTokenValues = {
+          ...(providerMetadata?.openai?.reasoningTokens
+            ? { reasoningTokens: providerMetadata.openai.reasoningTokens }
+            : {}),
+          ...(providerMetadata?.openai?.cachedPromptToken
+            ? { cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens }
+            : {}),
+          ...(providerMetadata?.anthropic
+            ? {
+                cacheReadInputTokens: providerMetadata.anthropic.cacheReadInputTokens,
+                cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens,
+              }
+            : {}),
         }
         sendEventToPosthog({
           client: phClient,
@@ -111,6 +170,7 @@ export const createInstrumentationMiddleware = (
           usage: {
             inputTokens: result.usage.promptTokens,
             outputTokens: result.usage.completionTokens,
+            ...additionalTokenValues,
           },
         })
 
@@ -143,7 +203,13 @@ export const createInstrumentationMiddleware = (
     wrapStream: async ({ doStream, params }) => {
       const startTime = Date.now()
       let generatedText = ''
-      let usage: { inputTokens?: number; outputTokens?: number } = {}
+      let usage: {
+        inputTokens?: number
+        outputTokens?: number
+        reasoningTokens?: any
+        cacheReadInputTokens?: any
+        cacheCreationInputTokens?: any
+      } = {}
       const mergedParams = {
         ...options,
         ...mapVercelParams(params),
@@ -164,6 +230,18 @@ export const createInstrumentationMiddleware = (
            inputTokens: chunk.usage?.promptTokens,
            outputTokens: chunk.usage?.completionTokens,
          }
+         if (chunk.providerMetadata?.openai?.reasoningTokens) {
+           usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens
+         }
+         if (chunk.providerMetadata?.openai?.cachedPromptToken) {
+           usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptToken
+         }
+         if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
+           usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens
+         }
+         if (chunk.providerMetadata?.anthropic?.cacheCreationInputTokens) {
+           usage.cacheCreationInputTokens = chunk.providerMetadata.anthropic.cacheCreationInputTokens
+         }
        }
        controller.enqueue(chunk)
      },
@@ -225,4 +225,40 @@ describe('PostHogOpenAI - Jest test suite', () => {
     expect(properties['$ai_stream']).toBe(false)
     expect(properties['foo']).toBe('bar')
   })
+
+  conditionalTest('reasoning and cache tokens', async () => {
+    // Set up mock response with standard token usage
+    mockOpenAiChatResponse.usage = {
+      prompt_tokens: 20,
+      completion_tokens: 10,
+      total_tokens: 30,
+      // Add the detailed token usage that OpenAI would return
+      completion_tokens_details: {
+        reasoning_tokens: 15,
+      },
+      prompt_tokens_details: {
+        cached_tokens: 5,
+      },
+    }
+
+    // Create a completion with additional token tracking
+    await client.chat.completions.create({
+      model: 'gpt-4',
+      messages: [{ role: 'user', content: 'Hello' }],
+      posthogDistinctId: 'test-id',
+      posthogProperties: { foo: 'bar' },
+    })
+
+    expect(mockPostHogClient.capture).toHaveBeenCalledTimes(1)
+    const [captureArgs] = (mockPostHogClient.capture as jest.Mock).mock.calls
+    const { properties } = captureArgs[0]
+
+    // Check standard token properties
+    expect(properties['$ai_input_tokens']).toBe(20)
+    expect(properties['$ai_output_tokens']).toBe(10)
+
+    // Check the new token properties
+    expect(properties['$ai_reasoning_tokens']).toBe(15)
+    expect(properties['$ai_cache_read_input_tokens']).toBe(5)
+  })
 })