@posthog/ai 2.1.1 → 2.2.0

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
+ import OpenAIOrignal, { AzureOpenAI } from 'openai'
+ import { PostHog } from 'posthog-node'
+ import { v4 as uuidv4 } from 'uuid'
+ import { PassThrough } from 'stream'
+ import { mergeSystemPrompt, MonitoringParams, sendEventToPosthog } from '../utils'
+ import type { APIPromise, RequestOptions } from 'openai/core'
+ import type { Stream } from 'openai/streaming'
+
+ type ChatCompletion = OpenAIOrignal.ChatCompletion
+ type ChatCompletionChunk = OpenAIOrignal.ChatCompletionChunk
+ type ChatCompletionCreateParamsBase = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParams
+ type ChatCompletionCreateParamsNonStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsNonStreaming
+ type ChatCompletionCreateParamsStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsStreaming
+
+ interface MonitoringOpenAIConfig {
+   apiKey: string
+   posthog: PostHog
+   baseURL?: string
+ }
+
+ export class PostHogAzureOpenAI extends AzureOpenAI {
+   private readonly phClient: PostHog
+   public chat: WrappedChat
+
+   constructor(config: MonitoringOpenAIConfig) {
+     const { posthog, ...openAIConfig } = config
+     super(openAIConfig)
+     this.phClient = posthog
+     this.chat = new WrappedChat(this, this.phClient)
+   }
+ }
+
+ export class WrappedChat extends AzureOpenAI.Chat {
+   constructor(parentClient: PostHogAzureOpenAI, phClient: PostHog) {
+     super(parentClient)
+     this.completions = new WrappedCompletions(parentClient, phClient)
+   }
+
+   public completions: WrappedCompletions
+ }
+
+ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
+   private readonly phClient: PostHog
+
+   constructor(client: AzureOpenAI, phClient: PostHog) {
+     super(client)
+     this.phClient = phClient
+   }
+
+   // --- Overload #1: Non-streaming
+   public create(
+     body: ChatCompletionCreateParamsNonStreaming & MonitoringParams,
+     options?: RequestOptions
+   ): APIPromise<ChatCompletion>
+
+   // --- Overload #2: Streaming
+   public create(
+     body: ChatCompletionCreateParamsStreaming & MonitoringParams,
+     options?: RequestOptions
+   ): APIPromise<Stream<ChatCompletionChunk>>
+
+   // --- Overload #3: Generic base
+   public create(
+     body: ChatCompletionCreateParamsBase & MonitoringParams,
+     options?: RequestOptions
+   ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>>
+
+   // --- Implementation Signature
+   public create(
+     body: ChatCompletionCreateParamsBase & MonitoringParams,
+     options?: RequestOptions
+   ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>> {
+     const {
+       posthogDistinctId,
+       posthogTraceId,
+       posthogProperties,
+       posthogPrivacyMode = false,
+       posthogGroups,
+       ...openAIParams
+     } = body
+
+     const traceId = posthogTraceId ?? uuidv4()
+     const startTime = Date.now()
+     const parentPromise = super.create(openAIParams, options)
+
+     if (openAIParams.stream) {
+       return parentPromise.then((value) => {
+         const passThroughStream = new PassThrough({ objectMode: true })
+         let accumulatedContent = ''
+         let usage: { inputTokens: number; outputTokens: number } = {
+           inputTokens: 0,
+           outputTokens: 0,
+         }
+         let model = openAIParams.model
+         if ('tee' in value) {
+           const openAIStream = value
+           ;(async () => {
+             try {
+               for await (const chunk of openAIStream) {
+                 const delta = chunk?.choices?.[0]?.delta?.content ?? ''
+                 accumulatedContent += delta
+                 if (chunk.usage) {
+                   if (chunk.model != model) {
+                     model = chunk.model
+                   }
+                   usage = {
+                     inputTokens: chunk.usage.prompt_tokens ?? 0,
+                     outputTokens: chunk.usage.completion_tokens ?? 0,
+                   }
+                 }
+                 passThroughStream.write(chunk)
+               }
+               const latency = (Date.now() - startTime) / 1000
+               sendEventToPosthog({
+                 client: this.phClient,
+                 distinctId: posthogDistinctId ?? traceId,
+                 traceId,
+                 model,
+                 provider: 'azure',
+                 input: mergeSystemPrompt(openAIParams, 'azure'),
+                 output: [{ content: accumulatedContent, role: 'assistant' }],
+                 latency,
+                 baseURL: (this as any).baseURL ?? '',
+                 params: body,
+                 httpStatus: 200,
+                 usage,
+               })
+               passThroughStream.end()
+             } catch (error: any) {
+               // Capture the failed generation, then surface the error on the stream
+               sendEventToPosthog({
+                 client: this.phClient,
+                 distinctId: posthogDistinctId ?? traceId,
+                 traceId,
+                 model,
+                 provider: 'azure',
+                 input: mergeSystemPrompt(openAIParams, 'azure'),
+                 output: JSON.stringify(error),
+                 latency: 0,
+                 baseURL: (this as any).baseURL ?? '',
+                 params: body,
+                 httpStatus: error?.status ? error.status : 500,
+                 usage: {
+                   inputTokens: 0,
+                   outputTokens: 0,
+                 },
+                 isError: true,
+                 error: JSON.stringify(error),
+               })
+               passThroughStream.emit('error', error)
+             }
+           })()
+         }
+         return passThroughStream as unknown as Stream<ChatCompletionChunk>
+       }) as APIPromise<Stream<ChatCompletionChunk>>
+     } else {
+       const wrappedPromise = parentPromise.then(
+         (result) => {
+           if ('choices' in result) {
+             const latency = (Date.now() - startTime) / 1000
+             let model = openAIParams.model
+             if (result.model != model) {
+               model = result.model
+             }
+             sendEventToPosthog({
+               client: this.phClient,
+               distinctId: posthogDistinctId ?? traceId,
+               traceId,
+               model,
+               provider: 'azure',
+               input: mergeSystemPrompt(openAIParams, 'azure'),
+               output: [{ content: result.choices[0].message.content, role: 'assistant' }],
+               latency,
+               baseURL: (this as any).baseURL ?? '',
+               params: body,
+               httpStatus: 200,
+               usage: {
+                 inputTokens: result.usage?.prompt_tokens ?? 0,
+                 outputTokens: result.usage?.completion_tokens ?? 0,
+               },
+             })
+           }
+           return result
+         },
+         (error: any) => {
+           sendEventToPosthog({
+             client: this.phClient,
+             distinctId: posthogDistinctId ?? traceId,
+             traceId,
+             model: openAIParams.model,
+             provider: 'azure',
+             input: mergeSystemPrompt(openAIParams, 'azure'),
+             output: [],
+             latency: 0,
+             baseURL: (this as any).baseURL ?? '',
+             params: body,
+             httpStatus: error?.status ? error.status : 500,
+             usage: {
+               inputTokens: 0,
+               outputTokens: 0,
+             },
+             isError: true,
+             error: JSON.stringify(error),
+           })
+           throw error
+         }
+       ) as APIPromise<ChatCompletion>
+
+       return wrappedPromise
+     }
+   }
+ }
+
+ export default PostHogAzureOpenAI
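
For orientation, here is a minimal usage sketch of the new Azure wrapper (not part of the diff). It assumes the package root re-exports `PostHogAzureOpenAI` (the file itself only has a local default export) and uses a hypothetical deployment URL; the `posthog*` fields are the `MonitoringParams` destructured in `create` above.

```ts
import { PostHog } from 'posthog-node'
import { PostHogAzureOpenAI } from '@posthog/ai' // assumed export path

const phClient = new PostHog('<posthog-project-api-key>')

const client = new PostHogAzureOpenAI({
  // The config type has no apiVersion field, so the underlying AzureOpenAI
  // client presumably picks it up from the OPENAI_API_VERSION env var.
  apiKey: process.env.AZURE_OPENAI_API_KEY ?? '',
  baseURL: 'https://my-resource.openai.azure.com/openai/deployments/my-gpt-4o', // hypothetical
  posthog: phClient,
})

async function main() {
  // The posthog* params are stripped before the request is forwarded to Azure
  // and are used only to build the $ai_generation event.
  const completion = await client.chat.completions.create({
    model: 'my-gpt-4o', // hypothetical deployment name
    messages: [{ role: 'user', content: 'Hello!' }],
    posthogDistinctId: 'user_123',
    posthogProperties: { feature: 'chat' },
  })
  console.log(completion.choices[0].message.content)
  await phClient.shutdown()
}

main()
```

With `stream: true`, `create` instead resolves to a `PassThrough` that re-emits each chunk while accumulating content and token usage for the analytics event.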
@@ -93,8 +93,8 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
            outputTokens: 0,
          }
          if ('tee' in value) {
-           const openAIStream = value;
-           (async () => {
+           const openAIStream = value
+           ;(async () => {
              try {
                for await (const chunk of openAIStream) {
                  const delta = chunk?.choices?.[0]?.delta?.content ?? ''
@@ -123,7 +123,7 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
                  usage,
                })
                passThroughStream.end()
-             } catch (error) {
+             } catch (error: any) {
                // error handling
                sendEventToPosthog({
                  client: this.phClient,
@@ -136,11 +136,13 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
                  latency: 0,
                  baseURL: (this as any).baseURL ?? '',
                  params: body,
-                 httpStatus: 500,
+                 httpStatus: error?.status ? error.status : 500,
                  usage: {
                    inputTokens: 0,
                    outputTokens: 0,
                  },
+                 isError: true,
+                 error: JSON.stringify(error),
                })
                passThroughStream.emit('error', error)
              }
@@ -173,7 +175,7 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
            }
            return result
          },
-         (error) => {
+         (error: any) => {
            sendEventToPosthog({
              client: this.phClient,
              distinctId: posthogDistinctId ?? traceId,
@@ -185,11 +187,13 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
              latency: 0,
              baseURL: (this as any).baseURL ?? '',
              params: body,
-             httpStatus: 500,
+             httpStatus: error?.status ? error.status : 500,
              usage: {
                inputTokens: 0,
                outputTokens: 0,
              },
+             isError: true,
+             error: JSON.stringify(error),
            })
            throw error
          }
@@ -198,7 +202,6 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
        return wrappedPromise
      }
    }
-
  }
  
  export default PostHogOpenAI
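
One detail in the hunks above deserves a note: the switch from `const openAIStream = value; (async () => {` to the leading-semicolon `;(async () => {` form. In semicolon-less style, a line beginning with `(` is joined to the previous line by automatic semicolon insertion. The snippet below illustrates the hazard; the names are stand-ins, not the library's:

```ts
const value: unknown = {} // stand-in for the resolved OpenAI stream

// Without a separator, ASI would parse the two lines below as one expression,
//   const openAIStream = value(async () => { ... })()
// i.e. a call on `value`, which throws "value is not a function" at runtime.

// The leading semicolon terminates the declaration, so the IIFE stands alone:
const openAIStream = value
;(async () => {
  // fire-and-forget consumer runs here
})()
```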
package/src/utils.ts CHANGED
@@ -77,15 +77,15 @@ export const formatResponseOpenAI = (response: any): Array<{ role: string; conte
  }
  
  export const mergeSystemPrompt = (params: ChatCompletionCreateParamsBase & MonitoringParams, provider: string): any => {
-   if (provider !== 'anthropic') {
-     return params.messages
-   }
-   const messages = params.messages || []
-   if (!(params as any).system) {
-     return messages
+   if (provider == 'anthropic') {
+     const messages = params.messages || []
+     if (!(params as any).system) {
+       return messages
+     }
+     const systemMessage = (params as any).system
+     return [{ role: 'system', content: systemMessage }, ...messages]
    }
-   const systemMessage = (params as any).system
-   return [{ role: 'system', content: systemMessage }, ...messages]
+   return params.messages
  }
  
  export const withPrivacyMode = (client: PostHog, privacyMode: boolean, input: any): any => {
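
The `mergeSystemPrompt` rewrite folds the early returns into a single `provider == 'anthropic'` branch; the behavior is unchanged. Concretely (a sketch; model names are hypothetical, and the Anthropic-style params are cast just as the function itself casts them):

```ts
// Anthropic keeps `system` as a top-level param, so it is merged into messages:
mergeSystemPrompt(
  { model: 'claude-3-5-sonnet', messages: [{ role: 'user', content: 'Hi' }], system: 'Be terse' } as any,
  'anthropic'
)
// → [{ role: 'system', content: 'Be terse' }, { role: 'user', content: 'Hi' }]

// Any other provider passes messages through untouched:
mergeSystemPrompt({ model: 'gpt-4o', messages: [{ role: 'user', content: 'Hi' }] } as any, 'azure')
// → [{ role: 'user', content: 'Hi' }]
```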
@@ -105,6 +105,8 @@ export type SendEventToPosthogParams = {
    httpStatus: number
    usage?: { inputTokens?: number; outputTokens?: number }
    params: ChatCompletionCreateParamsBase & MonitoringParams
+   isError?: boolean
+   error?: string
  }
  
  export const sendEventToPosthog = ({
@@ -120,8 +122,17 @@
    params,
    httpStatus = 200,
    usage = {},
+   isError = false,
+   error,
  }: SendEventToPosthogParams): void => {
    if (client.capture) {
+     let errorData = {}
+     if (isError) {
+       errorData = {
+         $ai_is_error: true,
+         $ai_error: error,
+       }
+     }
      client.capture({
        distinctId: distinctId ?? traceId,
        event: '$ai_generation',
@@ -139,8 +150,9 @@
          $ai_base_url: baseURL,
          ...params.posthogProperties,
          ...(distinctId ? {} : { $process_person_profile: false }),
+         ...errorData,
        },
        groups: params.posthogGroups,
      })
    }
- }
+ }
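
With the new `isError` / `error` fields, a failed request still produces an `$ai_generation` event, now tagged with the error. A sketch of what `sendEventToPosthog` ends up capturing for a hypothetical 429 failure (only the error-related properties are spelled out):

```ts
import { PostHog } from 'posthog-node'

const phClient = new PostHog('<posthog-project-api-key>')

phClient.capture({
  distinctId: 'user_123',
  event: '$ai_generation',
  properties: {
    // ...the usual $ai_* properties...
    $ai_base_url: 'https://api.openai.com/v1', // hypothetical
    $ai_is_error: true, // spread in from errorData
    $ai_error: '{"status":429,"message":"Rate limited"}', // JSON.stringify(error)
  },
})
```

One caveat worth knowing: `JSON.stringify` on a bare `Error` yields `'{}'` because `message` and `stack` are non-enumerable; the SDK's API errors carry own enumerable fields such as `status`, which is presumably what keeps the `$ai_error` payload informative here.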
@@ -9,8 +9,16 @@ import { v4 as uuidv4 } from 'uuid'
  import { PostHog } from 'posthog-node'
  import { sendEventToPosthog } from '../utils'
  
- interface CreateInstrumentationMiddlewareOptions {
+ interface ClientOptions {
    posthogDistinctId?: string
+   posthogTraceId?: string
+   posthogProperties?: Record<string, any>
+   posthogPrivacyMode?: boolean
+   posthogGroups?: Record<string, any>
+ }
+
+ interface CreateInstrumentationMiddlewareOptions {
+   posthogDistinctId: string
    posthogTraceId: string
    posthogProperties?: Record<string, any>
    posthogPrivacyMode?: boolean
@@ -72,12 +80,14 @@ export const createInstrumentationMiddleware = (
        const result = await doGenerate()
        const latency = (Date.now() - startTime) / 1000
  
+       const modelId = result.response?.modelId ? result.response.modelId : model.modelId
+
        sendEventToPosthog({
          client: phClient,
          distinctId: options.posthogDistinctId,
          traceId: options.posthogTraceId,
-         model: model.modelId,
-         provider: 'vercel',
+         model: modelId,
+         provider: model.provider,
          input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
          output: [{ content: result.text, role: 'assistant' }],
          latency,
@@ -91,23 +101,26 @@ export const createInstrumentationMiddleware = (
        })
  
        return result
-     } catch (error) {
+     } catch (error: any) {
+       const modelId = model.modelId
        sendEventToPosthog({
          client: phClient,
          distinctId: options.posthogDistinctId,
          traceId: options.posthogTraceId,
-         model: model.modelId,
-         provider: 'vercel',
+         model: modelId,
+         provider: model.provider,
          input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
          output: [],
          latency: 0,
          baseURL: '',
          params: mergedParams as any,
-         httpStatus: 500,
+         httpStatus: error?.status ? error.status : 500,
          usage: {
            inputTokens: 0,
            outputTokens: 0,
          },
+         isError: true,
+         error: JSON.stringify(error),
        })
        throw error
      }
@@ -145,7 +158,7 @@ export const createInstrumentationMiddleware = (
          distinctId: options.posthogDistinctId,
          traceId: options.posthogTraceId,
          model: model.modelId,
-         provider: 'vercel',
+         provider: model.provider,
          input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
          output: [{ content: generatedText, role: 'assistant' }],
          latency,
@@ -161,23 +174,25 @@ export const createInstrumentationMiddleware = (
          stream: stream.pipeThrough(transformStream),
          ...rest,
        }
-     } catch (error) {
+     } catch (error: any) {
        sendEventToPosthog({
          client: phClient,
          distinctId: options.posthogDistinctId,
          traceId: options.posthogTraceId,
          model: model.modelId,
-         provider: 'vercel',
+         provider: model.provider,
          input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
          output: [],
          latency: 0,
          baseURL: '',
          params: mergedParams as any,
-         httpStatus: 500,
+         httpStatus: error?.status ? error.status : 500,
          usage: {
            inputTokens: 0,
            outputTokens: 0,
          },
+         isError: true,
+         error: JSON.stringify(error),
        })
        throw error
      }
@@ -190,7 +205,7 @@ export const createInstrumentationMiddleware = (
  export const wrapVercelLanguageModel = (
    model: LanguageModelV1,
    phClient: PostHog,
-   options: CreateInstrumentationMiddlewareOptions
+   options: ClientOptions
  ): LanguageModelV1 => {
    const traceId = options.posthogTraceId ?? uuidv4()
    const middleware = createInstrumentationMiddleware(phClient, model, {
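
Finally, with `ClientOptions` every `posthog*` field is optional at the wrap site; as the last hunk shows, a trace ID is generated when none is passed, and events without a distinct ID fall back to that trace ID with `$process_person_profile: false`. A usage sketch (the provider import and the `@posthog/ai` export path are assumptions):

```ts
import { openai } from '@ai-sdk/openai' // assumed Vercel AI SDK provider
import { generateText } from 'ai'
import { PostHog } from 'posthog-node'
import { wrapVercelLanguageModel } from '@posthog/ai' // assumed export path

const phClient = new PostHog('<posthog-project-api-key>')

async function main() {
  // No posthogDistinctId or posthogTraceId required any more.
  const model = wrapVercelLanguageModel(openai('gpt-4o'), phClient, {
    posthogProperties: { feature: 'summaries' },
  })

  const { text } = await generateText({
    model,
    prompt: 'Summarize the 2.2.0 changes in one sentence.',
  })
  console.log(text)
  await phClient.shutdown()
}

main()
```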