@posthog/ai 2.1.2 → 2.2.0

This diff reflects the content of the publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -0,0 +1,214 @@
+import OpenAIOrignal, { AzureOpenAI } from 'openai'
+import { PostHog } from 'posthog-node'
+import { v4 as uuidv4 } from 'uuid'
+import { PassThrough } from 'stream'
+import { mergeSystemPrompt, MonitoringParams, sendEventToPosthog } from '../utils'
+
+type ChatCompletion = OpenAIOrignal.ChatCompletion
+type ChatCompletionChunk = OpenAIOrignal.ChatCompletionChunk
+type ChatCompletionCreateParamsBase = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParams
+type ChatCompletionCreateParamsNonStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsNonStreaming
+type ChatCompletionCreateParamsStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsStreaming
+import type { APIPromise, RequestOptions } from 'openai/core'
+import type { Stream } from 'openai/streaming'
+
+interface MonitoringOpenAIConfig {
+  apiKey: string
+  posthog: PostHog
+  baseURL?: string
+}
+
+export class PostHogAzureOpenAI extends AzureOpenAI {
+  private readonly phClient: PostHog
+  public chat: WrappedChat
+
+  constructor(config: MonitoringOpenAIConfig) {
+    const { posthog, ...openAIConfig } = config
+    super(openAIConfig)
+    this.phClient = posthog
+    this.chat = new WrappedChat(this, this.phClient)
+  }
+}
+
+export class WrappedChat extends AzureOpenAI.Chat {
+  constructor(parentClient: PostHogAzureOpenAI, phClient: PostHog) {
+    super(parentClient)
+    this.completions = new WrappedCompletions(parentClient, phClient)
+  }
+
+  public completions: WrappedCompletions
+}
+
+export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
+  private readonly phClient: PostHog
+
+  constructor(client: AzureOpenAI, phClient: PostHog) {
+    super(client)
+    this.phClient = phClient
+  }
+
+  // --- Overload #1: Non-streaming
+  public create(
+    body: ChatCompletionCreateParamsNonStreaming & MonitoringParams,
+    options?: RequestOptions
+  ): APIPromise<ChatCompletion>
+
+  // --- Overload #2: Streaming
+  public create(
+    body: ChatCompletionCreateParamsStreaming & MonitoringParams,
+    options?: RequestOptions
+  ): APIPromise<Stream<ChatCompletionChunk>>
+
+  // --- Overload #3: Generic base
+  public create(
+    body: ChatCompletionCreateParamsBase & MonitoringParams,
+    options?: RequestOptions
+  ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>>
+
+  // --- Implementation Signature
+  public create(
+    body: ChatCompletionCreateParamsBase & MonitoringParams,
+    options?: RequestOptions
+  ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>> {
+    const {
+      posthogDistinctId,
+      posthogTraceId,
+      posthogProperties,
+      posthogPrivacyMode = false,
+      posthogGroups,
+      ...openAIParams
+    } = body
+
+    const traceId = posthogTraceId ?? uuidv4()
+    const startTime = Date.now()
+    const parentPromise = super.create(openAIParams, options)
+
+    if (openAIParams.stream) {
+      return parentPromise.then((value) => {
+        const passThroughStream = new PassThrough({ objectMode: true })
+        let accumulatedContent = ''
+        let usage: { inputTokens: number; outputTokens: number } = {
+          inputTokens: 0,
+          outputTokens: 0,
+        }
+        let model = openAIParams.model
+        if ('tee' in value) {
+          const openAIStream = value
+          ;(async () => {
+            try {
+              for await (const chunk of openAIStream) {
+                const delta = chunk?.choices?.[0]?.delta?.content ?? ''
+                accumulatedContent += delta
+                if (chunk.usage) {
+                  if (chunk.model != model) {
+                    model = chunk.model
+                  }
+                  usage = {
+                    inputTokens: chunk.usage.prompt_tokens ?? 0,
+                    outputTokens: chunk.usage.completion_tokens ?? 0,
+                  }
+                }
+                passThroughStream.write(chunk)
+              }
+              const latency = (Date.now() - startTime) / 1000
+              sendEventToPosthog({
+                client: this.phClient,
+                distinctId: posthogDistinctId ?? traceId,
+                traceId,
+                model,
+                provider: 'azure',
+                input: mergeSystemPrompt(openAIParams, 'azure'),
+                output: [{ content: accumulatedContent, role: 'assistant' }],
+                latency,
+                baseURL: (this as any).baseURL ?? '',
+                params: body,
+                httpStatus: 200,
+                usage,
+              })
+              passThroughStream.end()
+            } catch (error: any) {
+              // error handling
+              sendEventToPosthog({
+                client: this.phClient,
+                distinctId: posthogDistinctId ?? traceId,
+                traceId,
+                model,
+                provider: 'azure',
+                input: mergeSystemPrompt(openAIParams, 'azure'),
+                output: JSON.stringify(error),
+                latency: 0,
+                baseURL: (this as any).baseURL ?? '',
+                params: body,
+                httpStatus: error?.status ? error.status : 500,
+                usage: {
+                  inputTokens: 0,
+                  outputTokens: 0,
+                },
+                isError: true,
+                error: JSON.stringify(error),
+              })
+              passThroughStream.emit('error', error)
+            }
+          })()
+        }
+        return passThroughStream as unknown as Stream<ChatCompletionChunk>
+      }) as APIPromise<Stream<ChatCompletionChunk>>
+    } else {
+      const wrappedPromise = parentPromise.then(
+        (result) => {
+          if ('choices' in result) {
+            const latency = (Date.now() - startTime) / 1000
+            let model = openAIParams.model
+            if (result.model != model) {
+              model = result.model
+            }
+            sendEventToPosthog({
+              client: this.phClient,
+              distinctId: posthogDistinctId ?? traceId,
+              traceId,
+              model,
+              provider: 'azure',
+              input: mergeSystemPrompt(openAIParams, 'azure'),
+              output: [{ content: result.choices[0].message.content, role: 'assistant' }],
+              latency,
+              baseURL: (this as any).baseURL ?? '',
+              params: body,
+              httpStatus: 200,
+              usage: {
+                inputTokens: result.usage?.prompt_tokens ?? 0,
+                outputTokens: result.usage?.completion_tokens ?? 0,
+              },
+            })
+          }
+          return result
+        },
+        (error: any) => {
+          sendEventToPosthog({
+            client: this.phClient,
+            distinctId: posthogDistinctId ?? traceId,
+            traceId,
+            model: openAIParams.model,
+            provider: 'azure',
+            input: mergeSystemPrompt(openAIParams, 'azure'),
+            output: [],
+            latency: 0,
+            baseURL: (this as any).baseURL ?? '',
+            params: body,
+            httpStatus: error?.status ? error.status : 500,
+            usage: {
+              inputTokens: 0,
+              outputTokens: 0,
+            },
+            isError: true,
+            error: JSON.stringify(error),
+          })
+          throw error
+        }
+      ) as APIPromise<ChatCompletion>
+
+      return wrappedPromise
+    }
+  }
+}
+
+export default PostHogAzureOpenAI
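
The file above is new in 2.2.0 and adds an Azure flavor of the monitored OpenAI client: `PostHogAzureOpenAI` subclasses `AzureOpenAI` and swaps in a `WrappedCompletions` that mirrors every `create` call to PostHog. A minimal usage sketch, assuming the class is importable from the package root (the export map is not part of this diff) and that `AzureOpenAI` resolves its endpoint and API version from the usual environment variables, since `MonitoringOpenAIConfig` only carries `apiKey`, `posthog`, and an optional `baseURL`:

```ts
import { PostHog } from 'posthog-node'
// Hypothetical import path; the package's export surface is not shown in this diff.
import PostHogAzureOpenAI from '@posthog/ai'

const phClient = new PostHog('<ph_project_api_key>')

const client = new PostHogAzureOpenAI({
  apiKey: process.env.AZURE_OPENAI_API_KEY!,
  posthog: phClient,
})

const completion = await client.chat.completions.create({
  model: 'my-gpt-4o-deployment', // hypothetical Azure deployment name
  messages: [{ role: 'user', content: 'Hello!' }],
  // MonitoringParams, destructured away before the request is sent:
  posthogDistinctId: 'user_123',
  posthogProperties: { feature: 'chat' },
})
```

Each call is captured as an `$ai_generation` event with `provider: 'azure'`. The hunks below make the matching error-reporting changes in the existing (non-Azure) `WrappedCompletions`: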
@@ -123,7 +123,7 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
                 usage,
               })
               passThroughStream.end()
-            } catch (error) {
+            } catch (error: any) {
               // error handling
               sendEventToPosthog({
                 client: this.phClient,
@@ -136,11 +136,13 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
                 latency: 0,
                 baseURL: (this as any).baseURL ?? '',
                 params: body,
-                httpStatus: 500,
+                httpStatus: error?.status ? error.status : 500,
                 usage: {
                   inputTokens: 0,
                   outputTokens: 0,
                 },
+                isError: true,
+                error: JSON.stringify(error),
               })
               passThroughStream.emit('error', error)
             }
@@ -173,7 +175,7 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
           }
           return result
         },
-        (error) => {
+        (error: any) => {
           sendEventToPosthog({
             client: this.phClient,
             distinctId: posthogDistinctId ?? traceId,
@@ -185,11 +187,13 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
             latency: 0,
             baseURL: (this as any).baseURL ?? '',
             params: body,
-            httpStatus: 500,
+            httpStatus: error?.status ? error.status : 500,
            usage: {
              inputTokens: 0,
              outputTokens: 0,
            },
+            isError: true,
+            error: JSON.stringify(error),
           })
           throw error
         }
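
On failure, the wrappers now capture the provider's real HTTP status when the thrown error carries one (`error.status`), falling back to 500, and attach the serialized error; the error itself is still re-thrown, so application code handles it as before. A sketch, reusing the hypothetical client from the Azure example:

```ts
try {
  await client.chat.completions.create({
    model: 'my-gpt-4o-deployment', // hypothetical deployment name
    messages: [{ role: 'user', content: 'Hi' }],
    posthogDistinctId: 'user_123',
  })
} catch (error: any) {
  // By this point the $ai_generation event has already been captured with
  // httpStatus = error.status (or 500), isError: true, and the stringified error.
  console.error('completion failed with status', error?.status)
}
```

For streaming calls, the failure is surfaced as an `'error'` event on the returned pass-through stream instead of a rejected promise.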
package/src/utils.ts CHANGED
@@ -77,15 +77,15 @@ export const formatResponseOpenAI = (response: any): Array<{ role: string; conte
 }
 
 export const mergeSystemPrompt = (params: ChatCompletionCreateParamsBase & MonitoringParams, provider: string): any => {
-  if (provider !== 'anthropic') {
-    return params.messages
-  }
-  const messages = params.messages || []
-  if (!(params as any).system) {
-    return messages
+  if (provider == 'anthropic') {
+    const messages = params.messages || []
+    if (!(params as any).system) {
+      return messages
+    }
+    const systemMessage = (params as any).system
+    return [{ role: 'system', content: systemMessage }, ...messages]
   }
-  const systemMessage = (params as any).system
-  return [{ role: 'system', content: systemMessage }, ...messages]
+  return params.messages
 }
 
 export const withPrivacyMode = (client: PostHog, privacyMode: boolean, input: any): any => {
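
The `mergeSystemPrompt` rewrite inverts the guard without changing behavior: for Anthropic, a top-level `system` param is folded in as a leading system message; every other provider gets `params.messages` back untouched. For example (inputs are illustrative, with `as any` standing in for Anthropic-style params):

```ts
// Anthropic: top-level `system` becomes the first message.
mergeSystemPrompt(
  { system: 'Be terse.', messages: [{ role: 'user', content: 'Hi' }] } as any,
  'anthropic'
)
// => [{ role: 'system', content: 'Be terse.' }, { role: 'user', content: 'Hi' }]

// Any other provider: messages pass through unchanged.
mergeSystemPrompt({ messages: [{ role: 'user', content: 'Hi' }] } as any, 'azure')
// => [{ role: 'user', content: 'Hi' }]
```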
@@ -105,6 +105,8 @@ export type SendEventToPosthogParams = {
   httpStatus: number
   usage?: { inputTokens?: number; outputTokens?: number }
   params: ChatCompletionCreateParamsBase & MonitoringParams
+  isError?: boolean
+  error?: string
 }
 
 export const sendEventToPosthog = ({
@@ -120,8 +122,17 @@ export const sendEventToPosthog = ({
   params,
   httpStatus = 200,
   usage = {},
+  isError = false,
+  error,
 }: SendEventToPosthogParams): void => {
   if (client.capture) {
+    let errorData = {}
+    if (isError) {
+      errorData = {
+        $ai_is_error: true,
+        $ai_error: error,
+      }
+    }
     client.capture({
       distinctId: distinctId ?? traceId,
       event: '$ai_generation',
@@ -139,6 +150,7 @@ export const sendEventToPosthog = ({
         $ai_base_url: baseURL,
         ...params.posthogProperties,
         ...(distinctId ? {} : { $process_person_profile: false }),
+        ...errorData,
       },
       groups: params.posthogGroups,
     })
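
With `errorData` spread into the properties, a failed call's `$ai_generation` capture carries two extra fields next to the usual `$ai_*` properties. A sketch of the resulting capture (values are illustrative):

```ts
phClient.capture({
  distinctId: 'user_123',
  event: '$ai_generation',
  properties: {
    // ...the usual $ai_* properties, $ai_base_url, caller-supplied posthogProperties...
    $ai_is_error: true,              // set from errorData when isError is passed
    $ai_error: '{"status":429,...}', // JSON.stringify of the thrown error
  },
})
```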
@@ -101,7 +101,7 @@ export const createInstrumentationMiddleware = (
       })
 
       return result
-    } catch (error) {
+    } catch (error: any) {
       const modelId = model.modelId
       sendEventToPosthog({
         client: phClient,
@@ -114,11 +114,13 @@ export const createInstrumentationMiddleware = (
         latency: 0,
         baseURL: '',
         params: mergedParams as any,
-        httpStatus: 500,
+        httpStatus: error?.status ? error.status : 500,
         usage: {
           inputTokens: 0,
           outputTokens: 0,
         },
+        isError: true,
+        error: JSON.stringify(error),
       })
       throw error
     }
@@ -172,7 +174,7 @@ export const createInstrumentationMiddleware = (
        stream: stream.pipeThrough(transformStream),
        ...rest,
      }
-   } catch (error) {
+   } catch (error: any) {
     sendEventToPosthog({
       client: phClient,
       distinctId: options.posthogDistinctId,
@@ -184,11 +186,13 @@ export const createInstrumentationMiddleware = (
       latency: 0,
       baseURL: '',
       params: mergedParams as any,
-      httpStatus: 500,
+      httpStatus: error?.status ? error.status : 500,
       usage: {
         inputTokens: 0,
         outputTokens: 0,
       },
+      isError: true,
+      error: JSON.stringify(error),
     })
     throw error
   }
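
Every catch block touched in this release converges on the same fields. A condensed sketch of that shared shape (the helper is hypothetical; the package inlines these fields at each call site):

```ts
// Hypothetical distillation of the repeated error fields above.
const errorFields = (error: any) => ({
  httpStatus: error?.status ? error.status : 500, // provider status when available
  usage: { inputTokens: 0, outputTokens: 0 },     // no token usage on failure
  isError: true,
  error: JSON.stringify(error),
})
```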