@posthog/ai 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +62 -0
- package/index.ts +1 -0
- package/package.json +48 -0
- package/src/azure-openai/index.ts +204 -0
- package/src/index.ts +7 -0
- package/src/openai/index.ts +204 -0
- package/src/utils.ts +166 -0
- package/src/vercel/middleware.ts +157 -0
- package/tests/openai.test.ts +228 -0
- package/tsconfig.json +10 -0
package/README.md
ADDED
@@ -0,0 +1,62 @@
# PostHog Node AI

Initial TypeScript SDK for LLM observability

## Installation

```bash
yarn add @posthog/ai
```

## Usage

### Before

```typescript
import { OpenAI } from 'openai'

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY || '',
})

await client.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Hello, world!' }],
})
```

### After

```typescript
import { OpenAI } from '@posthog/ai'
import { PostHog } from 'posthog-node'

const phClient = new PostHog(process.env.POSTHOG_API_KEY, {
  host: process.env.POSTHOG_HOST || 'https://us.posthog.com',
})

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY || '',
  posthog: phClient,
})

await client.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Hello, world!' }],
  posthog_distinct_id: 'test-user-id',
  posthog_properties: {
    test_property: 'test_value',
  },
})

// Required: flush and shut down the PostHog client, otherwise queued events may never be sent
await phClient.shutdown()
```

Please see the main [PostHog docs](https://www.posthog.com/docs).

## Questions?

### [Check out our community page.](https://posthog.com/posts)
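The wrapped client accepts the full set of `posthog_*` monitoring parameters declared in `src/utils.ts` (`MonitoringParams`). A minimal sketch of a call that uses them all — the distinct ID, trace ID, group, and property values here are placeholders, not part of the package:

```typescript
import { randomUUID } from 'crypto'

await client.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Hello, world!' }],
  posthog_distinct_id: 'user-123',          // person the $ai_generation event is attributed to
  posthog_trace_id: randomUUID(),           // correlates several generations into one trace
  posthog_properties: { feature: 'chat' },  // extra properties merged onto the event
  posthog_groups: { company: 'acme' },      // passed through as PostHog group analytics
  posthog_privacy_mode: true,               // nulls out $ai_input / $ai_output_choices
})
```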
package/index.ts
ADDED
@@ -0,0 +1 @@
export * from './src'
package/package.json
ADDED
@@ -0,0 +1,48 @@
{
  "name": "@posthog/ai",
  "version": "1.0.1",
  "description": "PostHog Node.js AI integrations",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/PostHog/posthog-js-lite.git",
    "directory": "posthog-ai"
  },
  "main": "./lib/index.js",
  "module": "./lib/index.mjs",
  "types": "./lib/index.d.ts",
  "license": "MIT",
  "devDependencies": {
    "@types/jest": "^28.1.5",
    "@types/node": "^18.0.0",
    "jest": "^29.0.0",
    "node-fetch": "^3.3.2",
    "ts-jest": "^29.0.0",
    "typescript": "^4.7.4"
  },
  "keywords": [
    "posthog",
    "ai",
    "openai",
    "anthropic",
    "llm"
  ],
  "dependencies": {
    "ai": "^4.1.0",
    "openai": "^4.79.1",
    "uuid": "^11.0.5",
    "zod": "^3.24.1"
  },
  "scripts": {
    "test": "jest",
    "prepublishOnly": "cd .. && yarn build"
  },
  "directories": {
    "lib": "lib",
    "test": "tests"
  },
  "author": "PostHog",
  "bugs": {
    "url": "https://github.com/PostHog/posthog-js-lite/issues"
  },
  "homepage": "https://github.com/PostHog/posthog-js-lite#readme"
}
package/src/azure-openai/index.ts
ADDED
@@ -0,0 +1,204 @@
import OpenAIOrignal, { AzureOpenAI } from 'openai'
import { PostHog } from 'posthog-node'
import { v4 as uuidv4 } from 'uuid'
import { PassThrough } from 'stream'
import { mergeSystemPrompt, type MonitoringParams, sendEventToPosthog } from '../utils'

type ChatCompletion = OpenAIOrignal.ChatCompletion
type ChatCompletionChunk = OpenAIOrignal.ChatCompletionChunk
type ChatCompletionCreateParamsBase = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParams
type ChatCompletionCreateParamsNonStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsNonStreaming
type ChatCompletionCreateParamsStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsStreaming
import type { APIPromise, RequestOptions } from 'openai/core'
import type { Stream } from 'openai/streaming'

interface MonitoringOpenAIConfig {
  apiKey: string
  posthog: PostHog
  baseURL?: string
}

export class PostHogAzureOpenAI extends AzureOpenAI {
  private readonly phClient: PostHog

  constructor(config: MonitoringOpenAIConfig) {
    const { posthog, ...openAIConfig } = config
    super(openAIConfig)
    this.phClient = posthog
    this.chat = new WrappedChat(this, this.phClient)
  }

  public chat: WrappedChat
}

export class WrappedChat extends AzureOpenAI.Chat {
  constructor(parentClient: PostHogAzureOpenAI, phClient: PostHog) {
    super(parentClient)
    this.completions = new WrappedCompletions(parentClient, phClient)
  }

  public completions: WrappedCompletions
}

export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
  private readonly phClient: PostHog

  constructor(client: AzureOpenAI, phClient: PostHog) {
    super(client)
    this.phClient = phClient
  }

  // --- Overload #1: Non-streaming
  public create(
    body: ChatCompletionCreateParamsNonStreaming & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<ChatCompletion>

  // --- Overload #2: Streaming
  public create(
    body: ChatCompletionCreateParamsStreaming & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<Stream<ChatCompletionChunk>>

  // --- Overload #3: Generic base
  public create(
    body: ChatCompletionCreateParamsBase & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>>

  // --- Implementation Signature
  public create(
    body: ChatCompletionCreateParamsBase & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>> {
    const {
      posthog_distinct_id,
      posthog_trace_id,
      posthog_properties,
      posthog_privacy_mode = false,
      posthog_groups,
      ...openAIParams
    } = body

    const traceId = posthog_trace_id ?? uuidv4()
    const startTime = Date.now()

    const parentPromise = super.create(openAIParams, options)

    if (openAIParams.stream) {
      return parentPromise.then((value) => {
        const passThroughStream = new PassThrough({ objectMode: true })
        let accumulatedContent = ''
        let usage: { input_tokens: number; output_tokens: number } = {
          input_tokens: 0,
          output_tokens: 0,
        }
        if ('tee' in value) {
          const openAIStream = value
          ;(async () => {
            try {
              for await (const chunk of openAIStream) {
                const delta = chunk?.choices?.[0]?.delta?.content ?? ''
                accumulatedContent += delta
                if (chunk.usage) {
                  usage = {
                    input_tokens: chunk.usage.prompt_tokens ?? 0,
                    output_tokens: chunk.usage.completion_tokens ?? 0,
                  }
                }
                passThroughStream.write(chunk)
              }
              const latency = (Date.now() - startTime) / 1000
              sendEventToPosthog({
                client: this.phClient,
                distinctId: posthog_distinct_id ?? traceId,
                traceId,
                model: openAIParams.model,
                provider: 'azure',
                input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
                output: [{ content: accumulatedContent, role: 'assistant' }],
                latency,
                baseURL: (this as any).baseURL ?? '',
                params: body,
                httpStatus: 200,
                usage,
              })
              passThroughStream.end()
            } catch (error) {
              // error handling
              sendEventToPosthog({
                client: this.phClient,
                distinctId: posthog_distinct_id ?? traceId,
                traceId,
                model: openAIParams.model,
                provider: 'azure',
                input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
                output: [],
                latency: 0,
                baseURL: (this as any).baseURL ?? '',
                params: body,
                httpStatus: 500,
                usage: {
                  input_tokens: 0,
                  output_tokens: 0,
                },
              })
              passThroughStream.emit('error', error)
            }
          })()
        }
        return passThroughStream as unknown as Stream<ChatCompletionChunk>
      }) as APIPromise<Stream<ChatCompletionChunk>>
    } else {
      const wrappedPromise = parentPromise.then(
        (result) => {
          if ('choices' in result) {
            const latency = (Date.now() - startTime) / 1000
            sendEventToPosthog({
              client: this.phClient,
              distinctId: posthog_distinct_id ?? traceId,
              traceId,
              model: openAIParams.model,
              provider: 'azure',
              input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
              output: [{ content: result.choices[0].message.content, role: 'assistant' }],
              latency,
              baseURL: (this as any).baseURL ?? '',
              params: body,
              httpStatus: 200,
              usage: {
                input_tokens: result.usage?.prompt_tokens ?? 0,
                output_tokens: result.usage?.completion_tokens ?? 0,
              },
            })
          }
          return result
        },
        (error) => {
          sendEventToPosthog({
            client: this.phClient,
            distinctId: posthog_distinct_id ?? traceId,
            traceId,
            model: openAIParams.model,
            provider: 'azure',
            input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
            output: [],
            latency: 0,
            baseURL: (this as any).baseURL ?? '',
            params: body,
            httpStatus: 500,
            usage: {
              input_tokens: 0,
              output_tokens: 0,
            },
          })
          throw error
        }
      ) as APIPromise<ChatCompletion>

      return wrappedPromise
    }
  }
}

export default PostHogAzureOpenAI
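For orientation, a hypothetical usage sketch of the Azure wrapper above. The `MonitoringOpenAIConfig` interface only declares `apiKey`, `posthog`, and `baseURL`, so this sketch assumes the Azure endpoint, deployment, and API version are picked up by the underlying `AzureOpenAI` client (for example from the `AZURE_OPENAI_ENDPOINT` and `OPENAI_API_VERSION` environment variables); that behaviour comes from the `openai` package, not from this one:

```typescript
import { AzureOpenAI } from '@posthog/ai'
import { PostHog } from 'posthog-node'

const phClient = new PostHog(process.env.POSTHOG_API_KEY || '', { host: 'https://us.posthog.com' })

// Assumption: endpoint/API version are resolved by the base AzureOpenAI client from env vars.
const azureClient = new AzureOpenAI({
  apiKey: process.env.AZURE_OPENAI_API_KEY || '',
  posthog: phClient,
})

const completion = await azureClient.chat.completions.create({
  model: 'my-gpt-4o-deployment', // Azure deployment name (placeholder)
  messages: [{ role: 'user', content: 'Hello from Azure!' }],
  posthog_distinct_id: 'user-123',
})

await phClient.shutdown()
```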
package/src/index.ts
ADDED
@@ -0,0 +1,7 @@
import PostHogOpenAI from './openai'
import PostHogAzureOpenAI from './azure-openai'
import { wrapVercelLanguageModel } from './vercel/middleware'

export { PostHogOpenAI as OpenAI }
export { PostHogAzureOpenAI as AzureOpenAI }
export { wrapVercelLanguageModel as posthogWrappedLanguageModel }
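These re-exports define the public import surface of the package; assuming the published name from `package.json`, consumers would import the aliased names like so:

```typescript
import { OpenAI, AzureOpenAI, posthogWrappedLanguageModel } from '@posthog/ai'
```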
package/src/openai/index.ts
ADDED
@@ -0,0 +1,204 @@
import OpenAIOrignal from 'openai'
import { PostHog } from 'posthog-node'
import { v4 as uuidv4 } from 'uuid'
import { PassThrough } from 'stream'
import { mergeSystemPrompt, type MonitoringParams, sendEventToPosthog } from '../utils'

type ChatCompletion = OpenAIOrignal.ChatCompletion
type ChatCompletionChunk = OpenAIOrignal.ChatCompletionChunk
type ChatCompletionCreateParamsBase = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParams
type ChatCompletionCreateParamsNonStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsNonStreaming
type ChatCompletionCreateParamsStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsStreaming
import type { APIPromise, RequestOptions } from 'openai/core'
import type { Stream } from 'openai/streaming'

interface MonitoringOpenAIConfig {
  apiKey: string
  posthog: PostHog
  baseURL?: string
}

export class PostHogOpenAI extends OpenAIOrignal {
  private readonly phClient: PostHog

  constructor(config: MonitoringOpenAIConfig) {
    const { posthog, ...openAIConfig } = config
    super(openAIConfig)
    this.phClient = posthog
    this.chat = new WrappedChat(this, this.phClient)
  }

  public chat: WrappedChat
}

export class WrappedChat extends OpenAIOrignal.Chat {
  constructor(parentClient: PostHogOpenAI, phClient: PostHog) {
    super(parentClient)
    this.completions = new WrappedCompletions(parentClient, phClient)
  }

  public completions: WrappedCompletions
}

export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
  private readonly phClient: PostHog

  constructor(client: OpenAIOrignal, phClient: PostHog) {
    super(client)
    this.phClient = phClient
  }

  // --- Overload #1: Non-streaming
  public create(
    body: ChatCompletionCreateParamsNonStreaming & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<ChatCompletion>

  // --- Overload #2: Streaming
  public create(
    body: ChatCompletionCreateParamsStreaming & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<Stream<ChatCompletionChunk>>

  // --- Overload #3: Generic base
  public create(
    body: ChatCompletionCreateParamsBase & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>>

  // --- Implementation Signature
  public create(
    body: ChatCompletionCreateParamsBase & MonitoringParams,
    options?: RequestOptions
  ): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>> {
    const {
      posthog_distinct_id,
      posthog_trace_id,
      posthog_properties,
      posthog_privacy_mode = false,
      posthog_groups,
      ...openAIParams
    } = body

    const traceId = posthog_trace_id ?? uuidv4()
    const startTime = Date.now()

    const parentPromise = super.create(openAIParams, options)

    if (openAIParams.stream) {
      return parentPromise.then((value) => {
        const passThroughStream = new PassThrough({ objectMode: true })
        let accumulatedContent = ''
        let usage: { input_tokens: number; output_tokens: number } = {
          input_tokens: 0,
          output_tokens: 0,
        }
        if ('tee' in value) {
          const openAIStream = value
          ;(async () => {
            try {
              for await (const chunk of openAIStream) {
                const delta = chunk?.choices?.[0]?.delta?.content ?? ''
                accumulatedContent += delta
                if (chunk.usage) {
                  usage = {
                    input_tokens: chunk.usage.prompt_tokens ?? 0,
                    output_tokens: chunk.usage.completion_tokens ?? 0,
                  }
                }
                passThroughStream.write(chunk)
              }
              const latency = (Date.now() - startTime) / 1000
              sendEventToPosthog({
                client: this.phClient,
                distinctId: posthog_distinct_id ?? traceId,
                traceId,
                model: openAIParams.model,
                provider: 'openai',
                input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
                output: [{ content: accumulatedContent, role: 'assistant' }],
                latency,
                baseURL: (this as any).baseURL ?? '',
                params: body,
                httpStatus: 200,
                usage,
              })
              passThroughStream.end()
            } catch (error) {
              // error handling
              sendEventToPosthog({
                client: this.phClient,
                distinctId: posthog_distinct_id ?? traceId,
                traceId,
                model: openAIParams.model,
                provider: 'openai',
                input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
                output: [],
                latency: 0,
                baseURL: (this as any).baseURL ?? '',
                params: body,
                httpStatus: 500,
                usage: {
                  input_tokens: 0,
                  output_tokens: 0,
                },
              })
              passThroughStream.emit('error', error)
            }
          })()
        }
        return passThroughStream as unknown as Stream<ChatCompletionChunk>
      }) as APIPromise<Stream<ChatCompletionChunk>>
    } else {
      const wrappedPromise = parentPromise.then(
        (result) => {
          if ('choices' in result) {
            const latency = (Date.now() - startTime) / 1000
            sendEventToPosthog({
              client: this.phClient,
              distinctId: posthog_distinct_id ?? traceId,
              traceId,
              model: openAIParams.model,
              provider: 'openai',
              input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
              output: [{ content: result.choices[0].message.content, role: 'assistant' }],
              latency,
              baseURL: (this as any).baseURL ?? '',
              params: body,
              httpStatus: 200,
              usage: {
                input_tokens: result.usage?.prompt_tokens ?? 0,
                output_tokens: result.usage?.completion_tokens ?? 0,
              },
            })
          }
          return result
        },
        (error) => {
          sendEventToPosthog({
            client: this.phClient,
            distinctId: posthog_distinct_id ?? traceId,
            traceId,
            model: openAIParams.model,
            provider: 'openai',
            input: posthog_privacy_mode ? '' : mergeSystemPrompt(openAIParams, 'openai'),
            output: [],
            latency: 0,
            baseURL: (this as any).baseURL ?? '',
            params: body,
            httpStatus: 500,
            usage: {
              input_tokens: 0,
              output_tokens: 0,
            },
          })
          throw error
        }
      ) as APIPromise<ChatCompletion>

      return wrappedPromise
    }
  }
}

export default PostHogOpenAI
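A sketch of how the streaming branch above would be consumed. `stream_options.include_usage` is a standard OpenAI API option rather than something this package adds; it makes the final chunk carry the `usage` block that the wrapper reads for token counts:

```typescript
const stream = await client.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Tell me a joke' }],
  stream: true,
  stream_options: { include_usage: true }, // final chunk then includes token usage
  posthog_distinct_id: 'user-123',
})

let text = ''
for await (const chunk of stream) {
  text += chunk.choices?.[0]?.delta?.content ?? ''
}

// The $ai_generation event is captured once the stream finishes; remember to flush:
await phClient.shutdown()
```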
package/src/utils.ts
ADDED
@@ -0,0 +1,166 @@
import { PostHog } from 'posthog-node'
import OpenAIOrignal from 'openai'

type ChatCompletionCreateParamsBase = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParams

export interface MonitoringParams {
  posthog_distinct_id?: string
  posthog_trace_id?: string
  posthog_properties?: Record<string, any>
  posthog_privacy_mode?: boolean
  posthog_groups?: Record<string, any>
}

export const getModelParams = (params: ChatCompletionCreateParamsBase & MonitoringParams): Record<string, any> => {
  const modelParams: Record<string, any> = {}
  const paramKeys = [
    'temperature',
    'max_tokens',
    'max_completion_tokens',
    'top_p',
    'frequency_penalty',
    'presence_penalty',
    'n',
    'stop',
    'stream',
    'streaming',
  ] as const

  for (const key of paramKeys) {
    if (key in params && (params as any)[key] !== undefined) {
      modelParams[key] = (params as any)[key]
    }
  }
  return modelParams
}

export const getUsage = (response: any, provider: string): { input_tokens: number; output_tokens: number } => {
  if (!response?.usage) {
    return { input_tokens: 0, output_tokens: 0 }
  }

  if (provider === 'anthropic') {
    return {
      input_tokens: response.usage.input_tokens ?? 0,
      output_tokens: response.usage.output_tokens ?? 0,
    }
  } else if (provider === 'openai') {
    return {
      input_tokens: response.usage.prompt_tokens ?? 0,
      output_tokens: response.usage.completion_tokens ?? 0,
    }
  }

  return { input_tokens: 0, output_tokens: 0 }
}

/**
 * Helper to format responses (non-streaming) for consumption, mirroring Python's openai vs. anthropic approach.
 */
export const formatResponse = (response: any, provider: string): Array<{ role: string; content: string }> => {
  if (!response) {
    return []
  }
  if (provider === 'anthropic') {
    return formatResponseAnthropic(response)
  } else if (provider === 'openai') {
    return formatResponseOpenAI(response)
  }
  return []
}

export const formatResponseAnthropic = (response: any): Array<{ role: string; content: string }> => {
  // Example approach if "response.content" holds array of text segments, etc.
  const output: Array<{ role: string; content: string }> = []
  for (const choice of response.content ?? []) {
    if (choice?.text) {
      output.push({
        role: 'assistant',
        content: choice.text,
      })
    }
  }
  return output
}

export const formatResponseOpenAI = (response: any): Array<{ role: string; content: string }> => {
  const output: Array<{ role: string; content: string }> = []
  for (const choice of response.choices ?? []) {
    if (choice.message?.content) {
      output.push({
        role: choice.message.role,
        content: choice.message.content,
      })
    }
  }
  return output
}

export const mergeSystemPrompt = (params: ChatCompletionCreateParamsBase & MonitoringParams, provider: string): any => {
  if (provider !== 'anthropic') {
    return params.messages
  }
  const messages = params.messages || []
  if (!(params as any).system) {
    return messages
  }
  const systemMessage = (params as any).system
  return [{ role: 'system', content: systemMessage }, ...messages]
}

export const withPrivacyMode = (client: PostHog, privacyMode: boolean, input: any): any => {
  return (client as any).privacy_mode || privacyMode ? null : input
}

export type SendEventToPosthogParams = {
  client: PostHog
  distinctId?: string
  traceId: string
  model: string
  provider: string
  input: any
  output: any
  latency: number
  baseURL: string
  httpStatus: number
  usage: { input_tokens?: number; output_tokens?: number }
  params: ChatCompletionCreateParamsBase & MonitoringParams
}

export const sendEventToPosthog = ({
  client,
  distinctId,
  traceId,
  model,
  provider,
  input,
  output,
  latency,
  baseURL,
  params,
  httpStatus = 200,
  usage = {},
}: SendEventToPosthogParams): void => {
  if (client.capture) {
    client.capture({
      distinctId: distinctId ?? traceId,
      event: '$ai_generation',
      properties: {
        $ai_provider: provider,
        $ai_model: model,
        $ai_model_parameters: getModelParams(params),
        $ai_input: withPrivacyMode(client, params.posthog_privacy_mode ?? false, input),
        $ai_output_choices: withPrivacyMode(client, params.posthog_privacy_mode ?? false, output),
        $ai_http_status: httpStatus,
        $ai_input_tokens: usage.input_tokens ?? 0,
        $ai_output_tokens: usage.output_tokens ?? 0,
        $ai_latency: latency,
        $ai_trace_id: traceId,
        $ai_base_url: baseURL,
        ...params.posthog_properties,
        ...(distinctId ? {} : { $process_person_profile: false }),
      },
      groups: params.posthog_groups,
    })
  }
}
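Putting `sendEventToPosthog` together: for a successful non-streaming OpenAI call it effectively issues a capture call shaped like the sketch below (the concrete values are illustrative, the property names come straight from the code above):

```typescript
phClient.capture({
  distinctId: 'user-123', // falls back to the trace ID when no distinct ID is provided
  event: '$ai_generation',
  properties: {
    $ai_provider: 'openai',
    $ai_model: 'gpt-4',
    $ai_model_parameters: { temperature: 0.5, stream: false },
    $ai_input: [{ role: 'user', content: 'Hello' }],              // null when privacy mode is on
    $ai_output_choices: [{ role: 'assistant', content: 'Hi!' }],  // null when privacy mode is on
    $ai_http_status: 200,
    $ai_input_tokens: 20,
    $ai_output_tokens: 10,
    $ai_latency: 0.42,
    $ai_trace_id: 'trace-uuid',
    $ai_base_url: 'https://api.openai.com/v1',
    // posthog_properties are spread in here; $process_person_profile: false is added
    // only when no distinct ID was supplied
  },
  groups: { company: 'acme' },
})
```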
package/src/vercel/middleware.ts
ADDED
@@ -0,0 +1,157 @@
import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai';
import type {
  LanguageModelV1,
  Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware,
  LanguageModelV1StreamPart,
} from 'ai';
import { v4 as uuidv4 } from 'uuid';
import type { PostHog } from 'posthog-node';
import { sendEventToPosthog } from '../utils';

interface CreateInstrumentationMiddlewareOptions {
  posthog_distinct_id: string;
  posthog_trace_id: string;
  posthog_properties: Record<string, any>;
  posthog_privacy_mode: boolean;
  posthog_groups: string[];
}

export const createInstrumentationMiddleware = (phClient: PostHog, model: LanguageModelV1, options: CreateInstrumentationMiddlewareOptions) => {
  const middleware: LanguageModelV1Middleware = {
    wrapGenerate: async ({ doGenerate, params }) => {
      const startTime = Date.now();

      try {
        const result = await doGenerate();
        const latency = (Date.now() - startTime) / 1000;

        sendEventToPosthog({
          client: phClient,
          distinctId: options.posthog_distinct_id,
          traceId: options.posthog_trace_id,
          model: model.modelId,
          provider: 'vercel',
          input: options.posthog_privacy_mode ? '' : params.prompt,
          output: [{ content: result.text, role: 'assistant' }],
          latency,
          baseURL: "",
          params: { posthog_properties: options } as any,
          httpStatus: 200,
          usage: {
            input_tokens: 0,
            output_tokens: 0,
          },
        });

        return result;
      } catch (error) {
        sendEventToPosthog({
          client: phClient,
          distinctId: options.posthog_distinct_id,
          traceId: options.posthog_trace_id,
          model: model.modelId,
          provider: 'vercel',
          input: options.posthog_privacy_mode ? '' : params.prompt,
          output: [],
          latency: 0,
          baseURL: "",
          params: { posthog_properties: options } as any,
          httpStatus: 500,
          usage: {
            input_tokens: 0,
            output_tokens: 0,
          },
        });
        throw error;
      }
    },

    wrapStream: async ({ doStream, params }) => {
      const startTime = Date.now();
      let generatedText = '';

      try {
        const { stream, ...rest } = await doStream();

        const transformStream = new TransformStream<
          LanguageModelV1StreamPart,
          LanguageModelV1StreamPart
        >({
          transform(chunk, controller) {
            if (chunk.type === 'text-delta') {
              generatedText += chunk.textDelta;
            }
            controller.enqueue(chunk);
          },

          flush() {
            const latency = (Date.now() - startTime) / 1000;
            sendEventToPosthog({
              client: phClient,
              distinctId: options.posthog_distinct_id,
              traceId: options.posthog_trace_id,
              model: model.modelId,
              provider: 'vercel',
              input: options.posthog_privacy_mode ? '' : params.prompt,
              output: [{ content: generatedText, role: 'assistant' }],
              latency,
              baseURL: "",
              params: { posthog_properties: options } as any,
              httpStatus: 200,
              usage: {
                input_tokens: 0,
                output_tokens: 0,
              },
            });
          },
        });

        return {
          stream: stream.pipeThrough(transformStream),
          ...rest,
        };
      } catch (error) {
        sendEventToPosthog({
          client: phClient,
          distinctId: options.posthog_distinct_id,
          traceId: options.posthog_trace_id,
          model: model.modelId,
          provider: 'vercel',
          input: options.posthog_privacy_mode ? '' : params.prompt,
          output: [],
          latency: 0,
          baseURL: "",
          params: { posthog_properties: options } as any,
          httpStatus: 500,
          usage: {
            input_tokens: 0,
            output_tokens: 0,
          },
        });
        throw error;
      }
    },
  };

  return middleware;
};

export const wrapVercelLanguageModel = (
  model: LanguageModelV1,
  phClient: PostHog,
  options: CreateInstrumentationMiddlewareOptions
) => {
  const traceId = options.posthog_trace_id ?? uuidv4();
  const middleware = createInstrumentationMiddleware(phClient, model, {
    ...options,
    posthog_trace_id: traceId,
    posthog_distinct_id: options.posthog_distinct_id ?? traceId,
  });

  const wrappedModel = wrapLanguageModel({
    model,
    middleware,
  });

  return wrappedModel;
};
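A hedged usage sketch of the Vercel AI SDK middleware above. It assumes the `@ai-sdk/openai` provider package and the `generateText` helper from `ai`, neither of which is a dependency of this package, and it passes every field because `CreateInstrumentationMiddlewareOptions` declares them all as required:

```typescript
import { generateText } from 'ai'
import { openai } from '@ai-sdk/openai' // assumed provider package, not shipped with @posthog/ai
import { PostHog } from 'posthog-node'
import { posthogWrappedLanguageModel } from '@posthog/ai'

const phClient = new PostHog(process.env.POSTHOG_API_KEY || '', { host: 'https://us.posthog.com' })

// Wrap any LanguageModelV1 so generate/stream calls emit $ai_generation events.
const model = posthogWrappedLanguageModel(openai('gpt-4o-mini'), phClient, {
  posthog_distinct_id: 'user-123',
  posthog_trace_id: 'trace-123',
  posthog_properties: { feature: 'summarize' },
  posthog_privacy_mode: false,
  posthog_groups: [],
})

const { text } = await generateText({ model, prompt: 'Summarize this repo in one sentence.' })

await phClient.shutdown()
```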
package/tests/openai.test.ts
ADDED
@@ -0,0 +1,228 @@
import { PostHog } from 'posthog-node'
import PostHogOpenAI from '../src/openai'

jest.mock('posthog-node', () => {
  return {
    PostHog: jest.fn().mockImplementation(() => {
      return {
        capture: jest.fn(),
        privacy_mode: false,
      }
    }),
  }
})

let mockOpenAiChatResponse: any = {}
let mockOpenAiEmbeddingResponse: any = {}

describe('PostHogOpenAI - Jest test suite', () => {
  let mockPostHogClient: PostHog
  let client: PostHogOpenAI

  beforeAll(() => {
    if (!process.env.OPENAI_API_KEY) {
      console.warn('⚠️ Skipping OpenAI tests: No OPENAI_API_KEY environment variable set')
    }
  })

  beforeEach(() => {
    // Skip all tests if no API key is present
    if (!process.env.OPENAI_API_KEY) {
      return
    }

    jest.clearAllMocks()

    // Reset the default mocks
    mockPostHogClient = new (PostHog as any)()
    client = new PostHogOpenAI({
      apiKey: process.env.OPENAI_API_KEY || '',
      posthog: mockPostHogClient as any,
    })

    // Some default chat completion mock
    mockOpenAiChatResponse = {
      id: 'test-response-id',
      model: 'gpt-4',
      object: 'chat.completion',
      created: Date.now() / 1000,
      choices: [
        {
          index: 0,
          finish_reason: 'stop',
          message: {
            role: 'assistant',
            content: 'Hello from OpenAI!',
          },
        },
      ],
      usage: {
        prompt_tokens: 20,
        completion_tokens: 10,
        total_tokens: 30,
      },
    }

    // Some default embedding mock
    mockOpenAiEmbeddingResponse = {
      data: [
        {
          object: 'embedding',
          index: 0,
          embedding: [0.1, 0.2, 0.3],
        },
      ],
      model: 'text-embedding-3-small',
      object: 'list',
      usage: {
        prompt_tokens: 10,
        total_tokens: 10,
      },
    }
  })

  // Wrap each test with conditional skip
  const conditionalTest = process.env.OPENAI_API_KEY ? test : test.skip

  conditionalTest('basic completion', async () => {
    // We ensure calls to create a completion return our mock
    // This is handled by the inherited Chat.Completions mock in openai
    const response = await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Hello' }],
      posthog_distinct_id: 'test-id',
      posthog_properties: { foo: 'bar' },
    })

    expect(response).toEqual(mockOpenAiChatResponse)
    // We expect 1 capture call
    expect(mockPostHogClient.capture).toHaveBeenCalledTimes(1)
    // Check the capture arguments
    const [captureArgs] = (mockPostHogClient.capture as jest.Mock).mock.calls
    const { distinctId, event, properties } = captureArgs[0]

    expect(distinctId).toBe('test-id')
    expect(event).toBe('$ai_generation')
    expect(properties['$ai_provider']).toBe('openai')
    expect(properties['$ai_model']).toBe('gpt-4')
    expect(properties['$ai_input']).toEqual([{ role: 'user', content: 'Hello' }])
    expect(properties['$ai_output_choices']).toEqual([{ role: 'assistant', content: 'Hello from OpenAI!' }])
    expect(properties['$ai_input_tokens']).toBe(20)
    expect(properties['$ai_output_tokens']).toBe(10)
    expect(properties['$ai_http_status']).toBe(200)
    expect(properties['foo']).toBe('bar')
    expect(typeof properties['$ai_latency']).toBe('number')
  })

  conditionalTest('embeddings', async () => {
    // Since embeddings calls are not implemented in the snippet by default,
    // we'll demonstrate how you *would* do it if WrappedEmbeddings is used.
    // Let's override the internal embeddings to return our mock.
    const mockEmbeddingsCreate = jest.fn().mockResolvedValue(mockOpenAiEmbeddingResponse)
    ;(client as any).embeddings = {
      create: mockEmbeddingsCreate,
    }

    const response = await (client as any).embeddings.create({
      model: 'text-embedding-3-small',
      input: 'Hello world',
      posthog_distinct_id: 'test-id',
      posthog_properties: { foo: 'bar' },
    })

    expect(response).toEqual(mockOpenAiEmbeddingResponse)
    expect(mockPostHogClient.capture).toHaveBeenCalledTimes(1)

    const [captureArgs] = (mockPostHogClient.capture as jest.Mock).mock.calls
    const { distinctId, event, properties } = captureArgs[0]

    expect(distinctId).toBe('test-id')
    expect(event).toBe('$ai_embedding')
    expect(properties['$ai_provider']).toBe('openai')
    expect(properties['$ai_model']).toBe('text-embedding-3-small')
    expect(properties['$ai_input']).toBe('Hello world')
    expect(properties['$ai_input_tokens']).toBe(10)
    expect(properties['$ai_http_status']).toBe(200)
    expect(properties['foo']).toBe('bar')
    expect(typeof properties['$ai_latency']).toBe('number')
  })

  conditionalTest('groups', async () => {
    await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Hello' }],
      posthog_distinct_id: 'test-id',
      posthog_groups: { company: 'test_company' },
    })
    expect(mockPostHogClient.capture).toHaveBeenCalledTimes(1)
    const [captureArgs] = (mockPostHogClient.capture as jest.Mock).mock.calls
    const { groups } = captureArgs[0]
    expect(groups).toEqual({ company: 'test_company' })
  })

  conditionalTest('privacy mode local', async () => {
    await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Hello' }],
      posthog_distinct_id: 'test-id',
      posthog_privacy_mode: true,
    })

    expect(mockPostHogClient.capture).toHaveBeenCalledTimes(1)
    const [captureArgs] = (mockPostHogClient.capture as jest.Mock).mock.calls
    const { properties } = captureArgs[0]
    expect(properties['$ai_input']).toBeNull()
    expect(properties['$ai_output_choices']).toBeNull()
  })

  conditionalTest('privacy mode global', async () => {
    // override mock to appear globally in privacy mode
    ;(mockPostHogClient as any).privacy_mode = true

    await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Hello' }],
      posthog_distinct_id: 'test-id',
      // we attempt to override locally, but it should still be null if global is true
      posthog_privacy_mode: false,
    })

    expect(mockPostHogClient.capture).toHaveBeenCalledTimes(1)
    const [captureArgs] = (mockPostHogClient.capture as jest.Mock).mock.calls
    const { properties } = captureArgs[0]
    expect(properties['$ai_input']).toBeNull()
    expect(properties['$ai_output_choices']).toBeNull()
  })

  conditionalTest('core model params', async () => {
    mockOpenAiChatResponse.usage = {
      prompt_tokens: 20,
      completion_tokens: 10,
    }

    await client.chat.completions.create({
      model: 'gpt-4',
      // using openai-like params
      temperature: 0.5,
      max_completion_tokens: 100,
      stream: false,
      messages: [{ role: 'user', content: 'Hello' }],
      posthog_distinct_id: 'test-id',
      posthog_properties: { foo: 'bar' },
    })

    expect(mockPostHogClient.capture).toHaveBeenCalledTimes(1)
    const [captureArgs] = (mockPostHogClient.capture as jest.Mock).mock.calls
    const { properties } = captureArgs[0]

    expect(properties['$ai_model_parameters']).toEqual({
      temperature: 0.5,
      max_completion_tokens: 100,
      stream: false,
    })
    expect(properties['$ai_temperature']).toBe(0.5)
    expect(properties['$ai_max_tokens']).toBe(100)
    expect(properties['$ai_stream']).toBe(false)
    expect(properties['foo']).toBe('bar')
    expect(typeof properties['$ai_latency']).toBe('number')
  })
})
package/tsconfig.json
ADDED