@posthog/ai 1.2.0 → 1.3.0

This diff shows the differences between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in those registries.
package/README.md CHANGED
@@ -28,7 +28,7 @@ await client.chat.completions.create({
28
28
  ### After
29
29
 
30
30
  ```typescript
31
- import { OpenAI } from 'posthog-node-ai'
31
+ import { OpenAI } from '@posthog/ai'
32
32
  import { PostHog } from 'posthog-node'
33
33
 
34
34
  const phClient = new PostHog(
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@posthog/ai",
3
- "version": "1.2.0",
3
+ "version": "1.3.0",
4
4
  "description": "PostHog Node.js AI integrations",
5
5
  "repository": {
6
6
  "type": "git",
@@ -17,7 +17,9 @@
17
17
  "jest": "^29.0.0",
18
18
  "node-fetch": "^3.3.2",
19
19
  "ts-jest": "^29.0.0",
20
- "typescript": "^4.7.4"
20
+ "typescript": "^4.7.4",
21
+ "ai": "^4.0.0",
22
+ "openai": "^4.0.0"
21
23
  },
22
24
  "keywords": [
23
25
  "posthog",
@@ -27,11 +29,13 @@
27
29
  "llm"
28
30
  ],
29
31
  "dependencies": {
30
- "ai": "^4.1.0",
31
- "openai": "^4.79.1",
32
32
  "uuid": "^11.0.5",
33
33
  "zod": "^3.24.1"
34
34
  },
35
+ "peerDependencies": {
36
+ "ai": "^4.0.0",
37
+ "openai": "^4.0.0"
38
+ },
35
39
  "scripts": {
36
40
  "test": "jest",
37
41
  "prepublishOnly": "cd .. && yarn build"
package/src/index.ts CHANGED
@@ -2,4 +2,4 @@ import PostHogOpenAI from './openai'
2
2
  import { wrapVercelLanguageModel } from './vercel/middleware'
3
3
 
4
4
  export { PostHogOpenAI as OpenAI }
5
- export { wrapVercelLanguageModel as posthogWrappedLanguageModel }
5
+ export { wrapVercelLanguageModel as withTracing }
@@ -2,7 +2,7 @@ import OpenAIOrignal from 'openai'
2
2
  import { PostHog } from 'posthog-node'
3
3
  import { v4 as uuidv4 } from 'uuid'
4
4
  import { PassThrough } from 'stream'
5
- import { mergeSystemPrompt, type MonitoringParams, sendEventToPosthog } from '../utils'
5
+ import { mergeSystemPrompt, MonitoringParams, sendEventToPosthog } from '../utils'
6
6
 
7
7
  type ChatCompletion = OpenAIOrignal.ChatCompletion
8
8
  type ChatCompletionChunk = OpenAIOrignal.ChatCompletionChunk
package/src/utils.ts CHANGED
@@ -123,7 +123,7 @@ export type SendEventToPosthogParams = {
123
123
  latency: number
124
124
  baseURL: string
125
125
  httpStatus: number
126
- usage: { input_tokens?: number; output_tokens?: number }
126
+ usage?: { input_tokens?: number; output_tokens?: number }
127
127
  params: ChatCompletionCreateParamsBase & MonitoringParams
128
128
  }
129
129
 
@@ -1,157 +1,162 @@
1
- import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai';
1
+ import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai'
2
2
  import type {
3
- LanguageModelV1,
4
- Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware,
5
- LanguageModelV1StreamPart,
6
- } from 'ai';
7
- import { v4 as uuidv4 } from 'uuid';
8
- import type { PostHog } from 'posthog-node';
9
- import { sendEventToPosthog } from '../utils';
3
+ LanguageModelV1,
4
+ Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware,
5
+ LanguageModelV1StreamPart,
6
+ } from 'ai'
7
+ import { v4 as uuidv4 } from 'uuid'
8
+ import type { PostHog } from 'posthog-node'
9
+ import { sendEventToPosthog } from '../utils'
10
10
 
11
11
  interface CreateInstrumentationMiddlewareOptions {
12
- posthog_distinct_id: string;
13
- posthog_trace_id: string;
14
- posthog_properties: Record<string, any>;
15
- posthog_privacy_mode: boolean;
16
- posthog_groups: string[];
12
+ posthogDistinctId?: string
13
+ posthogTraceId: string
14
+ posthogProperties?: Record<string, any>
15
+ posthogPrivacyMode?: boolean
16
+ posthogGroups?: string[]
17
17
  }
18
18
 
19
- export const createInstrumentationMiddleware = (phClient: PostHog, model: LanguageModelV1, options: CreateInstrumentationMiddlewareOptions) => {
20
- const middleware: LanguageModelV1Middleware = {
21
- wrapGenerate: async ({ doGenerate, params }) => {
22
- const startTime = Date.now();
19
+ export const createInstrumentationMiddleware = (
20
+ phClient: PostHog,
21
+ model: LanguageModelV1,
22
+ options: CreateInstrumentationMiddlewareOptions
23
+ ): LanguageModelV1Middleware => {
24
+ const middleware: LanguageModelV1Middleware = {
25
+ wrapGenerate: async ({ doGenerate, params }) => {
26
+ const startTime = Date.now()
23
27
 
24
- try {
25
- const result = await doGenerate();
26
- const latency = (Date.now() - startTime) / 1000;
28
+ try {
29
+ const result = await doGenerate()
30
+ const latency = (Date.now() - startTime) / 1000
27
31
 
28
- sendEventToPosthog({
29
- client: phClient,
30
- distinctId: options.posthog_distinct_id,
31
- traceId: options.posthog_trace_id,
32
- model: model.modelId,
33
- provider: 'vercel',
34
- input: options.posthog_privacy_mode ? '' : params.prompt,
35
- output: [{ content: result.text, role: 'assistant' }],
36
- latency,
37
- baseURL: "",
38
- params: { posthog_properties: options } as any,
39
- httpStatus: 200,
40
- usage: {
41
- input_tokens: 0,
42
- output_tokens: 0,
43
- },
44
- });
32
+ sendEventToPosthog({
33
+ client: phClient,
34
+ distinctId: options.posthogDistinctId,
35
+ traceId: options.posthogTraceId,
36
+ model: model.modelId,
37
+ provider: 'vercel',
38
+ input: options.posthogPrivacyMode ? '' : params.prompt,
39
+ output: [{ content: result.text, role: 'assistant' }],
40
+ latency,
41
+ baseURL: '',
42
+ params: { posthog_properties: options } as any,
43
+ httpStatus: 200,
44
+ usage: {
45
+ input_tokens: result.usage.promptTokens,
46
+ output_tokens: result.usage.completionTokens,
47
+ },
48
+ })
45
49
 
46
- return result;
47
- } catch (error) {
48
- sendEventToPosthog({
49
- client: phClient,
50
- distinctId: options.posthog_distinct_id,
51
- traceId: options.posthog_trace_id,
52
- model: model.modelId,
53
- provider: 'vercel',
54
- input: options.posthog_privacy_mode ? '' : params.prompt,
55
- output: [],
56
- latency: 0,
57
- baseURL: "",
58
- params: { posthog_properties: options } as any,
59
- httpStatus: 500,
60
- usage: {
61
- input_tokens: 0,
62
- output_tokens: 0,
63
- },
64
- });
65
- throw error;
66
- }
67
- },
50
+ return result
51
+ } catch (error) {
52
+ sendEventToPosthog({
53
+ client: phClient,
54
+ distinctId: options.posthogDistinctId,
55
+ traceId: options.posthogTraceId,
56
+ model: model.modelId,
57
+ provider: 'vercel',
58
+ input: options.posthogPrivacyMode ? '' : params.prompt,
59
+ output: [],
60
+ latency: 0,
61
+ baseURL: '',
62
+ params: { posthog_properties: options } as any,
63
+ httpStatus: 500,
64
+ usage: {
65
+ input_tokens: 0,
66
+ output_tokens: 0,
67
+ },
68
+ })
69
+ throw error
70
+ }
71
+ },
68
72
 
69
- wrapStream: async ({ doStream, params }) => {
70
- const startTime = Date.now();
71
- let generatedText = '';
73
+ wrapStream: async ({ doStream, params }) => {
74
+ const startTime = Date.now()
75
+ let generatedText = ''
76
+ let usage: { input_tokens?: number; output_tokens?: number } = {}
72
77
 
73
- try {
74
- const { stream, ...rest } = await doStream();
78
+ try {
79
+ const { stream, ...rest } = await doStream()
75
80
 
76
- const transformStream = new TransformStream<
77
- LanguageModelV1StreamPart,
78
- LanguageModelV1StreamPart
79
- >({
80
- transform(chunk, controller) {
81
- if (chunk.type === 'text-delta') {
82
- generatedText += chunk.textDelta;
83
- }
84
- controller.enqueue(chunk);
85
- },
81
+ const transformStream = new TransformStream<LanguageModelV1StreamPart, LanguageModelV1StreamPart>({
82
+ transform(chunk, controller) {
83
+ if (chunk.type === 'text-delta') {
84
+ generatedText += chunk.textDelta
85
+ }
86
+ if (chunk.type === 'finish') {
87
+ usage = {
88
+ input_tokens: chunk.usage?.promptTokens,
89
+ output_tokens: chunk.usage?.completionTokens,
90
+ }
91
+ }
92
+ controller.enqueue(chunk)
93
+ },
86
94
 
87
- flush() {
88
- const latency = (Date.now() - startTime) / 1000;
89
- sendEventToPosthog({
90
- client: phClient,
91
- distinctId: options.posthog_distinct_id,
92
- traceId: options.posthog_trace_id,
93
- model: model.modelId,
94
- provider: 'vercel',
95
- input: options.posthog_privacy_mode ? '' : params.prompt,
96
- output: [{ content: generatedText, role: 'assistant' }],
97
- latency,
98
- baseURL: "",
99
- params: { posthog_properties: options } as any,
100
- httpStatus: 200,
101
- usage: {
102
- input_tokens: 0,
103
- output_tokens: 0,
104
- },
105
- });
106
- },
107
- });
95
+ flush() {
96
+ const latency = (Date.now() - startTime) / 1000
97
+ sendEventToPosthog({
98
+ client: phClient,
99
+ distinctId: options.posthogDistinctId,
100
+ traceId: options.posthogTraceId,
101
+ model: model.modelId,
102
+ provider: 'vercel',
103
+ input: options.posthogPrivacyMode ? '' : params.prompt,
104
+ output: [{ content: generatedText, role: 'assistant' }],
105
+ latency,
106
+ baseURL: '',
107
+ params: { posthog_properties: options } as any,
108
+ httpStatus: 200,
109
+ usage,
110
+ })
111
+ },
112
+ })
108
113
 
109
- return {
110
- stream: stream.pipeThrough(transformStream),
111
- ...rest,
112
- };
113
- } catch (error) {
114
- sendEventToPosthog({
115
- client: phClient,
116
- distinctId: options.posthog_distinct_id,
117
- traceId: options.posthog_trace_id,
118
- model: model.modelId,
119
- provider: 'vercel',
120
- input: options.posthog_privacy_mode ? '' : params.prompt,
121
- output: [],
122
- latency: 0,
123
- baseURL: "",
124
- params: { posthog_properties: options } as any,
125
- httpStatus: 500,
126
- usage: {
127
- input_tokens: 0,
128
- output_tokens: 0,
129
- },
130
- });
131
- throw error;
132
- }
133
- },
134
- };
114
+ return {
115
+ stream: stream.pipeThrough(transformStream),
116
+ ...rest,
117
+ }
118
+ } catch (error) {
119
+ sendEventToPosthog({
120
+ client: phClient,
121
+ distinctId: options.posthogDistinctId,
122
+ traceId: options.posthogTraceId,
123
+ model: model.modelId,
124
+ provider: 'vercel',
125
+ input: options.posthogPrivacyMode ? '' : params.prompt,
126
+ output: [],
127
+ latency: 0,
128
+ baseURL: '',
129
+ params: { posthog_properties: options } as any,
130
+ httpStatus: 500,
131
+ usage: {
132
+ input_tokens: 0,
133
+ output_tokens: 0,
134
+ },
135
+ })
136
+ throw error
137
+ }
138
+ },
139
+ }
135
140
 
136
- return middleware;
137
- };
141
+ return middleware
142
+ }
138
143
 
139
144
  export const wrapVercelLanguageModel = (
140
- model: LanguageModelV1,
141
- phClient: PostHog,
142
- options: CreateInstrumentationMiddlewareOptions
143
- ) => {
144
- const traceId = options.posthog_trace_id ?? uuidv4();
145
- const middleware = createInstrumentationMiddleware(phClient, model, {
146
- ...options,
147
- posthog_trace_id: traceId,
148
- posthog_distinct_id: options.posthog_distinct_id ?? traceId,
149
- });
145
+ model: LanguageModelV1,
146
+ phClient: PostHog,
147
+ options: CreateInstrumentationMiddlewareOptions
148
+ ): LanguageModelV1 => {
149
+ const traceId = options.posthogTraceId ?? uuidv4()
150
+ const middleware = createInstrumentationMiddleware(phClient, model, {
151
+ ...options,
152
+ posthogTraceId: traceId,
153
+ posthogDistinctId: options.posthogDistinctId ?? traceId,
154
+ })
150
155
 
151
- const wrappedModel = wrapLanguageModel({
152
- model,
153
- middleware,
154
- });
156
+ const wrappedModel = wrapLanguageModel({
157
+ model,
158
+ middleware,
159
+ })
155
160
 
156
- return wrappedModel;
157
- };
161
+ return wrappedModel
162
+ }