@posthog/ai 5.2.2 → 6.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/LICENSE +245 -0
  2. package/{lib → dist}/anthropic/index.cjs +44 -17
  3. package/dist/anthropic/index.cjs.map +1 -0
  4. package/{lib → dist}/anthropic/index.mjs +41 -10
  5. package/dist/anthropic/index.mjs.map +1 -0
  6. package/{lib → dist}/gemini/index.cjs +68 -26
  7. package/dist/gemini/index.cjs.map +1 -0
  8. package/{lib → dist}/gemini/index.d.ts +0 -1
  9. package/{lib → dist}/gemini/index.mjs +67 -25
  10. package/dist/gemini/index.mjs.map +1 -0
  11. package/{lib → dist}/index.cjs +875 -601
  12. package/dist/index.cjs.map +1 -0
  13. package/{lib → dist}/index.d.ts +3 -3
  14. package/{lib → dist}/index.mjs +859 -579
  15. package/dist/index.mjs.map +1 -0
  16. package/{lib → dist}/langchain/index.cjs +178 -118
  17. package/dist/langchain/index.cjs.map +1 -0
  18. package/{lib → dist}/langchain/index.d.ts +1 -0
  19. package/{lib → dist}/langchain/index.mjs +175 -112
  20. package/dist/langchain/index.mjs.map +1 -0
  21. package/{lib → dist}/openai/index.cjs +113 -6
  22. package/dist/openai/index.cjs.map +1 -0
  23. package/{lib → dist}/openai/index.mjs +112 -5
  24. package/dist/openai/index.mjs.map +1 -0
  25. package/{lib → dist}/vercel/index.cjs +117 -82
  26. package/dist/vercel/index.cjs.map +1 -0
  27. package/{lib → dist}/vercel/index.d.ts +2 -2
  28. package/{lib → dist}/vercel/index.mjs +118 -81
  29. package/dist/vercel/index.mjs.map +1 -0
  30. package/package.json +45 -35
  31. package/CHANGELOG.md +0 -89
  32. package/index.ts +0 -1
  33. package/lib/anthropic/index.cjs.map +0 -1
  34. package/lib/anthropic/index.mjs.map +0 -1
  35. package/lib/gemini/index.cjs.map +0 -1
  36. package/lib/gemini/index.mjs.map +0 -1
  37. package/lib/index.cjs.map +0 -1
  38. package/lib/index.mjs.map +0 -1
  39. package/lib/langchain/index.cjs.map +0 -1
  40. package/lib/langchain/index.mjs.map +0 -1
  41. package/lib/openai/index.cjs.map +0 -1
  42. package/lib/openai/index.mjs.map +0 -1
  43. package/lib/vercel/index.cjs.map +0 -1
  44. package/lib/vercel/index.mjs.map +0 -1
  45. package/src/anthropic/index.ts +0 -211
  46. package/src/gemini/index.ts +0 -254
  47. package/src/index.ts +0 -13
  48. package/src/langchain/callbacks.ts +0 -640
  49. package/src/langchain/index.ts +0 -1
  50. package/src/openai/azure.ts +0 -481
  51. package/src/openai/index.ts +0 -498
  52. package/src/utils.ts +0 -287
  53. package/src/vercel/index.ts +0 -1
  54. package/src/vercel/middleware.ts +0 -393
  55. package/tests/callbacks.test.ts +0 -48
  56. package/tests/gemini.test.ts +0 -344
  57. package/tests/openai.test.ts +0 -403
  58. package/tsconfig.json +0 -10
  59. package/{lib → dist}/anthropic/index.d.ts +0 -0
  60. package/{lib → dist}/openai/index.d.ts +0 -0
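
The headline packaging change in 6.0.0: compiled output moves from lib/ to dist/ (with source maps now published under dist/), a LICENSE file is added, and the TypeScript sources, tests, tsconfig.json, and CHANGELOG are no longer shipped. For most consumers this is transparent, but any code that (hypothetically) deep-imported built files by path would need updating. A minimal sketch, assuming such deep imports existed and that package.json's exports permit them — the subpaths below are illustrative, not taken from the diff:

```ts
// Hypothetical deep imports, shown only to illustrate the lib → dist move.
// Imports through the package entry points are unaffected by the rename itself.

// 5.2.2
import { withTracing as withTracingV5 } from '@posthog/ai/lib/vercel/index.mjs'

// 6.0.0
import { withTracing } from '@posthog/ai/dist/vercel/index.mjs'
```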
package/src/utils.ts DELETED
@@ -1,287 +0,0 @@
- import { PostHog } from 'posthog-node'
- import { Buffer } from 'buffer'
- import OpenAIOrignal from 'openai'
- import AnthropicOriginal from '@anthropic-ai/sdk'
-
- type ChatCompletionCreateParamsBase = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParams
- type MessageCreateParams = AnthropicOriginal.Messages.MessageCreateParams
- type ResponseCreateParams = OpenAIOrignal.Responses.ResponseCreateParams
-
- // limit large outputs by truncating to 200kb (approx 200k bytes)
- export const MAX_OUTPUT_SIZE = 200000
- const STRING_FORMAT = 'utf8'
-
- export interface MonitoringParams {
-   posthogDistinctId?: string
-   posthogTraceId?: string
-   posthogProperties?: Record<string, any>
-   posthogPrivacyMode?: boolean
-   posthogGroups?: Record<string, any>
-   posthogModelOverride?: string
-   posthogProviderOverride?: string
-   posthogCostOverride?: CostOverride
-   posthogCaptureImmediate?: boolean
- }
-
- export interface CostOverride {
-   inputCost: number
-   outputCost: number
- }
-
- export const getModelParams = (
-   params: ((ChatCompletionCreateParamsBase | MessageCreateParams | ResponseCreateParams) & MonitoringParams) | null
- ): Record<string, any> => {
-   if (!params) {
-     return {}
-   }
-   const modelParams: Record<string, any> = {}
-   const paramKeys = [
-     'temperature',
-     'max_tokens',
-     'max_completion_tokens',
-     'top_p',
-     'frequency_penalty',
-     'presence_penalty',
-     'n',
-     'stop',
-     'stream',
-     'streaming',
-   ] as const
-
-   for (const key of paramKeys) {
-     if (key in params && (params as any)[key] !== undefined) {
-       modelParams[key] = (params as any)[key]
-     }
-   }
-   return modelParams
- }
-
- /**
-  * Helper to format responses (non-streaming) for consumption, mirroring Python's openai vs. anthropic approach.
-  */
- export const formatResponse = (response: any, provider: string): Array<{ role: string; content: string }> => {
-   if (!response) {
-     return []
-   }
-   if (provider === 'anthropic') {
-     return formatResponseAnthropic(response)
-   } else if (provider === 'openai') {
-     return formatResponseOpenAI(response)
-   } else if (provider === 'gemini') {
-     return formatResponseGemini(response)
-   }
-   return []
- }
-
- export const formatResponseAnthropic = (response: any): Array<{ role: string; content: string }> => {
-   // Example approach if "response.content" holds array of text segments, etc.
-   const output: Array<{ role: string; content: string }> = []
-   for (const choice of response.content ?? []) {
-     if (choice?.text) {
-       output.push({
-         role: 'assistant',
-         content: choice.text,
-       })
-     }
-   }
-   return output
- }
-
- export const formatResponseOpenAI = (response: any): Array<{ role: string; content: string }> => {
-   const output: Array<{ role: string; content: string }> = []
-   for (const choice of response.choices ?? []) {
-     if (choice.message?.content) {
-       output.push({
-         role: choice.message.role,
-         content: choice.message.content,
-       })
-     }
-   }
-   return output
- }
-
- export const formatResponseGemini = (response: any): Array<{ role: string; content: string }> => {
-   const output: Array<{ role: string; content: string }> = []
-
-   if (response.text) {
-     output.push({
-       role: 'assistant',
-       content: response.text,
-     })
-     return output
-   }
-
-   if (response.candidates && Array.isArray(response.candidates)) {
-     for (const candidate of response.candidates) {
-       if (candidate.content && candidate.content.parts) {
-         const text = candidate.content.parts
-           .filter((part: any) => part.text)
-           .map((part: any) => part.text)
-           .join('')
-         if (text) {
-           output.push({
-             role: 'assistant',
-             content: text,
-           })
-         }
-       }
-     }
-   }
-
-   return output
- }
-
- export const mergeSystemPrompt = (params: MessageCreateParams & MonitoringParams, provider: string): any => {
-   if (provider == 'anthropic') {
-     const messages = params.messages || []
-     if (!(params as any).system) {
-       return messages
-     }
-     const systemMessage = (params as any).system
-     return [{ role: 'system', content: systemMessage }, ...messages]
-   }
-   return params.messages
- }
-
- export const withPrivacyMode = (client: PostHog, privacyMode: boolean, input: any): any => {
-   return (client as any).privacy_mode || privacyMode ? null : input
- }
-
- export const truncate = (str: string): string => {
-   try {
-     const buffer = Buffer.from(str, STRING_FORMAT)
-     if (buffer.length <= MAX_OUTPUT_SIZE) {
-       return str
-     }
-     const truncatedBuffer = buffer.slice(0, MAX_OUTPUT_SIZE)
-     return `${truncatedBuffer.toString(STRING_FORMAT)}... [truncated]`
-   } catch (error) {
-     console.error('Error truncating, likely not a string')
-     return str
-   }
- }
-
- export type SendEventToPosthogParams = {
-   client: PostHog
-   distinctId?: string
-   traceId: string
-   model: string
-   provider: string
-   input: any
-   output: any
-   latency: number
-   baseURL: string
-   httpStatus: number
-   usage?: {
-     inputTokens?: number
-     outputTokens?: number
-     reasoningTokens?: any
-     cacheReadInputTokens?: any
-     cacheCreationInputTokens?: any
-   }
-   params: (ChatCompletionCreateParamsBase | MessageCreateParams | ResponseCreateParams) & MonitoringParams
-   isError?: boolean
-   error?: string
-   tools?: any
-   captureImmediate?: boolean
- }
-
- function sanitizeValues(obj: any): any {
-   if (obj === undefined || obj === null) {
-     return obj
-   }
-   const jsonSafe = JSON.parse(JSON.stringify(obj))
-   if (typeof jsonSafe === 'string') {
-     return Buffer.from(jsonSafe, STRING_FORMAT).toString(STRING_FORMAT)
-   } else if (Array.isArray(jsonSafe)) {
-     return jsonSafe.map(sanitizeValues)
-   } else if (jsonSafe && typeof jsonSafe === 'object') {
-     return Object.fromEntries(Object.entries(jsonSafe).map(([k, v]) => [k, sanitizeValues(v)]))
-   }
-   return jsonSafe
- }
-
- export const sendEventToPosthog = async ({
-   client,
-   distinctId,
-   traceId,
-   model,
-   provider,
-   input,
-   output,
-   latency,
-   baseURL,
-   params,
-   httpStatus = 200,
-   usage = {},
-   isError = false,
-   error,
-   tools,
-   captureImmediate = false,
- }: SendEventToPosthogParams): Promise<void> => {
-   if (!client.capture) {
-     return Promise.resolve()
-   }
-   // sanitize input and output for UTF-8 validity
-   const safeInput = sanitizeValues(input)
-   const safeOutput = sanitizeValues(output)
-   const safeError = sanitizeValues(error)
-
-   let errorData = {}
-   if (isError) {
-     errorData = {
-       $ai_is_error: true,
-       $ai_error: safeError,
-     }
-   }
-   let costOverrideData = {}
-   if (params.posthogCostOverride) {
-     const inputCostUSD = (params.posthogCostOverride.inputCost ?? 0) * (usage.inputTokens ?? 0)
-     const outputCostUSD = (params.posthogCostOverride.outputCost ?? 0) * (usage.outputTokens ?? 0)
-     costOverrideData = {
-       $ai_input_cost_usd: inputCostUSD,
-       $ai_output_cost_usd: outputCostUSD,
-       $ai_total_cost_usd: inputCostUSD + outputCostUSD,
-     }
-   }
-
-   const additionalTokenValues = {
-     ...(usage.reasoningTokens ? { $ai_reasoning_tokens: usage.reasoningTokens } : {}),
-     ...(usage.cacheReadInputTokens ? { $ai_cache_read_input_tokens: usage.cacheReadInputTokens } : {}),
-     ...(usage.cacheCreationInputTokens ? { $ai_cache_creation_input_tokens: usage.cacheCreationInputTokens } : {}),
-   }
-
-   const properties = {
-     $ai_provider: params.posthogProviderOverride ?? provider,
-     $ai_model: params.posthogModelOverride ?? model,
-     $ai_model_parameters: getModelParams(params),
-     $ai_input: withPrivacyMode(client, params.posthogPrivacyMode ?? false, safeInput),
-     $ai_output_choices: withPrivacyMode(client, params.posthogPrivacyMode ?? false, safeOutput),
-     $ai_http_status: httpStatus,
-     $ai_input_tokens: usage.inputTokens ?? 0,
-     $ai_output_tokens: usage.outputTokens ?? 0,
-     ...additionalTokenValues,
-     $ai_latency: latency,
-     $ai_trace_id: traceId,
-     $ai_base_url: baseURL,
-     ...params.posthogProperties,
-     ...(distinctId ? {} : { $process_person_profile: false }),
-     ...(tools ? { $ai_tools: tools } : {}),
-     ...errorData,
-     ...costOverrideData,
-   }
-
-   const event = {
-     distinctId: distinctId ?? traceId,
-     event: '$ai_generation',
-     properties,
-     groups: params.posthogGroups,
-   }
-
-   if (captureImmediate) {
-     // await capture promise to send single event in serverless environments
-     await client.captureImmediate(event)
-   } else {
-     client.capture(event)
-   }
- }
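
For context on what this file did: sendEventToPosthog was the shared capture path for every provider wrapper, emitting one $ai_generation event per model call. A minimal call-site sketch, assuming a configured posthog-node client; the ids, model name, token counts, and params below are illustrative, not taken from the package:

```ts
import { randomUUID } from 'crypto'
import { PostHog } from 'posthog-node'
// Removed in 6.0.0; relative path as it existed inside the 5.x sources.
import { sendEventToPosthog, truncate } from './utils'

const client = new PostHog('phc_example_key') // hypothetical project API key

const completionText = 'A potentially very long completion…'

await sendEventToPosthog({
  client,
  distinctId: 'user_123', // optional; traceId doubles as the distinct id when omitted
  traceId: randomUUID(),
  model: 'gpt-4.1-mini', // illustrative model name
  provider: 'openai',
  input: [{ role: 'user', content: 'Hi' }],
  output: [{ role: 'assistant', content: truncate(completionText) }], // capped at MAX_OUTPUT_SIZE bytes
  latency: 0.42, // seconds
  baseURL: 'https://api.openai.com/v1',
  httpStatus: 200,
  usage: { inputTokens: 12, outputTokens: 96 },
  params: { model: 'gpt-4.1-mini', messages: [], posthogPrivacyMode: false } as any,
})
```

The Monitoring knobs ride along on params: posthogPrivacyMode nulls out $ai_input and $ai_output_choices via withPrivacyMode, and posthogCostOverride computes the $ai_*_cost_usd properties from the token counts.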
package/src/vercel/index.ts DELETED
@@ -1 +0,0 @@
- export { wrapVercelLanguageModel as withTracing } from './middleware'
package/src/vercel/middleware.ts DELETED
@@ -1,393 +0,0 @@
- import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai'
- import type { LanguageModelV1, LanguageModelV1Middleware, LanguageModelV1Prompt, LanguageModelV1StreamPart } from 'ai'
- import { v4 as uuidv4 } from 'uuid'
- import { PostHog } from 'posthog-node'
- import { CostOverride, sendEventToPosthog, truncate, MAX_OUTPUT_SIZE } from '../utils'
- import { Buffer } from 'buffer'
-
- interface ClientOptions {
-   posthogDistinctId?: string
-   posthogTraceId?: string
-   posthogProperties?: Record<string, any>
-   posthogPrivacyMode?: boolean
-   posthogGroups?: Record<string, any>
-   posthogModelOverride?: string
-   posthogProviderOverride?: string
-   posthogCostOverride?: CostOverride
-   posthogCaptureImmediate?: boolean
- }
-
- interface CreateInstrumentationMiddlewareOptions {
-   posthogDistinctId?: string
-   posthogTraceId?: string
-   posthogProperties?: Record<string, any>
-   posthogPrivacyMode?: boolean
-   posthogGroups?: Record<string, any>
-   posthogModelOverride?: string
-   posthogProviderOverride?: string
-   posthogCostOverride?: CostOverride
-   posthogCaptureImmediate?: boolean
- }
-
- interface PostHogInput {
-   role: string
-   type?: string
-   content?:
-     | string
-     | {
-         [key: string]: any
-       }
- }
-
- const mapVercelParams = (params: any): Record<string, any> => {
-   return {
-     temperature: params.temperature,
-     max_tokens: params.maxTokens,
-     top_p: params.topP,
-     frequency_penalty: params.frequencyPenalty,
-     presence_penalty: params.presencePenalty,
-     stop: params.stopSequences,
-     stream: params.stream,
-   }
- }
-
- const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
-   // normalize single inputs into an array of messages
-   let promptsArray: any[]
-   if (typeof prompt === 'string') {
-     promptsArray = [{ role: 'user', content: prompt }]
-   } else if (!Array.isArray(prompt)) {
-     promptsArray = [prompt]
-   } else {
-     promptsArray = prompt
-   }
-
-   // Map and truncate individual content
-   const inputs: PostHogInput[] = promptsArray.map((p) => {
-     let content = {}
-     if (Array.isArray(p.content)) {
-       content = p.content.map((c: any) => {
-         if (c.type === 'text') {
-           return {
-             type: 'text',
-             content: truncate(c.text),
-           }
-         } else if (c.type === 'image') {
-           return {
-             type: 'image',
-             content: {
-               // if image is a url use it, or use "none supported"
-               image: c.image instanceof URL ? c.image.toString() : 'raw images not supported',
-               mimeType: c.mimeType,
-             },
-           }
-         } else if (c.type === 'file') {
-           return {
-             type: 'file',
-             content: {
-               file: c.data instanceof URL ? c.data.toString() : 'raw files not supported',
-               mimeType: c.mimeType,
-             },
-           }
-         } else if (c.type === 'tool-call') {
-           return {
-             type: 'tool-call',
-             content: {
-               toolCallId: c.toolCallId,
-               toolName: c.toolName,
-               args: c.args,
-             },
-           }
-         } else if (c.type === 'tool-result') {
-           return {
-             type: 'tool-result',
-             content: {
-               toolCallId: c.toolCallId,
-               toolName: c.toolName,
-               result: c.result,
-               isError: c.isError,
-             },
-           }
-         }
-         return {
-           content: '',
-         }
-       })
-     } else {
-       content = {
-         type: 'text',
-         text: truncate(p.content),
-       }
-     }
-     return {
-       role: p.role,
-       content,
-     }
-   })
-   try {
-     // Trim the inputs array until its JSON size fits within MAX_OUTPUT_SIZE
-     let serialized = JSON.stringify(inputs)
-     let removedCount = 0
-     // We need to keep track of the initial size of the inputs array because we're going to be mutating it
-     const initialSize = inputs.length
-     for (let i = 0; i < initialSize && Buffer.byteLength(serialized, 'utf8') > MAX_OUTPUT_SIZE; i++) {
-       inputs.shift()
-       removedCount++
-       serialized = JSON.stringify(inputs)
-     }
-     if (removedCount > 0) {
-       // Add one placeholder to indicate how many were removed
-       inputs.unshift({
-         role: 'posthog',
-         content: `[${removedCount} message${removedCount === 1 ? '' : 's'} removed due to size limit]`,
-       })
-     }
-   } catch (error) {
-     console.error('Error stringifying inputs', error)
-     return [{ role: 'posthog', content: 'An error occurred while processing your request. Please try again.' }]
-   }
-   return inputs
- }
-
- const mapVercelOutput = (result: any): PostHogInput[] => {
-   // normalize string results to object
-   const normalizedResult = typeof result === 'string' ? { text: result } : result
-   const output = {
-     ...(normalizedResult.text ? { text: normalizedResult.text } : {}),
-     ...(normalizedResult.object ? { object: normalizedResult.object } : {}),
-     ...(normalizedResult.reasoning ? { reasoning: normalizedResult.reasoning } : {}),
-     ...(normalizedResult.response ? { response: normalizedResult.response } : {}),
-     ...(normalizedResult.finishReason ? { finishReason: normalizedResult.finishReason } : {}),
-     ...(normalizedResult.usage ? { usage: normalizedResult.usage } : {}),
-     ...(normalizedResult.warnings ? { warnings: normalizedResult.warnings } : {}),
-     ...(normalizedResult.providerMetadata ? { toolCalls: normalizedResult.providerMetadata } : {}),
-     ...(normalizedResult.files
-       ? {
-           files: normalizedResult.files.map((file: any) => ({
-             name: file.name,
-             size: file.size,
-             type: file.type,
-           })),
-         }
-       : {}),
-   }
-   if (output.text && !output.object && !output.reasoning) {
-     return [{ content: truncate(output.text as string), role: 'assistant' }]
-   }
-   // otherwise stringify and truncate
-   try {
-     const jsonOutput = JSON.stringify(output)
-     return [{ content: truncate(jsonOutput), role: 'assistant' }]
-   } catch (error) {
-     console.error('Error stringifying output')
-     return []
-   }
- }
-
- const extractProvider = (model: LanguageModelV1): string => {
-   const provider = model.provider.toLowerCase()
-   const providerName = provider.split('.')[0]
-   return providerName
- }
-
- export const createInstrumentationMiddleware = (
-   phClient: PostHog,
-   model: LanguageModelV1,
-   options: CreateInstrumentationMiddlewareOptions
- ): LanguageModelV1Middleware => {
-   const middleware: LanguageModelV1Middleware = {
-     wrapGenerate: async ({ doGenerate, params }) => {
-       const startTime = Date.now()
-       const mergedParams = {
-         ...options,
-         ...mapVercelParams(params),
-       }
-       try {
-         const result = await doGenerate()
-         const latency = (Date.now() - startTime) / 1000
-         const modelId =
-           options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId)
-         const provider = options.posthogProviderOverride ?? extractProvider(model)
-         const baseURL = '' // cannot currently get baseURL from vercel
-         const content = mapVercelOutput(result)
-         // let tools = result.toolCalls
-         const providerMetadata = result.providerMetadata
-         const additionalTokenValues = {
-           ...(providerMetadata?.openai?.reasoningTokens
-             ? { reasoningTokens: providerMetadata.openai.reasoningTokens }
-             : {}),
-           ...(providerMetadata?.openai?.cachedPromptTokens
-             ? { cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens }
-             : {}),
-           ...(providerMetadata?.anthropic
-             ? {
-                 cacheReadInputTokens: providerMetadata.anthropic.cacheReadInputTokens,
-                 cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens,
-               }
-             : {}),
-         }
-         await sendEventToPosthog({
-           client: phClient,
-           distinctId: options.posthogDistinctId,
-           traceId: options.posthogTraceId ?? uuidv4(),
-           model: modelId,
-           provider: provider,
-           input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-           output: [{ content, role: 'assistant' }],
-           latency,
-           baseURL,
-           params: mergedParams as any,
-           httpStatus: 200,
-           usage: {
-             inputTokens: result.usage.promptTokens,
-             outputTokens: result.usage.completionTokens,
-             ...additionalTokenValues,
-           },
-           captureImmediate: options.posthogCaptureImmediate,
-         })
-
-         return result
-       } catch (error: any) {
-         const modelId = model.modelId
-         await sendEventToPosthog({
-           client: phClient,
-           distinctId: options.posthogDistinctId,
-           traceId: options.posthogTraceId ?? uuidv4(),
-           model: modelId,
-           provider: model.provider,
-           input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-           output: [],
-           latency: 0,
-           baseURL: '',
-           params: mergedParams as any,
-           httpStatus: error?.status ? error.status : 500,
-           usage: {
-             inputTokens: 0,
-             outputTokens: 0,
-           },
-           isError: true,
-           error: truncate(JSON.stringify(error)),
-           captureImmediate: options.posthogCaptureImmediate,
-         })
-         throw error
-       }
-     },
-
-     wrapStream: async ({ doStream, params }) => {
-       const startTime = Date.now()
-       let generatedText = ''
-       let usage: {
-         inputTokens?: number
-         outputTokens?: number
-         reasoningTokens?: any
-         cacheReadInputTokens?: any
-         cacheCreationInputTokens?: any
-       } = {}
-       const mergedParams = {
-         ...options,
-         ...mapVercelParams(params),
-       }
-
-       const modelId = options.posthogModelOverride ?? model.modelId
-       const provider = options.posthogProviderOverride ?? extractProvider(model)
-       const baseURL = '' // cannot currently get baseURL from vercel
-       try {
-         const { stream, ...rest } = await doStream()
-         const transformStream = new TransformStream<LanguageModelV1StreamPart, LanguageModelV1StreamPart>({
-           transform(chunk, controller) {
-             if (chunk.type === 'text-delta') {
-               generatedText += chunk.textDelta
-             }
-             if (chunk.type === 'finish') {
-               usage = {
-                 inputTokens: chunk.usage?.promptTokens,
-                 outputTokens: chunk.usage?.completionTokens,
-               }
-               if (chunk.providerMetadata?.openai?.reasoningTokens) {
-                 usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens
-               }
-               if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
-                 usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens
-               }
-               if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
-                 usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens
-               }
-               if (chunk.providerMetadata?.anthropic?.cacheCreationInputTokens) {
-                 usage.cacheCreationInputTokens = chunk.providerMetadata.anthropic.cacheCreationInputTokens
-               }
-             }
-             controller.enqueue(chunk)
-           },
-
-           flush: async () => {
-             const latency = (Date.now() - startTime) / 1000
-             await sendEventToPosthog({
-               client: phClient,
-               distinctId: options.posthogDistinctId,
-               traceId: options.posthogTraceId ?? uuidv4(),
-               model: modelId,
-               provider: provider,
-               input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-               output: [{ content: generatedText, role: 'assistant' }],
-               latency,
-               baseURL,
-               params: mergedParams as any,
-               httpStatus: 200,
-               usage,
-               captureImmediate: options.posthogCaptureImmediate,
-             })
-           },
-         })
-
-         return {
-           stream: stream.pipeThrough(transformStream),
-           ...rest,
-         }
-       } catch (error: any) {
-         await sendEventToPosthog({
-           client: phClient,
-           distinctId: options.posthogDistinctId,
-           traceId: options.posthogTraceId ?? uuidv4(),
-           model: modelId,
-           provider: provider,
-           input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-           output: [],
-           latency: 0,
-           baseURL: '',
-           params: mergedParams as any,
-           httpStatus: error?.status ? error.status : 500,
-           usage: {
-             inputTokens: 0,
-             outputTokens: 0,
-           },
-           isError: true,
-           error: truncate(JSON.stringify(error)),
-           captureImmediate: options.posthogCaptureImmediate,
-         })
-         throw error
-       }
-     },
-   }
-
-   return middleware
- }
-
- export const wrapVercelLanguageModel = (
-   model: LanguageModelV1,
-   phClient: PostHog,
-   options: ClientOptions
- ): LanguageModelV1 => {
-   const traceId = options.posthogTraceId ?? uuidv4()
-   const middleware = createInstrumentationMiddleware(phClient, model, {
-     ...options,
-     posthogTraceId: traceId,
-     posthogDistinctId: options.posthogDistinctId,
-   })
-
-   const wrappedModel = wrapLanguageModel({
-     model,
-     middleware,
-   })
-
-   return wrappedModel
- }
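
For reference, the consumer-facing surface of this file was wrapVercelLanguageModel, re-exported as withTracing by the one-line src/vercel/index.ts above. A minimal usage sketch against the Vercel AI SDK; the model id, provider package, and import subpath are assumptions for illustration, not taken from the diff:

```ts
import { generateText } from 'ai'
import { openai } from '@ai-sdk/openai' // assumed provider package
import { PostHog } from 'posthog-node'
import { withTracing } from '@posthog/ai/vercel' // assumed entry point for the vercel build

const phClient = new PostHog('phc_example_key') // hypothetical project API key

// Wrap the model once; each generate/stream call is then captured as an $ai_generation event.
const model = withTracing(openai('gpt-4o-mini'), phClient, {
  posthogDistinctId: 'user_123',
  posthogProperties: { conversation_id: 'abc' },
  posthogPrivacyMode: false,
})

const { text } = await generateText({ model, prompt: 'Say hi' })
console.log(text)

await phClient.shutdown() // flush queued events before the process exits
```

Streaming calls go through wrapStream, which accumulates text-delta chunks and sends the capture from the TransformStream's flush callback, so the event fires only after the stream completes.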