@posthog/ai 4.0.1 → 4.2.0

@@ -2,7 +2,8 @@ import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai'
  import type { LanguageModelV1, LanguageModelV1Middleware, LanguageModelV1Prompt, LanguageModelV1StreamPart } from 'ai'
  import { v4 as uuidv4 } from 'uuid'
  import { PostHog } from 'posthog-node'
- import { CostOverride, sendEventToPosthog } from '../utils'
+ import { CostOverride, sendEventToPosthog, truncate, MAX_OUTPUT_SIZE } from '../utils'
+ import { Buffer } from 'buffer'

  interface ClientOptions {
    posthogDistinctId?: string
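
The new `truncate` and `MAX_OUTPUT_SIZE` imports come from `../utils`, which is not part of this diff, so their actual definitions are not visible here. A minimal sketch of the shape the rest of this file relies on, with a hypothetical size budget and slicing strategy (both assumptions, not the package's implementation):

    // Hypothetical sketch of the ../utils helpers referenced above; the published
    // package's limit and truncation strategy may differ.
    export const MAX_OUTPUT_SIZE = 200_000 // assumed size budget for captured content

    export const truncate = (value: any): any =>
      typeof value === 'string' && value.length > MAX_OUTPUT_SIZE
        ? value.slice(0, MAX_OUTPUT_SIZE) + '... [truncated]'
        : value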
@@ -13,6 +14,7 @@ interface ClientOptions {
    posthogModelOverride?: string
    posthogProviderOverride?: string
    posthogCostOverride?: CostOverride
+   fullDebug?: boolean
  }

  interface CreateInstrumentationMiddlewareOptions {
@@ -24,6 +26,7 @@ interface CreateInstrumentationMiddlewareOptions {
    posthogModelOverride?: string
    posthogProviderOverride?: string
    posthogCostOverride?: CostOverride
+   fullDebug?: boolean
  }

  interface PostHogInput {
@@ -49,14 +52,25 @@ const mapVercelParams = (params: any): Record<string, any> => {
  }

  const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
-   return prompt.map((p) => {
+   // normalize single inputs into an array of messages
+   let promptsArray: any[]
+   if (typeof prompt === 'string') {
+     promptsArray = [{ role: 'user', content: prompt }]
+   } else if (!Array.isArray(prompt)) {
+     promptsArray = [prompt]
+   } else {
+     promptsArray = prompt
+   }
+
+   // Map and truncate individual content
+   const inputs: PostHogInput[] = promptsArray.map((p) => {
      let content = {}
      if (Array.isArray(p.content)) {
-       content = p.content.map((c) => {
+       content = p.content.map((c: any) => {
          if (c.type === 'text') {
            return {
              type: 'text',
-             content: c.text,
+             content: truncate(c.text),
            }
          } else if (c.type === 'image') {
            return {
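
Previously `mapVercelPrompt` called `prompt.map(...)` directly, which throws a TypeError when the prompt is a bare string or a single message object. The normalization branch added above accepts all three shapes; a standalone illustration of just that step (the `normalizePrompt` name exists only in this example):

    // Mirrors the normalization branch added above, pulled out for clarity.
    type Message = { role: string; content: unknown }

    const normalizePrompt = (prompt: unknown): Message[] => {
      if (typeof prompt === 'string') return [{ role: 'user', content: prompt }] // bare string
      if (!Array.isArray(prompt)) return [prompt as Message] // single message object
      return prompt as Message[] // already an array of messages
    }

    // normalizePrompt('hello')                              -> [{ role: 'user', content: 'hello' }]
    // normalizePrompt({ role: 'system', content: 'rules' }) -> [{ role: 'system', content: 'rules' }]
    // normalizePrompt([{ role: 'user', content: 'hi' }])    -> unchanged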
@@ -102,7 +116,7 @@ const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
      } else {
        content = {
          type: 'text',
-         text: p.content,
+         text: truncate(p.content),
        }
      }
      return {
@@ -110,28 +124,59 @@ const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
        content,
      }
    })
+   try {
+     // Trim the inputs array until its JSON size fits within MAX_OUTPUT_SIZE
+     let serialized = JSON.stringify(inputs)
+     while (Buffer.byteLength(serialized, 'utf8') > MAX_OUTPUT_SIZE && inputs.length > 0) {
+       // Remove oldest message
+       inputs.shift()
+       // add blank message to beginning of array
+       inputs.unshift({ role: 'assistant', content: '[removed message due to size limit]' })
+       serialized = JSON.stringify(inputs)
+     }
+   } catch (error) {
+     console.error('Error stringifying inputs')
+     return [{ role: 'posthog', content: 'An error occurred while processing your request. Please try again.' }]
+   }
+   return inputs
  }

  const mapVercelOutput = (result: any): PostHogInput[] => {
+   // normalize string results to object
+   const normalizedResult = typeof result === 'string' ? { text: result } : result
    const output = {
-     ...(result.text ? { text: result.text } : {}),
-     ...(result.object ? { object: result.object } : {}),
-     ...(result.reasoning ? { reasoning: result.reasoning } : {}),
-     ...(result.response ? { response: result.response } : {}),
-     ...(result.finishReason ? { finishReason: result.finishReason } : {}),
-     ...(result.usage ? { usage: result.usage } : {}),
-     ...(result.warnings ? { warnings: result.warnings } : {}),
-     ...(result.providerMetadata ? { toolCalls: result.providerMetadata } : {}),
+     ...(normalizedResult.text ? { text: normalizedResult.text } : {}),
+     ...(normalizedResult.object ? { object: normalizedResult.object } : {}),
+     ...(normalizedResult.reasoning ? { reasoning: normalizedResult.reasoning } : {}),
+     ...(normalizedResult.response ? { response: normalizedResult.response } : {}),
+     ...(normalizedResult.finishReason ? { finishReason: normalizedResult.finishReason } : {}),
+     ...(normalizedResult.usage ? { usage: normalizedResult.usage } : {}),
+     ...(normalizedResult.warnings ? { warnings: normalizedResult.warnings } : {}),
+     ...(normalizedResult.providerMetadata ? { toolCalls: normalizedResult.providerMetadata } : {}),
+     ...(normalizedResult.files
+       ? {
+           files: normalizedResult.files.map((file: any) => ({
+             name: file.name,
+             size: file.size,
+             type: file.type,
+           })),
+         }
+       : {}),
    }
-   // if text and no object or reasoning, return text
    if (output.text && !output.object && !output.reasoning) {
-     return [{ content: output.text, role: 'assistant' }]
+     return [{ content: truncate(output.text as string), role: 'assistant' }]
+   }
+   // otherwise stringify and truncate
+   try {
+     const jsonOutput = JSON.stringify(output)
+     return [{ content: truncate(jsonOutput), role: 'assistant' }]
+   } catch (error) {
+     console.error('Error stringifying output')
+     return []
    }
-   return [{ content: JSON.stringify(output), role: 'assistant' }]
  }

  const extractProvider = (model: LanguageModelV1): string => {
-   // vercel provider is in the format of provider.endpoint
    const provider = model.provider.toLowerCase()
    const providerName = provider.split('.')[0]
    return providerName
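
The try block added to `mapVercelPrompt` bounds the serialized size of `inputs`: while the JSON payload exceeds `MAX_OUTPUT_SIZE`, the oldest message is shifted off and a placeholder is kept at the front so the captured event still shows that history was dropped. A simplified, terminating variant of the same idea, not the package's exact loop (it inserts the placeholder once, after trimming, and does not account for the few bytes the placeholder itself adds):

    import { Buffer } from 'buffer'

    type Message = { role: string; content: unknown }

    // Drop oldest messages until the JSON encoding fits the limit, then mark the removal.
    const trimToLimit = (messages: Message[], limit: number): Message[] => {
      const trimmed = [...messages]
      let removed = 0
      while (trimmed.length > 0 && Buffer.byteLength(JSON.stringify(trimmed), 'utf8') > limit) {
        trimmed.shift()
        removed++
      }
      if (removed > 0) {
        trimmed.unshift({ role: 'assistant', content: '[removed message due to size limit]' })
      }
      return trimmed
    }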
@@ -190,6 +235,7 @@ export const createInstrumentationMiddleware = (
          outputTokens: result.usage.completionTokens,
          ...additionalTokenValues,
        },
+       fullDebug: options.fullDebug,
      })

      return result
@@ -212,7 +258,8 @@ export const createInstrumentationMiddleware = (
          outputTokens: 0,
        },
        isError: true,
-       error: JSON.stringify(error),
+       error: truncate(JSON.stringify(error)),
+       fullDebug: options.fullDebug,
      })
      throw error
    }
@@ -279,6 +326,7 @@ export const createInstrumentationMiddleware = (
          params: mergedParams as any,
          httpStatus: 200,
          usage,
+         fullDebug: options.fullDebug,
        })
      },
    })
@@ -305,7 +353,8 @@ export const createInstrumentationMiddleware = (
          outputTokens: 0,
        },
        isError: true,
-       error: JSON.stringify(error),
+       error: truncate(JSON.stringify(error)),
+       fullDebug: options.fullDebug,
      })
      throw error
    }
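
Every `sendEventToPosthog` call now forwards `options.fullDebug`. The parameter list of `createInstrumentationMiddleware` is not visible in this diff, so the wiring below is a hypothetical sketch: it assumes a `(client, model, options)` argument order and that the factory is exported from the package root, and it uses the Vercel AI SDK's `wrapLanguageModel` (imported at the top of this file) with the `@ai-sdk/openai` provider:

    import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai'
    import { openai } from '@ai-sdk/openai'
    import { PostHog } from 'posthog-node'
    // Assumption: the middleware factory defined in this file is re-exported from the package root.
    import { createInstrumentationMiddleware } from '@posthog/ai'

    const phClient = new PostHog('<ph_project_api_key>', { host: 'https://us.i.posthog.com' })

    // Assumed argument order; check the package's exports for the actual call shape.
    const tracedModel = wrapLanguageModel({
      model: openai('gpt-4o-mini'),
      middleware: createInstrumentationMiddleware(phClient, openai('gpt-4o-mini'), {
        posthogDistinctId: 'user_123',
        fullDebug: true, // new flag in this release, forwarded to sendEventToPosthog
      }),
    })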