@posthog/ai 5.2.3 → 6.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -43,30 +43,171 @@ const getModelParams = params => {
   return modelParams;
 };
 const formatResponseAnthropic = response => {
-  // Example approach if "response.content" holds array of text segments, etc.
   const output = [];
+  const content = [];
   for (const choice of response.content ?? []) {
-    if (choice?.text) {
-      output.push({
-        role: 'assistant',
-        content: choice.text
+    if (choice?.type === 'text' && choice?.text) {
+      content.push({
+        type: 'text',
+        text: choice.text
+      });
+    } else if (choice?.type === 'tool_use' && choice?.name && choice?.id) {
+      content.push({
+        type: 'function',
+        id: choice.id,
+        function: {
+          name: choice.name,
+          arguments: choice.input || {}
+        }
       });
     }
   }
+  if (content.length > 0) {
+    output.push({
+      role: 'assistant',
+      content
+    });
+  }
   return output;
 };
 const formatResponseOpenAI = response => {
   const output = [];
-  for (const choice of response.choices ?? []) {
-    if (choice.message?.content) {
+  if (response.choices) {
+    for (const choice of response.choices) {
+      const content = [];
+      let role = 'assistant';
+      if (choice.message) {
+        if (choice.message.role) {
+          role = choice.message.role;
+        }
+        if (choice.message.content) {
+          content.push({
+            type: 'text',
+            text: choice.message.content
+          });
+        }
+        if (choice.message.tool_calls) {
+          for (const toolCall of choice.message.tool_calls) {
+            content.push({
+              type: 'function',
+              id: toolCall.id,
+              function: {
+                name: toolCall.function.name,
+                arguments: toolCall.function.arguments
+              }
+            });
+          }
+        }
+      }
+      if (content.length > 0) {
+        output.push({
+          role,
+          content
+        });
+      }
+    }
+  }
+  // Handle Responses API format
+  if (response.output) {
+    const content = [];
+    let role = 'assistant';
+    for (const item of response.output) {
+      if (item.type === 'message') {
+        role = item.role;
+        if (item.content && Array.isArray(item.content)) {
+          for (const contentItem of item.content) {
+            if (contentItem.type === 'output_text' && contentItem.text) {
+              content.push({
+                type: 'text',
+                text: contentItem.text
+              });
+            } else if (contentItem.text) {
+              content.push({
+                type: 'text',
+                text: contentItem.text
+              });
+            } else if (contentItem.type === 'input_image' && contentItem.image_url) {
+              content.push({
+                type: 'image',
+                image: contentItem.image_url
+              });
+            }
+          }
+        } else if (item.content) {
+          content.push({
+            type: 'text',
+            text: String(item.content)
+          });
+        }
+      } else if (item.type === 'function_call') {
+        content.push({
+          type: 'function',
+          id: item.call_id || item.id || '',
+          function: {
+            name: item.name,
+            arguments: item.arguments || {}
+          }
+        });
+      }
+    }
+    if (content.length > 0) {
       output.push({
-        role: choice.message.role,
-        content: choice.message.content
+        role,
+        content
       });
     }
   }
   return output;
 };
+const formatResponseGemini = response => {
+  const output = [];
+  if (response.candidates && Array.isArray(response.candidates)) {
+    for (const candidate of response.candidates) {
+      if (candidate.content && candidate.content.parts) {
+        const content = [];
+        for (const part of candidate.content.parts) {
+          if (part.text) {
+            content.push({
+              type: 'text',
+              text: part.text
+            });
+          } else if (part.functionCall) {
+            content.push({
+              type: 'function',
+              function: {
+                name: part.functionCall.name,
+                arguments: part.functionCall.args
+              }
+            });
+          }
+        }
+        if (content.length > 0) {
+          output.push({
+            role: 'assistant',
+            content
+          });
+        }
+      } else if (candidate.text) {
+        output.push({
+          role: 'assistant',
+          content: [{
+            type: 'text',
+            text: candidate.text
+          }]
+        });
+      }
+    }
+  } else if (response.text) {
+    output.push({
+      role: 'assistant',
+      content: [{
+        type: 'text',
+        text: response.text
+      }]
+    });
+  }
+  return output;
+};
 const mergeSystemPrompt = (params, provider) => {
   {
     const messages = params.messages || [];
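Note: the rewritten formatters above normalize every provider response into a shared { role, content: [...] } message shape instead of a flat string, so tool calls survive into analytics events. A minimal sketch of what the new formatResponseAnthropic emits, using an illustrative (not captured) Anthropic-style payload:

// Illustrative input; field names follow Anthropic's Messages API response shape.
const response = {
  content: [
    { type: 'text', text: 'Checking the weather now.' },
    { type: 'tool_use', id: 'toolu_123', name: 'get_weather', input: { city: 'Berlin' } }
  ]
};
formatResponseAnthropic(response);
// -> [{
//      role: 'assistant',
//      content: [
//        { type: 'text', text: 'Checking the weather now.' },
//        { type: 'function', id: 'toolu_123',
//          function: { name: 'get_weather', arguments: { city: 'Berlin' } } }
//      ]
//    }]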
@@ -96,6 +237,35 @@ const truncate = str => {
     return str;
   }
 };
+/**
+ * Extract available tool calls from the request parameters.
+ * These are the tools provided to the LLM, not the tool calls in the response.
+ */
+const extractAvailableToolCalls = (provider, params) => {
+  if (provider === 'anthropic') {
+    if (params.tools) {
+      return params.tools;
+    }
+    return null;
+  } else if (provider === 'gemini') {
+    if (params.config && params.config.tools) {
+      return params.config.tools;
+    }
+    return null;
+  } else if (provider === 'openai') {
+    if (params.tools) {
+      return params.tools;
+    }
+    return null;
+  } else if (provider === 'vercel') {
+    // Vercel AI SDK stores tools in params.mode.tools when mode type is 'regular'
+    if (params.mode?.type === 'regular' && params.mode.tools) {
+      return params.mode.tools;
+    }
+    return null;
+  }
+  return null;
+};
 function sanitizeValues(obj) {
   if (obj === undefined || obj === null) {
     return obj;
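Note: extractAvailableToolCalls is the single lookup point for request-side tool definitions; only their location differs per provider. A sketch with hypothetical params objects (shapes assumed from each SDK's request format):

extractAvailableToolCalls('openai', { tools: [{ type: 'function', function: { name: 'lookup' } }] }); // -> params.tools
extractAvailableToolCalls('gemini', { config: { tools: [{ functionDeclarations: [] }] } });           // -> params.config.tools
extractAvailableToolCalls('vercel', { mode: { type: 'regular', tools: [] } });                        // -> params.mode.tools
extractAvailableToolCalls('anthropic', {});                                                          // -> null (no tools given)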
@@ -265,6 +435,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
         }
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('openai', openAIParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -281,6 +452,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
         params: body,
         httpStatus: 200,
         usage,
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
     } catch (error) {
@@ -315,6 +487,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
     const wrappedPromise = parentPromise.then(async result => {
       if ('choices' in result) {
         const latency = (Date.now() - startTime) / 1000;
+        const availableTools = extractAvailableToolCalls('openai', openAIParams);
         await sendEventToPosthog({
           client: this.phClient,
           distinctId: posthogDistinctId,
@@ -333,6 +506,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
             reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
             cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0
           },
+          tools: availableTools,
           captureImmediate: posthogCaptureImmediate
         });
       }
@@ -409,6 +583,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         }
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('openai', openAIParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -423,6 +598,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         params: body,
         httpStatus: 200,
         usage,
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
     } catch (error) {
@@ -457,6 +633,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
     const wrappedPromise = parentPromise.then(async result => {
       if ('output' in result) {
         const latency = (Date.now() - startTime) / 1000;
+        const availableTools = extractAvailableToolCalls('openai', openAIParams);
         await sendEventToPosthog({
           client: this.phClient,
           distinctId: posthogDistinctId,
@@ -465,7 +642,9 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           model: openAIParams.model,
           provider: 'openai',
           input: openAIParams.input,
-          output: result.output,
+          output: formatResponseOpenAI({
+            output: result.output
+          }),
           latency,
           baseURL: this.baseURL ?? '',
           params: body,
@@ -476,6 +655,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
            reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
            cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0
          },
+          tools: availableTools,
          captureImmediate: posthogCaptureImmediate
        });
      }
@@ -955,7 +1135,7 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 const mapVercelParams = params => {
   return {
     temperature: params.temperature,
-    max_tokens: params.maxTokens,
+    max_output_tokens: params.maxOutputTokens,
     top_p: params.topP,
     frequency_penalty: params.frequencyPenalty,
     presence_penalty: params.presencePenalty,
@@ -963,78 +1143,67 @@ const mapVercelParams = params => {
     stream: params.stream
   };
 };
-const mapVercelPrompt = prompt => {
-  // normalize single inputs into an array of messages
-  let promptsArray;
-  if (typeof prompt === 'string') {
-    promptsArray = [{
-      role: 'user',
-      content: prompt
-    }];
-  } else if (!Array.isArray(prompt)) {
-    promptsArray = [prompt];
-  } else {
-    promptsArray = prompt;
-  }
+const mapVercelPrompt = messages => {
   // Map and truncate individual content
-  const inputs = promptsArray.map(p => {
-    let content = {};
-    if (Array.isArray(p.content)) {
-      content = p.content.map(c => {
-        if (c.type === 'text') {
-          return {
-            type: 'text',
-            content: truncate(c.text)
-          };
-        } else if (c.type === 'image') {
-          return {
-            type: 'image',
-            content: {
-              // if image is a url use it, or use "none supported"
-              image: c.image instanceof URL ? c.image.toString() : 'raw images not supported',
-              mimeType: c.mimeType
-            }
-          };
-        } else if (c.type === 'file') {
-          return {
-            type: 'file',
-            content: {
+  const inputs = messages.map(message => {
+    let content;
+    // Handle system role which has string content
+    if (message.role === 'system') {
+      content = [{
+        type: 'text',
+        text: truncate(String(message.content))
+      }];
+    } else {
+      // Handle other roles which have array content
+      if (Array.isArray(message.content)) {
+        content = message.content.map(c => {
+          if (c.type === 'text') {
+            return {
+              type: 'text',
+              text: truncate(c.text)
+            };
+          } else if (c.type === 'file') {
+            return {
+              type: 'file',
               file: c.data instanceof URL ? c.data.toString() : 'raw files not supported',
-              mimeType: c.mimeType
-            }
-          };
-        } else if (c.type === 'tool-call') {
-          return {
-            type: 'tool-call',
-            content: {
+              mediaType: c.mediaType
+            };
+          } else if (c.type === 'reasoning') {
+            return {
+              type: 'reasoning',
+              text: truncate(c.reasoning)
+            };
+          } else if (c.type === 'tool-call') {
+            return {
+              type: 'tool-call',
              toolCallId: c.toolCallId,
              toolName: c.toolName,
-              args: c.args
-            }
-          };
-        } else if (c.type === 'tool-result') {
-          return {
-            type: 'tool-result',
-            content: {
+              input: c.input
+            };
+          } else if (c.type === 'tool-result') {
+            return {
+              type: 'tool-result',
              toolCallId: c.toolCallId,
              toolName: c.toolName,
-              result: c.result,
+              output: c.output,
              isError: c.isError
-            }
+            };
+          }
+          return {
+            type: 'text',
+            text: ''
          };
-        }
-        return {
-          content: ''
-        };
-      });
-    } else {
-      content = {
-        type: 'text',
-        text: truncate(p.content)
-      };
+        });
+      } else {
+        // Fallback for non-array content
+        content = [{
+          type: 'text',
+          text: truncate(String(message.content))
+        }];
+      }
     }
     return {
-      role: p.role,
+      role: message.role,
       content
     };
   });
@@ -1066,52 +1235,75 @@ const mapVercelPrompt = prompt => {
   return inputs;
 };
 const mapVercelOutput = result => {
-  // normalize string results to object
-  const normalizedResult = typeof result === 'string' ? {
-    text: result
-  } : result;
-  const output = {
-    ...(normalizedResult.text ? {
-      text: normalizedResult.text
-    } : {}),
-    ...(normalizedResult.object ? {
-      object: normalizedResult.object
-    } : {}),
-    ...(normalizedResult.reasoning ? {
-      reasoning: normalizedResult.reasoning
-    } : {}),
-    ...(normalizedResult.response ? {
-      response: normalizedResult.response
-    } : {}),
-    ...(normalizedResult.finishReason ? {
-      finishReason: normalizedResult.finishReason
-    } : {}),
-    ...(normalizedResult.usage ? {
-      usage: normalizedResult.usage
-    } : {}),
-    ...(normalizedResult.warnings ? {
-      warnings: normalizedResult.warnings
-    } : {}),
-    ...(normalizedResult.providerMetadata ? {
-      toolCalls: normalizedResult.providerMetadata
-    } : {}),
-    ...(normalizedResult.files ? {
-      files: normalizedResult.files.map(file => ({
-        name: file.name,
-        size: file.size,
-        type: file.type
-      }))
-    } : {})
-  };
-  if (output.text && !output.object && !output.reasoning) {
+  const content = result.map(item => {
+    if (item.type === 'text') {
+      return {
+        type: 'text',
+        text: truncate(item.text)
+      };
+    }
+    if (item.type === 'tool-call') {
+      return {
+        type: 'tool-call',
+        id: item.toolCallId,
+        function: {
+          name: item.toolName,
+          arguments: item.args || JSON.stringify(item.arguments || {})
+        }
+      };
+    }
+    if (item.type === 'reasoning') {
+      return {
+        type: 'reasoning',
+        text: truncate(item.text)
+      };
+    }
+    if (item.type === 'file') {
+      // Handle files similar to input mapping - avoid large base64 data
+      let fileData;
+      if (item.data instanceof URL) {
+        fileData = item.data.toString();
+      } else if (typeof item.data === 'string') {
+        // Check if it's base64 data and potentially large
+        if (item.data.startsWith('data:') || item.data.length > 1000) {
+          fileData = `[${item.mediaType} file - ${item.data.length} bytes]`;
+        } else {
+          fileData = item.data;
+        }
+      } else {
+        fileData = `[binary ${item.mediaType} file]`;
+      }
+      return {
+        type: 'file',
+        name: 'generated_file',
+        mediaType: item.mediaType,
+        data: fileData
+      };
+    }
+    if (item.type === 'source') {
+      return {
+        type: 'source',
+        sourceType: item.sourceType,
+        id: item.id,
+        url: item.url || '',
+        title: item.title || ''
+      };
+    }
+    // Fallback for unknown types - try to extract text if possible
+    return {
+      type: 'text',
+      text: truncate(JSON.stringify(item))
+    };
+  });
+  if (content.length > 0) {
     return [{
-      content: truncate(output.text),
-      role: 'assistant'
+      role: 'assistant',
+      content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
     }];
   }
   // otherwise stringify and truncate
   try {
-    const jsonOutput = JSON.stringify(output);
+    const jsonOutput = JSON.stringify(result);
     return [{
       content: truncate(jsonOutput),
       role: 'assistant'
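Note: mapVercelOutput now expects the AI SDK v5 content array (the updated caller passes result.content) rather than the whole result object. A sketch of the mapping under that assumption, with a hypothetical content array:

mapVercelOutput([
  { type: 'text', text: 'Done.' },
  { type: 'tool-call', toolCallId: 'call_1', toolName: 'search', args: '{"q":"posthog"}' }
]);
// -> [{ role: 'assistant', content: [
//      { type: 'text', text: 'Done.' },
//      { type: 'tool-call', id: 'call_1', function: { name: 'search', arguments: '{"q":"posthog"}' } }
//    ]}]
// A single text item collapses to a plain string:
mapVercelOutput([{ type: 'text', text: 'Hi' }]); // -> [{ role: 'assistant', content: 'Hi' }]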
@@ -1137,14 +1329,14 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
       ...options,
       ...mapVercelParams(params)
     };
+    const availableTools = extractAvailableToolCalls('vercel', params);
     try {
       const result = await doGenerate();
-      const latency = (Date.now() - startTime) / 1000;
       const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
       const provider = options.posthogProviderOverride ?? extractProvider(model);
       const baseURL = ''; // cannot currently get baseURL from vercel
-      const content = mapVercelOutput(result);
-      // let tools = result.toolCalls
+      const content = mapVercelOutput(result.content);
+      const latency = (Date.now() - startTime) / 1000;
       const providerMetadata = result.providerMetadata;
       const additionalTokenValues = {
         ...(providerMetadata?.openai?.reasoningTokens ? {
@@ -1165,19 +1357,17 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
         model: modelId,
         provider: provider,
         input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-        output: [{
-          content,
-          role: 'assistant'
-        }],
+        output: content,
         latency,
         baseURL,
         params: mergedParams,
         httpStatus: 200,
         usage: {
-          inputTokens: result.usage.promptTokens,
-          outputTokens: result.usage.completionTokens,
+          inputTokens: result.usage.inputTokens,
+          outputTokens: result.usage.outputTokens,
           ...additionalTokenValues
         },
+        tools: availableTools,
         captureImmediate: options.posthogCaptureImmediate
       });
       return result;
@@ -1201,6 +1391,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
        },
        isError: true,
        error: truncate(JSON.stringify(error)),
+        tools: availableTools,
        captureImmediate: options.posthogCaptureImmediate
      });
      throw error;
@@ -1212,6 +1403,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
     }) => {
       const startTime = Date.now();
       let generatedText = '';
+      let reasoningText = '';
       let usage = {};
       const mergedParams = {
         ...options,
@@ -1219,6 +1411,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
       };
       const modelId = options.posthogModelOverride ?? model.modelId;
       const provider = options.posthogProviderOverride ?? extractProvider(model);
+      const availableTools = extractAvailableToolCalls('vercel', params);
       const baseURL = ''; // cannot currently get baseURL from vercel
       try {
         const {
@@ -1227,13 +1420,17 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
         } = await doStream();
         const transformStream = new TransformStream({
           transform(chunk, controller) {
+            // Handle new v5 streaming patterns
             if (chunk.type === 'text-delta') {
-              generatedText += chunk.textDelta;
+              generatedText += chunk.delta;
+            }
+            if (chunk.type === 'reasoning-delta') {
+              reasoningText += chunk.delta; // New in v5
             }
             if (chunk.type === 'finish') {
               usage = {
-                inputTokens: chunk.usage?.promptTokens,
-                outputTokens: chunk.usage?.completionTokens
+                inputTokens: chunk.usage?.inputTokens,
+                outputTokens: chunk.usage?.outputTokens
               };
               if (chunk.providerMetadata?.openai?.reasoningTokens) {
                 usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
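Note: the transform now follows AI SDK v5 stream-part shapes: text-delta and the new reasoning-delta both carry a delta field (v4 used textDelta), and finish usage reports inputTokens/outputTokens (v4: promptTokens/completionTokens). Illustrative chunks, assuming the v5 format:

// Hypothetical v5 stream parts as consumed by the transform above.
// { type: 'text-delta', delta: 'Hel' }                              -> appended to generatedText
// { type: 'reasoning-delta', delta: 'First, ' }                     -> appended to reasoningText
// { type: 'finish', usage: { inputTokens: 12, outputTokens: 34 } }  -> recorded as usage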
@@ -1252,6 +1449,25 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           },
           flush: async () => {
             const latency = (Date.now() - startTime) / 1000;
+            // Build content array similar to mapVercelOutput structure
+            const content = [];
+            if (reasoningText) {
+              content.push({
+                type: 'reasoning',
+                text: truncate(reasoningText)
+              });
+            }
+            if (generatedText) {
+              content.push({
+                type: 'text',
+                text: truncate(generatedText)
+              });
+            }
+            // Structure output like mapVercelOutput does
+            const output = content.length > 0 ? [{
+              role: 'assistant',
+              content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
+            }] : [];
             await sendEventToPosthog({
               client: phClient,
               distinctId: options.posthogDistinctId,
@@ -1259,15 +1475,13 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
               model: modelId,
               provider: provider,
               input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-              output: [{
-                content: generatedText,
-                role: 'assistant'
-              }],
+              output: output,
               latency,
               baseURL,
               params: mergedParams,
               httpStatus: 200,
               usage,
+              tools: availableTools,
               captureImmediate: options.posthogCaptureImmediate
             });
           }
@@ -1295,6 +1509,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
         },
         isError: true,
         error: truncate(JSON.stringify(error)),
+        tools: availableTools,
         captureImmediate: options.posthogCaptureImmediate
       });
       throw error;
@@ -1310,7 +1525,7 @@ const wrapVercelLanguageModel = (model, phClient, options) => {
     posthogTraceId: traceId,
     posthogDistinctId: options.posthogDistinctId
   });
-  const wrappedModel = ai.experimental_wrapLanguageModel({
+  const wrappedModel = ai.wrapLanguageModel({
     model,
     middleware
   });
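Note: the middleware is now attached via the stable ai.wrapLanguageModel export (experimental_wrapLanguageModel was removed in AI SDK v5). A minimal usage sketch, assuming @posthog/ai's documented withTracing helper; the key, model id, and distinct id are placeholders:

const { PostHog } = require('posthog-node');
const { withTracing } = require('@posthog/ai');
const { openai } = require('@ai-sdk/openai');

const phClient = new PostHog('<ph_project_api_key>');
const model = withTracing(openai('gpt-4.1-mini'), phClient, {
  posthogDistinctId: 'user_123'
});
// model can now be passed to generateText/streamText; events are captured automatically.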
@@ -1377,6 +1592,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
         }
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -1393,6 +1609,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
         params: body,
         httpStatus: 200,
         usage,
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
     } catch (error) {
@@ -1428,6 +1645,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
     const wrappedPromise = parentPromise.then(async result => {
       if ('content' in result) {
         const latency = (Date.now() - startTime) / 1000;
+        const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
         await sendEventToPosthog({
           client: this.phClient,
           distinctId: posthogDistinctId,
@@ -1446,6 +1664,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
             cacheCreationInputTokens: result.usage.cache_creation_input_tokens ?? 0,
             cacheReadInputTokens: result.usage.cache_read_input_tokens ?? 0
           },
+          tools: availableTools,
          captureImmediate: posthogCaptureImmediate
        });
      }
@@ -1508,6 +1727,7 @@ class WrappedModels {
     try {
       const response = await this.client.models.generateContent(geminiParams);
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('gemini', geminiParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -1515,7 +1735,7 @@ class WrappedModels {
         model: geminiParams.model,
         provider: 'gemini',
         input: this.formatInput(geminiParams.contents),
-        output: this.formatOutput(response),
+        output: formatResponseGemini(response),
         latency,
         baseURL: 'https://generativelanguage.googleapis.com',
         params: params,
@@ -1524,6 +1744,7 @@ class WrappedModels {
           inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
           outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0
         },
+        tools: availableTools,
        captureImmediate: posthogCaptureImmediate
      });
      return response;
@@ -1583,6 +1804,7 @@ class WrappedModels {
         yield chunk;
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('gemini', geminiParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -1599,6 +1821,7 @@ class WrappedModels {
         params: params,
         httpStatus: 200,
         usage,
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
     } catch (error) {
@@ -1680,30 +1903,6 @@ class WrappedModels {
       content: String(contents)
     }];
   }
-  formatOutput(response) {
-    if (response.text) {
-      return [{
-        role: 'assistant',
-        content: response.text
-      }];
-    }
-    if (response.candidates && Array.isArray(response.candidates)) {
-      return response.candidates.map(candidate => {
-        if (candidate.content && candidate.content.parts) {
-          const text = candidate.content.parts.filter(part => part.text).map(part => part.text).join('');
-          return {
-            role: 'assistant',
-            content: text
-          };
-        }
-        return {
-          role: 'assistant',
-          content: String(candidate)
-        };
-      });
-    }
-    return [];
-  }
 }
 
 function getDefaultExportFromCjs (x) {
@@ -2400,7 +2599,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     };
     if (extraParams) {
       generation.modelParams = getModelParams(extraParams.invocation_params);
-      if (extraParams.invocation_params.tools) {
+      if (extraParams.invocation_params && extraParams.invocation_params.tools) {
         generation.tools = extraParams.invocation_params.tools;
       }
     }
@@ -2509,7 +2708,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       $ai_base_url: run.baseUrl
     };
     if (run.tools) {
-      eventProperties['$ai_tools'] = withPrivacyMode(this.client, this.privacyMode, run.tools);
+      eventProperties['$ai_tools'] = run.tools;
     }
     if (output instanceof Error) {
       eventProperties['$ai_http_status'] = output.status || 500;
@@ -2531,13 +2730,20 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     let completions;
     if (output.generations && Array.isArray(output.generations)) {
       const lastGeneration = output.generations[output.generations.length - 1];
-      if (Array.isArray(lastGeneration)) {
-        completions = lastGeneration.map(gen => {
-          return {
-            role: 'assistant',
-            content: gen.text
-          };
-        });
+      if (Array.isArray(lastGeneration) && lastGeneration.length > 0) {
+        // Check if this is a ChatGeneration by looking at the first item
+        const isChatGeneration = 'message' in lastGeneration[0] && lastGeneration[0].message;
+        if (isChatGeneration) {
+          // For ChatGeneration, convert messages to dict format
+          completions = lastGeneration.map(gen => {
+            return this._convertMessageToDict(gen.message);
+          });
+        } else {
+          // For non-ChatGeneration, extract raw response
+          completions = lastGeneration.map(gen => {
+            return this._extractRawResponse(gen);
+          });
+        }
       }
     }
     if (completions) {
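Note: the handler now distinguishes ChatGeneration results (which carry a message) from plain LLM generations. A sketch of the two paths, with hypothetical generation objects:

// ChatGeneration: has a message -> converted via _convertMessageToDict(gen.message)
//   e.g. [{ text: 'hi', message: /* AIMessage with content 'hi' */ }]
// Plain generation: no message -> _extractRawResponse(gen) (defined in the next hunk)
//   returns gen.text, the message's additional_kwargs when text is empty (tool calls),
//   or '' otherwise.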
@@ -2588,6 +2794,19 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       }
     }));
   }
+  _extractRawResponse(generation) {
+    // Extract the response from the last response of the LLM call
+    // We return the text of the response if not empty
+    if (generation.text != null && generation.text.trim() !== '') {
+      return generation.text.trim();
+    } else if (generation.message) {
+      // Additional kwargs contains the response in case of tool usage
+      return generation.message.additional_kwargs || generation.message.additionalKwargs || {};
+    } else {
+      // Not tool usage, some LLM responses can be simply empty
+      return '';
+    }
+  }
   _convertMessageToDict(message) {
     let messageDict = {};
     const messageType = message.getType();