@posthog/ai 5.2.3 → 6.0.0

This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -43,30 +43,171 @@ const getModelParams = params => {
   return modelParams;
 };
 const formatResponseAnthropic = response => {
-  // Example approach if "response.content" holds array of text segments, etc.
   const output = [];
+  const content = [];
   for (const choice of response.content ?? []) {
-    if (choice?.text) {
-      output.push({
-        role: 'assistant',
-        content: choice.text
+    if (choice?.type === 'text' && choice?.text) {
+      content.push({
+        type: 'text',
+        text: choice.text
+      });
+    } else if (choice?.type === 'tool_use' && choice?.name && choice?.id) {
+      content.push({
+        type: 'function',
+        id: choice.id,
+        function: {
+          name: choice.name,
+          arguments: choice.input || {}
+        }
       });
     }
   }
+  if (content.length > 0) {
+    output.push({
+      role: 'assistant',
+      content
+    });
+  }
   return output;
 };
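
A minimal sketch of what the rewritten formatter now produces, assuming a hypothetical Anthropic Messages response with one text block and one tool_use block (the input object below is illustrative, not taken from the package):

    const response = {
      content: [
        { type: 'text', text: 'Checking the weather.' },
        { type: 'tool_use', id: 'toolu_1', name: 'get_weather', input: { city: 'Berlin' } }
      ]
    };
    formatResponseAnthropic(response);
    // -> [{
    //      role: 'assistant',
    //      content: [
    //        { type: 'text', text: 'Checking the weather.' },
    //        { type: 'function', id: 'toolu_1',
    //          function: { name: 'get_weather', arguments: { city: 'Berlin' } } }
    //      ]
    //    }]
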
 const formatResponseOpenAI = response => {
   const output = [];
-  for (const choice of response.choices ?? []) {
-    if (choice.message?.content) {
+  if (response.choices) {
+    for (const choice of response.choices) {
+      const content = [];
+      let role = 'assistant';
+      if (choice.message) {
+        if (choice.message.role) {
+          role = choice.message.role;
+        }
+        if (choice.message.content) {
+          content.push({
+            type: 'text',
+            text: choice.message.content
+          });
+        }
+        if (choice.message.tool_calls) {
+          for (const toolCall of choice.message.tool_calls) {
+            content.push({
+              type: 'function',
+              id: toolCall.id,
+              function: {
+                name: toolCall.function.name,
+                arguments: toolCall.function.arguments
+              }
+            });
+          }
+        }
+      }
+      if (content.length > 0) {
+        output.push({
+          role,
+          content
+        });
+      }
+    }
+  }
+  // Handle Responses API format
+  if (response.output) {
+    const content = [];
+    let role = 'assistant';
+    for (const item of response.output) {
+      if (item.type === 'message') {
+        role = item.role;
+        if (item.content && Array.isArray(item.content)) {
+          for (const contentItem of item.content) {
+            if (contentItem.type === 'output_text' && contentItem.text) {
+              content.push({
+                type: 'text',
+                text: contentItem.text
+              });
+            } else if (contentItem.text) {
+              content.push({
+                type: 'text',
+                text: contentItem.text
+              });
+            } else if (contentItem.type === 'input_image' && contentItem.image_url) {
+              content.push({
+                type: 'image',
+                image: contentItem.image_url
+              });
+            }
+          }
+        } else if (item.content) {
+          content.push({
+            type: 'text',
+            text: String(item.content)
+          });
+        }
+      } else if (item.type === 'function_call') {
+        content.push({
+          type: 'function',
+          id: item.call_id || item.id || '',
+          function: {
+            name: item.name,
+            arguments: item.arguments || {}
+          }
+        });
+      }
+    }
+    if (content.length > 0) {
       output.push({
-        role: choice.message.role,
-        content: choice.message.content
+        role,
+        content
       });
     }
   }
   return output;
 };
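
Both OpenAI surfaces now normalize to the same shape. A sketch with hypothetical inputs, covering a Chat Completions response (choices[].message) and a Responses API result (output[] items):

    formatResponseOpenAI({
      choices: [{ message: { role: 'assistant', content: 'Hi!' } }]
    });
    // -> [{ role: 'assistant', content: [{ type: 'text', text: 'Hi!' }] }]

    formatResponseOpenAI({
      output: [
        { type: 'message', role: 'assistant', content: [{ type: 'output_text', text: 'Hi!' }] },
        { type: 'function_call', call_id: 'call_1', name: 'get_weather', arguments: '{"city":"Berlin"}' }
      ]
    });
    // -> [{ role: 'assistant', content: [
    //      { type: 'text', text: 'Hi!' },
    //      { type: 'function', id: 'call_1',
    //        function: { name: 'get_weather', arguments: '{"city":"Berlin"}' } }
    //    ] }]
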
+const formatResponseGemini = response => {
+  const output = [];
+  if (response.candidates && Array.isArray(response.candidates)) {
+    for (const candidate of response.candidates) {
+      if (candidate.content && candidate.content.parts) {
+        const content = [];
+        for (const part of candidate.content.parts) {
+          if (part.text) {
+            content.push({
+              type: 'text',
+              text: part.text
+            });
+          } else if (part.functionCall) {
+            content.push({
+              type: 'function',
+              function: {
+                name: part.functionCall.name,
+                arguments: part.functionCall.args
+              }
+            });
+          }
+        }
+        if (content.length > 0) {
+          output.push({
+            role: 'assistant',
+            content
+          });
+        }
+      } else if (candidate.text) {
+        output.push({
+          role: 'assistant',
+          content: [{
+            type: 'text',
+            text: candidate.text
+          }]
+        });
+      }
+    }
+  } else if (response.text) {
+    output.push({
+      role: 'assistant',
+      content: [{
+        type: 'text',
+        text: response.text
+      }]
+    });
+  }
+  return output;
+};
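
The new Gemini formatter follows the same convention; note that Gemini functionCall parts carry no id, so none is emitted. A sketch with a hypothetical candidates payload:

    formatResponseGemini({
      candidates: [{
        content: {
          parts: [
            { text: 'Sure.' },
            { functionCall: { name: 'get_weather', args: { city: 'Berlin' } } }
          ]
        }
      }]
    });
    // -> [{ role: 'assistant', content: [
    //      { type: 'text', text: 'Sure.' },
    //      { type: 'function', function: { name: 'get_weather', arguments: { city: 'Berlin' } } }
    //    ] }]
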
 const mergeSystemPrompt = (params, provider) => {
   {
     const messages = params.messages || [];
@@ -96,6 +237,35 @@ const truncate = str => {
     return str;
   }
 };
+/**
+ * Extract available tool calls from the request parameters.
+ * These are the tools provided to the LLM, not the tool calls in the response.
+ */
+const extractAvailableToolCalls = (provider, params) => {
+  if (provider === 'anthropic') {
+    if (params.tools) {
+      return params.tools;
+    }
+    return null;
+  } else if (provider === 'gemini') {
+    if (params.config && params.config.tools) {
+      return params.config.tools;
+    }
+    return null;
+  } else if (provider === 'openai') {
+    if (params.tools) {
+      return params.tools;
+    }
+    return null;
+  } else if (provider === 'vercel') {
+    // Vercel AI SDK stores tools in params.mode.tools when mode type is 'regular'
+    if (params.mode?.type === 'regular' && params.mode.tools) {
+      return params.mode.tools;
+    }
+    return null;
+  }
+  return null;
+};
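
The wrappers below call this helper with the original request params so that the tools offered to the model travel with the captured event. Illustrative usage (the params object is hypothetical):

    const params = {
      model: 'gpt-4.1',
      tools: [{ type: 'function', function: { name: 'get_weather', parameters: {} } }]
    };
    extractAvailableToolCalls('openai', params); // -> params.tools
    extractAvailableToolCalls('gemini', params); // -> null (Gemini keeps tools under params.config.tools)
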
 function sanitizeValues(obj) {
   if (obj === undefined || obj === null) {
     return obj;
@@ -265,6 +435,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
     }
   }
   const latency = (Date.now() - startTime) / 1000;
+  const availableTools = extractAvailableToolCalls('openai', openAIParams);
   await sendEventToPosthog({
     client: this.phClient,
     distinctId: posthogDistinctId,
@@ -281,6 +452,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
     params: body,
     httpStatus: 200,
     usage,
+    tools: availableTools,
     captureImmediate: posthogCaptureImmediate
   });
 } catch (error) {
@@ -315,6 +487,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
 const wrappedPromise = parentPromise.then(async result => {
   if ('choices' in result) {
     const latency = (Date.now() - startTime) / 1000;
+    const availableTools = extractAvailableToolCalls('openai', openAIParams);
     await sendEventToPosthog({
       client: this.phClient,
       distinctId: posthogDistinctId,
@@ -333,6 +506,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
       reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
       cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0
     },
+    tools: availableTools,
     captureImmediate: posthogCaptureImmediate
   });
 }
@@ -409,6 +583,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
     }
   }
   const latency = (Date.now() - startTime) / 1000;
+  const availableTools = extractAvailableToolCalls('openai', openAIParams);
   await sendEventToPosthog({
     client: this.phClient,
     distinctId: posthogDistinctId,
@@ -423,6 +598,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
     params: body,
     httpStatus: 200,
     usage,
+    tools: availableTools,
     captureImmediate: posthogCaptureImmediate
   });
 } catch (error) {
@@ -457,6 +633,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 const wrappedPromise = parentPromise.then(async result => {
   if ('output' in result) {
     const latency = (Date.now() - startTime) / 1000;
+    const availableTools = extractAvailableToolCalls('openai', openAIParams);
     await sendEventToPosthog({
       client: this.phClient,
       distinctId: posthogDistinctId,
@@ -465,7 +642,9 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
       model: openAIParams.model,
       provider: 'openai',
       input: openAIParams.input,
-      output: result.output,
+      output: formatResponseOpenAI({
+        output: result.output
+      }),
       latency,
       baseURL: this.baseURL ?? '',
       params: body,
@@ -476,6 +655,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
       reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
       cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0
     },
+    tools: availableTools,
     captureImmediate: posthogCaptureImmediate
   });
 }
@@ -955,7 +1135,7 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 const mapVercelParams = params => {
   return {
     temperature: params.temperature,
-    max_tokens: params.maxTokens,
+    max_output_tokens: params.maxOutputTokens,
     top_p: params.topP,
     frequency_penalty: params.frequencyPenalty,
     presence_penalty: params.presencePenalty,
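
This tracks the AI SDK v5 rename of the maxTokens call setting to maxOutputTokens. A sketch with made-up values (settings left unset simply map through as undefined):

    mapVercelParams({ temperature: 0.2, maxOutputTokens: 1024, topP: 0.9 });
    // -> { temperature: 0.2, max_output_tokens: 1024, top_p: 0.9, ... }
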
@@ -963,78 +1143,67 @@ const mapVercelParams = params => {
     stream: params.stream
   };
 };
-const mapVercelPrompt = prompt => {
-  // normalize single inputs into an array of messages
-  let promptsArray;
-  if (typeof prompt === 'string') {
-    promptsArray = [{
-      role: 'user',
-      content: prompt
-    }];
-  } else if (!Array.isArray(prompt)) {
-    promptsArray = [prompt];
-  } else {
-    promptsArray = prompt;
-  }
+const mapVercelPrompt = messages => {
   // Map and truncate individual content
-  const inputs = promptsArray.map(p => {
-    let content = {};
-    if (Array.isArray(p.content)) {
-      content = p.content.map(c => {
-        if (c.type === 'text') {
-          return {
-            type: 'text',
-            content: truncate(c.text)
-          };
-        } else if (c.type === 'image') {
-          return {
-            type: 'image',
-            content: {
-              // if image is a url use it, or use "none supported"
-              image: c.image instanceof URL ? c.image.toString() : 'raw images not supported',
-              mimeType: c.mimeType
-            }
-          };
-        } else if (c.type === 'file') {
-          return {
-            type: 'file',
-            content: {
+  const inputs = messages.map(message => {
+    let content;
+    // Handle system role which has string content
+    if (message.role === 'system') {
+      content = [{
+        type: 'text',
+        text: truncate(String(message.content))
+      }];
+    } else {
+      // Handle other roles which have array content
+      if (Array.isArray(message.content)) {
+        content = message.content.map(c => {
+          if (c.type === 'text') {
+            return {
+              type: 'text',
+              text: truncate(c.text)
+            };
+          } else if (c.type === 'file') {
+            return {
+              type: 'file',
               file: c.data instanceof URL ? c.data.toString() : 'raw files not supported',
-              mimeType: c.mimeType
-            }
-          };
-        } else if (c.type === 'tool-call') {
-          return {
-            type: 'tool-call',
-            content: {
+              mediaType: c.mediaType
+            };
+          } else if (c.type === 'reasoning') {
+            return {
+              type: 'reasoning',
+              text: truncate(c.reasoning)
+            };
+          } else if (c.type === 'tool-call') {
+            return {
+              type: 'tool-call',
               toolCallId: c.toolCallId,
               toolName: c.toolName,
-              args: c.args
-            }
-          };
-        } else if (c.type === 'tool-result') {
-          return {
-            type: 'tool-result',
-            content: {
+              input: c.input
+            };
+          } else if (c.type === 'tool-result') {
+            return {
+              type: 'tool-result',
               toolCallId: c.toolCallId,
               toolName: c.toolName,
-              result: c.result,
+              output: c.output,
               isError: c.isError
-            }
+            };
+          }
+          return {
+            type: 'text',
+            text: ''
           };
-        }
-        return {
-          content: ''
-        };
-      });
-    } else {
-      content = {
-        type: 'text',
-        text: truncate(p.content)
-      };
+        });
+      } else {
+        // Fallback for non-array content
+        content = [{
+          type: 'text',
+          text: truncate(String(message.content))
+        }];
+      }
     }
     return {
-      role: p.role,
+      role: message.role,
       content
     };
   });
@@ -1066,7 +1235,32 @@ const mapVercelPrompt = prompt => {
   return inputs;
 };
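
Prompts now arrive in AI SDK v5 form: system content is a plain string, all other roles carry an array of typed parts, and the part fields follow the v5 names (mediaType, input, output instead of mimeType, args, result). A rough sketch, ignoring the overall truncation pass that follows (inputs are hypothetical):

    mapVercelPrompt([
      { role: 'system', content: 'Be terse.' },
      { role: 'user', content: [{ type: 'text', text: 'Hi' }] }
    ]);
    // -> roughly [
    //      { role: 'system', content: [{ type: 'text', text: 'Be terse.' }] },
    //      { role: 'user', content: [{ type: 'text', text: 'Hi' }] }
    //    ]
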
 const mapVercelOutput = result => {
-  // normalize string results to object
+  const content = [];
+  if (result.text) {
+    content.push({
+      type: 'text',
+      text: truncate(result.text)
+    });
+  }
+  if (result.toolCalls && Array.isArray(result.toolCalls)) {
+    for (const toolCall of result.toolCalls) {
+      content.push({
+        type: 'function',
+        id: toolCall.toolCallId,
+        function: {
+          name: toolCall.toolName,
+          arguments: typeof toolCall.args === 'string' ? toolCall.args : JSON.stringify(toolCall.args)
+        }
+      });
+    }
+  }
+  if (content.length > 0) {
+    return [{
+      role: 'assistant',
+      content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
+    }];
+  }
+  // Fallback to original behavior for other result types TODO: check if we can remove this
   const normalizedResult = typeof result === 'string' ? {
     text: result
   } : result;
@@ -1077,8 +1271,8 @@ const mapVercelOutput = result => {
   ...(normalizedResult.object ? {
     object: normalizedResult.object
   } : {}),
-  ...(normalizedResult.reasoning ? {
-    reasoning: normalizedResult.reasoning
+  ...(normalizedResult.reasoningText ? {
+    reasoning: normalizedResult.reasoningText
   } : {}),
   ...(normalizedResult.response ? {
     response: normalizedResult.response
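
mapVercelOutput now emits the same normalized assistant-message shape as the other providers, collapsing a lone text part to a plain string. A sketch with hypothetical results:

    mapVercelOutput({ text: 'Hello!' });
    // -> [{ role: 'assistant', content: 'Hello!' }] (single text part collapses to a string)

    mapVercelOutput({ toolCalls: [{ toolCallId: 'call_1', toolName: 'get_weather', args: { city: 'Berlin' } }] });
    // -> [{ role: 'assistant', content: [{ type: 'function', id: 'call_1',
    //      function: { name: 'get_weather', arguments: '{"city":"Berlin"}' } }] }]
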
@@ -1137,14 +1331,14 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
   ...options,
   ...mapVercelParams(params)
 };
+const availableTools = extractAvailableToolCalls('vercel', params);
 try {
   const result = await doGenerate();
-  const latency = (Date.now() - startTime) / 1000;
   const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
   const provider = options.posthogProviderOverride ?? extractProvider(model);
   const baseURL = ''; // cannot currently get baseURL from vercel
   const content = mapVercelOutput(result);
-  // let tools = result.toolCalls
+  const latency = (Date.now() - startTime) / 1000;
   const providerMetadata = result.providerMetadata;
   const additionalTokenValues = {
     ...(providerMetadata?.openai?.reasoningTokens ? {
@@ -1165,19 +1359,17 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
   model: modelId,
   provider: provider,
   input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-  output: [{
-    content,
-    role: 'assistant'
-  }],
+  output: content,
   latency,
   baseURL,
   params: mergedParams,
   httpStatus: 200,
   usage: {
-    inputTokens: result.usage.promptTokens,
-    outputTokens: result.usage.completionTokens,
+    inputTokens: result.usage.inputTokens,
+    outputTokens: result.usage.outputTokens,
     ...additionalTokenValues
   },
+  tools: availableTools,
   captureImmediate: options.posthogCaptureImmediate
 });
 return result;
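
Token accounting here follows the v5 rename of result.usage.promptTokens/completionTokens to inputTokens/outputTokens, and the output property switches from a hand-built wrapper to the normalized messages from mapVercelOutput. Made-up numbers for illustration:

    // Hypothetical v5 usage object as read above:
    // result.usage = { inputTokens: 812, outputTokens: 64 }
    // -> event usage: { inputTokens: 812, outputTokens: 64, ...additionalTokenValues }
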
@@ -1201,6 +1393,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
   },
   isError: true,
   error: truncate(JSON.stringify(error)),
+  tools: availableTools,
   captureImmediate: options.posthogCaptureImmediate
 });
 throw error;
@@ -1212,6 +1405,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 }) => {
   const startTime = Date.now();
   let generatedText = '';
+  let reasoningText = '';
   let usage = {};
   const mergedParams = {
     ...options,
@@ -1219,6 +1413,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 };
 const modelId = options.posthogModelOverride ?? model.modelId;
 const provider = options.posthogProviderOverride ?? extractProvider(model);
+const availableTools = extractAvailableToolCalls('vercel', params);
 const baseURL = ''; // cannot currently get baseURL from vercel
 try {
   const {
@@ -1227,13 +1422,17 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 } = await doStream();
 const transformStream = new TransformStream({
   transform(chunk, controller) {
+    // Handle new v5 streaming patterns
     if (chunk.type === 'text-delta') {
-      generatedText += chunk.textDelta;
+      generatedText += chunk.delta;
+    }
+    if (chunk.type === 'reasoning-delta') {
+      reasoningText += chunk.delta; // New in v5
     }
     if (chunk.type === 'finish') {
       usage = {
-        inputTokens: chunk.usage?.promptTokens,
-        outputTokens: chunk.usage?.completionTokens
+        inputTokens: chunk.usage?.inputTokens,
+        outputTokens: chunk.usage?.outputTokens
       };
       if (chunk.providerMetadata?.openai?.reasoningTokens) {
         usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
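
Stream handling tracks the v5 part shapes: text deltas carry a delta field (previously textDelta), and reasoning models emit separate reasoning-delta parts. An illustrative chunk sequence and what it accumulates:

    // Hypothetical v5 stream parts, for illustration only:
    // { type: 'reasoning-delta', delta: 'Check units. ' }
    // { type: 'text-delta', delta: 'It is ' }
    // { type: 'text-delta', delta: '21°C.' }
    // { type: 'finish', usage: { inputTokens: 42, outputTokens: 7 } }
    // After the stream flushes: generatedText === 'It is 21°C.' and reasoningText === 'Check units. ',
    // so the captured output below becomes 'Check units. \n\nIt is 21°C.'
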
@@ -1252,6 +1451,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 },
 flush: async () => {
   const latency = (Date.now() - startTime) / 1000;
+  const outputContent = reasoningText ? `${reasoningText}\n\n${generatedText}` : generatedText;
   await sendEventToPosthog({
     client: phClient,
     distinctId: options.posthogDistinctId,
@@ -1260,7 +1460,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
     provider: provider,
     input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
     output: [{
-      content: generatedText,
+      content: outputContent,
       role: 'assistant'
     }],
     latency,
@@ -1268,6 +1468,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
     params: mergedParams,
     httpStatus: 200,
     usage,
+    tools: availableTools,
     captureImmediate: options.posthogCaptureImmediate
   });
 }
@@ -1295,6 +1496,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
   },
   isError: true,
   error: truncate(JSON.stringify(error)),
+  tools: availableTools,
   captureImmediate: options.posthogCaptureImmediate
 });
 throw error;
@@ -1310,7 +1512,7 @@ const wrapVercelLanguageModel = (model, phClient, options) => {
   posthogTraceId: traceId,
   posthogDistinctId: options.posthogDistinctId
 });
-const wrappedModel = ai.experimental_wrapLanguageModel({
+const wrappedModel = ai.wrapLanguageModel({
   model,
   middleware
 });
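
wrapLanguageModel is the stable name for what the AI SDK previously exported as experimental_wrapLanguageModel, so the wrapper now calls the stable export. A sketch of the wiring, using the internal helper named in the hunk header (surrounding setup and the openai provider import are assumed):

    // Hypothetical usage, for illustration only:
    const tracedModel = wrapVercelLanguageModel(openai('gpt-4.1'), phClient, {
      posthogDistinctId: 'user_123'
    });
    // tracedModel wraps the model with the instrumentation middleware via ai.wrapLanguageModel
    // and can be passed to generateText / streamText as usual.
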
@@ -1377,6 +1579,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
     }
   }
   const latency = (Date.now() - startTime) / 1000;
+  const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
   await sendEventToPosthog({
     client: this.phClient,
     distinctId: posthogDistinctId,
@@ -1393,6 +1596,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
     params: body,
     httpStatus: 200,
     usage,
+    tools: availableTools,
     captureImmediate: posthogCaptureImmediate
   });
 } catch (error) {
@@ -1428,6 +1632,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
 const wrappedPromise = parentPromise.then(async result => {
   if ('content' in result) {
     const latency = (Date.now() - startTime) / 1000;
+    const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
     await sendEventToPosthog({
       client: this.phClient,
       distinctId: posthogDistinctId,
@@ -1446,6 +1651,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
       cacheCreationInputTokens: result.usage.cache_creation_input_tokens ?? 0,
       cacheReadInputTokens: result.usage.cache_read_input_tokens ?? 0
     },
+    tools: availableTools,
    captureImmediate: posthogCaptureImmediate
   });
 }
@@ -1508,6 +1714,7 @@ class WrappedModels {
 try {
   const response = await this.client.models.generateContent(geminiParams);
   const latency = (Date.now() - startTime) / 1000;
+  const availableTools = extractAvailableToolCalls('gemini', geminiParams);
   await sendEventToPosthog({
     client: this.phClient,
     distinctId: posthogDistinctId,
@@ -1515,7 +1722,7 @@ class WrappedModels {
     model: geminiParams.model,
     provider: 'gemini',
     input: this.formatInput(geminiParams.contents),
-    output: this.formatOutput(response),
+    output: formatResponseGemini(response),
     latency,
     baseURL: 'https://generativelanguage.googleapis.com',
     params: params,
@@ -1524,6 +1731,7 @@ class WrappedModels {
     inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
     outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0
   },
+  tools: availableTools,
   captureImmediate: posthogCaptureImmediate
 });
 return response;
@@ -1583,6 +1791,7 @@ class WrappedModels {
   yield chunk;
 }
 const latency = (Date.now() - startTime) / 1000;
+const availableTools = extractAvailableToolCalls('gemini', geminiParams);
 await sendEventToPosthog({
   client: this.phClient,
   distinctId: posthogDistinctId,
@@ -1599,6 +1808,7 @@ class WrappedModels {
   params: params,
   httpStatus: 200,
   usage,
+  tools: availableTools,
   captureImmediate: posthogCaptureImmediate
 });
 } catch (error) {
@@ -1680,30 +1890,6 @@ class WrappedModels {
     content: String(contents)
   }];
 }
-formatOutput(response) {
-  if (response.text) {
-    return [{
-      role: 'assistant',
-      content: response.text
-    }];
-  }
-  if (response.candidates && Array.isArray(response.candidates)) {
-    return response.candidates.map(candidate => {
-      if (candidate.content && candidate.content.parts) {
-        const text = candidate.content.parts.filter(part => part.text).map(part => part.text).join('');
-        return {
-          role: 'assistant',
-          content: text
-        };
-      }
-      return {
-        role: 'assistant',
-        content: String(candidate)
-      };
-    });
-  }
-  return [];
-}
 }

 function getDefaultExportFromCjs (x) {
@@ -2400,7 +2586,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
 };
 if (extraParams) {
   generation.modelParams = getModelParams(extraParams.invocation_params);
-  if (extraParams.invocation_params.tools) {
+  if (extraParams.invocation_params && extraParams.invocation_params.tools) {
     generation.tools = extraParams.invocation_params.tools;
   }
 }
@@ -2509,7 +2695,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
   $ai_base_url: run.baseUrl
 };
 if (run.tools) {
-  eventProperties['$ai_tools'] = withPrivacyMode(this.client, this.privacyMode, run.tools);
+  eventProperties['$ai_tools'] = run.tools;
 }
 if (output instanceof Error) {
   eventProperties['$ai_http_status'] = output.status || 500;
@@ -2531,13 +2717,20 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
 let completions;
 if (output.generations && Array.isArray(output.generations)) {
   const lastGeneration = output.generations[output.generations.length - 1];
-  if (Array.isArray(lastGeneration)) {
-    completions = lastGeneration.map(gen => {
-      return {
-        role: 'assistant',
-        content: gen.text
-      };
-    });
+  if (Array.isArray(lastGeneration) && lastGeneration.length > 0) {
+    // Check if this is a ChatGeneration by looking at the first item
+    const isChatGeneration = 'message' in lastGeneration[0] && lastGeneration[0].message;
+    if (isChatGeneration) {
+      // For ChatGeneration, convert messages to dict format
+      completions = lastGeneration.map(gen => {
+        return this._convertMessageToDict(gen.message);
+      });
+    } else {
+      // For non-ChatGeneration, extract raw response
+      completions = lastGeneration.map(gen => {
+        return this._extractRawResponse(gen);
+      });
+    }
   }
 }
 if (completions) {
@@ -2588,6 +2781,19 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     }
   }));
 }
+_extractRawResponse(generation) {
+  // Extract the response from the last response of the LLM call
+  // We return the text of the response if not empty
+  if (generation.text != null && generation.text.trim() !== '') {
+    return generation.text.trim();
+  } else if (generation.message) {
+    // Additional kwargs contains the response in case of tool usage
+    return generation.message.additional_kwargs || generation.message.additionalKwargs || {};
+  } else {
+    // Not tool usage, some LLM responses can be simply empty
+    return '';
+  }
+}
 _convertMessageToDict(message) {
   let messageDict = {};
   const messageType = message.getType();