@posthog/ai 6.0.0 → 6.1.0

This diff shows the changes between publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their public registries.
package/dist/index.mjs CHANGED
@@ -1215,77 +1215,75 @@ const mapVercelPrompt = messages => {
   return inputs;
 };
 const mapVercelOutput = result => {
-  const content = [];
-  if (result.text) {
-    content.push({
-      type: 'text',
-      text: truncate(result.text)
-    });
-  }
-  if (result.toolCalls && Array.isArray(result.toolCalls)) {
-    for (const toolCall of result.toolCalls) {
-      content.push({
-        type: 'function',
-        id: toolCall.toolCallId,
+  const content = result.map(item => {
+    if (item.type === 'text') {
+      return {
+        type: 'text',
+        text: truncate(item.text)
+      };
+    }
+    if (item.type === 'tool-call') {
+      return {
+        type: 'tool-call',
+        id: item.toolCallId,
         function: {
-          name: toolCall.toolName,
-          arguments: typeof toolCall.args === 'string' ? toolCall.args : JSON.stringify(toolCall.args)
+          name: item.toolName,
+          arguments: item.args || JSON.stringify(item.arguments || {})
         }
-      });
+      };
     }
-  }
+    if (item.type === 'reasoning') {
+      return {
+        type: 'reasoning',
+        text: truncate(item.text)
+      };
+    }
+    if (item.type === 'file') {
+      // Handle files similar to input mapping - avoid large base64 data
+      let fileData;
+      if (item.data instanceof URL) {
+        fileData = item.data.toString();
+      } else if (typeof item.data === 'string') {
+        // Check if it's base64 data and potentially large
+        if (item.data.startsWith('data:') || item.data.length > 1000) {
+          fileData = `[${item.mediaType} file - ${item.data.length} bytes]`;
+        } else {
+          fileData = item.data;
+        }
+      } else {
+        fileData = `[binary ${item.mediaType} file]`;
+      }
+      return {
+        type: 'file',
+        name: 'generated_file',
+        mediaType: item.mediaType,
+        data: fileData
+      };
+    }
+    if (item.type === 'source') {
+      return {
+        type: 'source',
+        sourceType: item.sourceType,
+        id: item.id,
+        url: item.url || '',
+        title: item.title || ''
+      };
+    }
+    // Fallback for unknown types - try to extract text if possible
+    return {
+      type: 'text',
+      text: truncate(JSON.stringify(item))
+    };
+  });
   if (content.length > 0) {
     return [{
       role: 'assistant',
       content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
     }];
   }
-  // Fallback to original behavior for other result types TODO: check if we can remove this
-  const normalizedResult = typeof result === 'string' ? {
-    text: result
-  } : result;
-  const output = {
-    ...(normalizedResult.text ? {
-      text: normalizedResult.text
-    } : {}),
-    ...(normalizedResult.object ? {
-      object: normalizedResult.object
-    } : {}),
-    ...(normalizedResult.reasoningText ? {
-      reasoning: normalizedResult.reasoningText
-    } : {}),
-    ...(normalizedResult.response ? {
-      response: normalizedResult.response
-    } : {}),
-    ...(normalizedResult.finishReason ? {
-      finishReason: normalizedResult.finishReason
-    } : {}),
-    ...(normalizedResult.usage ? {
-      usage: normalizedResult.usage
-    } : {}),
-    ...(normalizedResult.warnings ? {
-      warnings: normalizedResult.warnings
-    } : {}),
-    ...(normalizedResult.providerMetadata ? {
-      toolCalls: normalizedResult.providerMetadata
-    } : {}),
-    ...(normalizedResult.files ? {
-      files: normalizedResult.files.map(file => ({
-        name: file.name,
-        size: file.size,
-        type: file.type
-      }))
-    } : {})
-  };
-  if (output.text && !output.object && !output.reasoning) {
-    return [{
-      content: truncate(output.text),
-      role: 'assistant'
-    }];
-  }
   // otherwise stringify and truncate
   try {
-    const jsonOutput = JSON.stringify(output);
+    const jsonOutput = JSON.stringify(result);
     return [{
       content: truncate(jsonOutput),
       role: 'assistant'
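
Note: the rewritten mapVercelOutput consumes the AI SDK v5 content-part array (items typed 'text', 'tool-call', 'reasoning', 'file', 'source') instead of the v4-style { text, toolCalls } result object. A minimal sketch of the mapping for the common part types; the truncate stub and the sample parts below are invented for illustration, not taken from the package:

    // Stand-in for the bundle's truncate helper (assumed behavior).
    const truncate = s => (s.length > 200 ? s.slice(0, 200) + '...' : s);

    // Example AI SDK v5 content array, as passed via mapVercelOutput(result.content).
    const parts = [
      { type: 'reasoning', text: 'The user wants the weather, so call the tool.' },
      { type: 'text', text: 'It is sunny in Lisbon.' },
      { type: 'tool-call', toolCallId: 'call_1', toolName: 'getWeather', args: '{"city":"Lisbon"}' }
    ];

    const content = parts.map(item => {
      if (item.type === 'text' || item.type === 'reasoning') {
        return { type: item.type, text: truncate(item.text) };
      }
      if (item.type === 'tool-call') {
        return {
          type: 'tool-call',
          id: item.toolCallId,
          function: {
            name: item.toolName,
            arguments: item.args || JSON.stringify(item.arguments || {})
          }
        };
      }
      // Unknown part types fall back to stringified text, as in the diff.
      return { type: 'text', text: truncate(JSON.stringify(item)) };
    });

    console.log([{ role: 'assistant', content }]);
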
@@ -1317,21 +1315,21 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
     const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
     const provider = options.posthogProviderOverride ?? extractProvider(model);
     const baseURL = ''; // cannot currently get baseURL from vercel
-    const content = mapVercelOutput(result);
+    const content = mapVercelOutput(result.content);
     const latency = (Date.now() - startTime) / 1000;
     const providerMetadata = result.providerMetadata;
     const additionalTokenValues = {
-      ...(providerMetadata?.openai?.reasoningTokens ? {
-        reasoningTokens: providerMetadata.openai.reasoningTokens
-      } : {}),
-      ...(providerMetadata?.openai?.cachedPromptTokens ? {
-        cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens
-      } : {}),
       ...(providerMetadata?.anthropic ? {
-        cacheReadInputTokens: providerMetadata.anthropic.cacheReadInputTokens,
         cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
       } : {})
     };
+    const usage = {
+      inputTokens: result.usage.inputTokens,
+      outputTokens: result.usage.outputTokens,
+      reasoningTokens: result.usage.reasoningTokens,
+      cacheReadInputTokens: result.usage.cachedInputTokens,
+      ...additionalTokenValues
+    };
     await sendEventToPosthog({
       client: phClient,
       distinctId: options.posthogDistinctId,
@@ -1344,11 +1342,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
       baseURL,
       params: mergedParams,
       httpStatus: 200,
-      usage: {
-        inputTokens: result.usage.inputTokens,
-        outputTokens: result.usage.outputTokens,
-        ...additionalTokenValues
-      },
+      usage,
       tools: availableTools,
       captureImmediate: options.posthogCaptureImmediate
     });
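
Note: the non-streaming path now builds usage straight from the v5 result.usage fields, dropping the OpenAI providerMetadata fallbacks; only Anthropic's cacheCreationInputTokens still comes from provider metadata. A sketch of the resulting shape; field names mirror the diff, token counts are invented:

    // Illustrative v5-style result (values invented).
    const result = {
      usage: { inputTokens: 1200, outputTokens: 340, reasoningTokens: 96, cachedInputTokens: 800 },
      providerMetadata: { anthropic: { cacheCreationInputTokens: 400 } }
    };

    const additionalTokenValues = {
      ...(result.providerMetadata?.anthropic ? {
        cacheCreationInputTokens: result.providerMetadata.anthropic.cacheCreationInputTokens
      } : {})
    };

    const usage = {
      inputTokens: result.usage.inputTokens,
      outputTokens: result.usage.outputTokens,
      reasoningTokens: result.usage.reasoningTokens,
      cacheReadInputTokens: result.usage.cachedInputTokens, // v5 name -> PostHog name
      ...additionalTokenValues
    };

    console.log(usage);
    // { inputTokens: 1200, outputTokens: 340, reasoningTokens: 96,
    //   cacheReadInputTokens: 800, cacheCreationInputTokens: 400 }
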
@@ -1410,28 +1404,43 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           reasoningText += chunk.delta; // New in v5
         }
         if (chunk.type === 'finish') {
+          const providerMetadata = chunk.providerMetadata;
+          const additionalTokenValues = {
+            ...(providerMetadata?.anthropic ? {
+              cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
+            } : {})
+          };
           usage = {
             inputTokens: chunk.usage?.inputTokens,
-            outputTokens: chunk.usage?.outputTokens
+            outputTokens: chunk.usage?.outputTokens,
+            reasoningTokens: chunk.usage?.reasoningTokens,
+            cacheReadInputTokens: chunk.usage?.cachedInputTokens,
+            ...additionalTokenValues
           };
-          if (chunk.providerMetadata?.openai?.reasoningTokens) {
-            usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
-          }
-          if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
-            usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens;
-          }
-          if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
-            usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens;
-          }
-          if (chunk.providerMetadata?.anthropic?.cacheCreationInputTokens) {
-            usage.cacheCreationInputTokens = chunk.providerMetadata.anthropic.cacheCreationInputTokens;
-          }
         }
         controller.enqueue(chunk);
       },
       flush: async () => {
         const latency = (Date.now() - startTime) / 1000;
-        const outputContent = reasoningText ? `${reasoningText}\n\n${generatedText}` : generatedText;
+        // Build content array similar to mapVercelOutput structure
+        const content = [];
+        if (reasoningText) {
+          content.push({
+            type: 'reasoning',
+            text: truncate(reasoningText)
+          });
+        }
+        if (generatedText) {
+          content.push({
+            type: 'text',
+            text: truncate(generatedText)
+          });
+        }
+        // Structure output like mapVercelOutput does
+        const output = content.length > 0 ? [{
+          role: 'assistant',
+          content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
+        }] : [];
         await sendEventToPosthog({
           client: phClient,
           distinctId: options.posthogDistinctId,
@@ -1439,10 +1448,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           model: modelId,
           provider: provider,
           input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-          output: [{
-            content: outputContent,
-            role: 'assistant'
-          }],
+          output: output,
           latency,
           baseURL,
           params: mergedParams,
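
Note: the streaming flush path now emits the same structured output shape as mapVercelOutput, instead of concatenating reasoning and text into one string. A sketch of the flush-time assembly; the truncate stub and the accumulated strings are invented placeholders:

    const truncate = s => (s.length > 200 ? s.slice(0, 200) + '...' : s);

    // Accumulated from the stream's reasoning and text deltas (values invented).
    const reasoningText = 'Thinking about the question...';
    const generatedText = 'Here is the answer.';

    const content = [];
    if (reasoningText) content.push({ type: 'reasoning', text: truncate(reasoningText) });
    if (generatedText) content.push({ type: 'text', text: truncate(generatedText) });

    // A lone text part collapses to a plain string, matching mapVercelOutput.
    const output = content.length > 0 ? [{
      role: 'assistant',
      content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
    }] : [];

    console.log(output);
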
@@ -1709,7 +1715,9 @@ class WrappedModels {
       httpStatus: 200,
       usage: {
         inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
-        outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0
+        outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
+        reasoningTokens: response.usageMetadata?.thoughtsTokenCount ?? 0,
+        cacheReadInputTokens: response.usageMetadata?.cachedContentTokenCount ?? 0
       },
       tools: availableTools,
       captureImmediate: posthogCaptureImmediate
@@ -1765,7 +1773,9 @@ class WrappedModels {
       if (chunk.usageMetadata) {
         usage = {
           inputTokens: chunk.usageMetadata.promptTokenCount ?? 0,
-          outputTokens: chunk.usageMetadata.candidatesTokenCount ?? 0
+          outputTokens: chunk.usageMetadata.candidatesTokenCount ?? 0,
+          reasoningTokens: chunk.usageMetadata.thoughtsTokenCount ?? 0,
+          cacheReadInputTokens: chunk.usageMetadata.cachedContentTokenCount ?? 0
         };
       }
       yield chunk;
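
Note: both Gemini wrappers (generate and stream) now also report reasoning and cache-read tokens from usageMetadata, defaulting to 0 when absent. The mapping, sketched with invented values; field names match the diff:

    // Illustrative Gemini usageMetadata (values invented).
    const usageMetadata = {
      promptTokenCount: 900,
      candidatesTokenCount: 250,
      thoughtsTokenCount: 120,      // reasoning ("thinking") tokens
      cachedContentTokenCount: 512  // tokens served from cached content
    };

    const usage = {
      inputTokens: usageMetadata?.promptTokenCount ?? 0,
      outputTokens: usageMetadata?.candidatesTokenCount ?? 0,
      reasoningTokens: usageMetadata?.thoughtsTokenCount ?? 0,
      cacheReadInputTokens: usageMetadata?.cachedContentTokenCount ?? 0
    };

    console.log(usage);
    // { inputTokens: 900, outputTokens: 250, reasoningTokens: 120, cacheReadInputTokens: 512 }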