@posthog/ai 6.0.1 → 6.1.1

package/dist/index.d.ts CHANGED
@@ -152,6 +152,8 @@ type GenerateContentResponse = {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
  totalTokenCount?: number;
+ thoughtsTokenCount?: number;
+ cachedContentTokenCount?: number;
  };
  [key: string]: any;
  };
@@ -176,6 +178,7 @@ declare class WrappedModels {
  generateContent(params: GenerateContentRequest & MonitoringParams): Promise<GenerateContentResponse>;
  generateContentStream(params: GenerateContentRequest & MonitoringParams): AsyncGenerator<any, void, unknown>;
  private formatInput;
+ private formatInputForPostHog;
  }

  declare class LangChainCallbackHandler extends BaseCallbackHandler {
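
Note: the two new optional usageMetadata fields are what the .mjs changes below read when mapping Gemini usage. A minimal sketch of that mapping (the `response` value is hypothetical; the mapping itself mirrors the hunks further down):

    // response: GenerateContentResponse (invented value for illustration)
    const usage = {
      inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
      outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
      reasoningTokens: response.usageMetadata?.thoughtsTokenCount ?? 0,           // new in 6.1.x
      cacheReadInputTokens: response.usageMetadata?.cachedContentTokenCount ?? 0  // new in 6.1.x
    };
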
package/dist/index.mjs CHANGED
@@ -350,6 +350,204 @@ const sendEventToPosthog = async ({
  }
  };

+ // Type guards for safer type checking
+ const isString = value => {
+   return typeof value === 'string';
+ };
+ const isObject = value => {
+   return value !== null && typeof value === 'object' && !Array.isArray(value);
+ };
+
+ const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
+ // ============================================
+ // Base64 Detection Helpers
+ // ============================================
+ const isBase64DataUrl = str => {
+   return /^data:([^;]+);base64,/.test(str);
+ };
+ const isValidUrl = str => {
+   try {
+     new URL(str);
+     return true;
+   } catch {
+     // Not an absolute URL, check if it's a relative URL or path
+     return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
+   }
+ };
+ const isRawBase64 = str => {
+   // Skip if it's a valid URL or path
+   if (isValidUrl(str)) {
+     return false;
+   }
+   // Check if it's a valid base64 string
+   // Base64 images are typically at least a few hundred chars, but we'll be conservative
+   return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
+ };
+ function redactBase64DataUrl(str) {
+   if (!isString(str)) return str;
+   // Check for data URL format
+   if (isBase64DataUrl(str)) {
+     return REDACTED_IMAGE_PLACEHOLDER;
+   }
+   // Check for raw base64 (Vercel sends raw base64 for inline images)
+   if (isRawBase64(str)) {
+     return REDACTED_IMAGE_PLACEHOLDER;
+   }
+   return str;
+ }
+ const processMessages = (messages, transformContent) => {
+   if (!messages) return messages;
+   const processContent = content => {
+     if (typeof content === 'string') return content;
+     if (!content) return content;
+     if (Array.isArray(content)) {
+       return content.map(transformContent);
+     }
+     // Handle single object content
+     return transformContent(content);
+   };
+   const processMessage = msg => {
+     if (!isObject(msg) || !('content' in msg)) return msg;
+     return {
+       ...msg,
+       content: processContent(msg.content)
+     };
+   };
+   // Handle both arrays and single messages
+   if (Array.isArray(messages)) {
+     return messages.map(processMessage);
+   }
+   return processMessage(messages);
+ };
+ // ============================================
+ // Provider-Specific Image Sanitizers
+ // ============================================
+ const sanitizeOpenAIImage = item => {
+   if (!isObject(item)) return item;
+   // Handle image_url format
+   if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+     return {
+       ...item,
+       image_url: {
+         ...item.image_url,
+         url: redactBase64DataUrl(item.image_url.url)
+       }
+     };
+   }
+   return item;
+ };
+ const sanitizeOpenAIResponseImage = item => {
+   if (!isObject(item)) return item;
+   // Handle input_image format
+   if (item.type === 'input_image' && 'image_url' in item) {
+     return {
+       ...item,
+       image_url: redactBase64DataUrl(item.image_url)
+     };
+   }
+   return item;
+ };
+ const sanitizeAnthropicImage = item => {
+   if (!isObject(item)) return item;
+   // Handle Anthropic's image format
+   if (item.type === 'image' && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
+     return {
+       ...item,
+       source: {
+         ...item.source,
+         data: REDACTED_IMAGE_PLACEHOLDER
+       }
+     };
+   }
+   return item;
+ };
+ const sanitizeGeminiPart = part => {
+   if (!isObject(part)) return part;
+   // Handle Gemini's inline data format
+   if ('inlineData' in part && isObject(part.inlineData) && 'data' in part.inlineData) {
+     return {
+       ...part,
+       inlineData: {
+         ...part.inlineData,
+         data: REDACTED_IMAGE_PLACEHOLDER
+       }
+     };
+   }
+   return part;
+ };
+ const processGeminiItem = item => {
+   if (!isObject(item)) return item;
+   // If it has parts, process them
+   if ('parts' in item && item.parts) {
+     const parts = Array.isArray(item.parts) ? item.parts.map(sanitizeGeminiPart) : sanitizeGeminiPart(item.parts);
+     return {
+       ...item,
+       parts
+     };
+   }
+   return item;
+ };
+ const sanitizeLangChainImage = item => {
+   if (!isObject(item)) return item;
+   // OpenAI style
+   if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+     return {
+       ...item,
+       image_url: {
+         ...item.image_url,
+         url: redactBase64DataUrl(item.image_url.url)
+       }
+     };
+   }
+   // Direct image with data field
+   if (item.type === 'image' && 'data' in item) {
+     return {
+       ...item,
+       data: redactBase64DataUrl(item.data)
+     };
+   }
+   // Anthropic style
+   if (item.type === 'image' && 'source' in item && isObject(item.source) && 'data' in item.source) {
+     return {
+       ...item,
+       source: {
+         ...item.source,
+         data: redactBase64DataUrl(item.source.data)
+       }
+     };
+   }
+   // Google style
+   if (item.type === 'media' && 'data' in item) {
+     return {
+       ...item,
+       data: redactBase64DataUrl(item.data)
+     };
+   }
+   return item;
+ };
+ // Export individual sanitizers for tree-shaking
+ const sanitizeOpenAI = data => {
+   return processMessages(data, sanitizeOpenAIImage);
+ };
+ const sanitizeOpenAIResponse = data => {
+   return processMessages(data, sanitizeOpenAIResponseImage);
+ };
+ const sanitizeAnthropic = data => {
+   return processMessages(data, sanitizeAnthropicImage);
+ };
+ const sanitizeGemini = data => {
+   // Gemini has a different structure with 'parts' directly on items instead of 'content'
+   // So we need custom processing instead of using processMessages
+   if (!data) return data;
+   if (Array.isArray(data)) {
+     return data.map(processGeminiItem);
+   }
+   return processGeminiItem(data);
+ };
+ const sanitizeLangChain = data => {
+   return processMessages(data, sanitizeLangChainImage);
+ };
+
  const Chat = OpenAI.Chat;
  const Completions = Chat.Completions;
  const Responses = OpenAI.Responses;
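
Note: the sanitizers above are bundle-internal helpers; the wrappers in the hunks below route every captured input through them. A sketch of the observable effect on an OpenAI-style message (message values invented for illustration):

    const messages = [{
      role: 'user',
      content: [
        { type: 'text', text: 'Describe this image' },
        { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0KGgoAAA' } }
      ]
    }];
    sanitizeOpenAI(messages);
    // -> the data URL is replaced with '[base64 image redacted]';
    //    text parts and http(s) URLs pass through unchanged.
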
@@ -422,7 +620,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: [{
  content: accumulatedContent,
  role: 'assistant'
@@ -442,7 +640,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -474,7 +672,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: formatResponseOpenAI(result),
  latency,
  baseURL: this.baseURL ?? '',
@@ -498,7 +696,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -571,7 +769,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: finalContent,
  latency,
  baseURL: this.baseURL ?? '',
@@ -589,7 +787,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -621,7 +819,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: formatResponseOpenAI({
  output: result.output
  }),
@@ -648,7 +846,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -696,7 +894,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: result.output,
  latency,
  baseURL: this.baseURL ?? '',
@@ -719,7 +917,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -1143,9 +1341,20 @@ const mapVercelPrompt = messages => {
  text: truncate(c.text)
  };
  } else if (c.type === 'file') {
+ // For file type, check if it's a data URL and redact if needed
+ let fileData;
+ const contentData = c.data;
+ if (contentData instanceof URL) {
+   fileData = contentData.toString();
+ } else if (isString(contentData)) {
+   // Redact base64 data URLs and raw base64 to prevent oversized events
+   fileData = redactBase64DataUrl(contentData);
+ } else {
+   fileData = 'raw files not supported';
+ }
  return {
  type: 'file',
- file: c.data instanceof URL ? c.data.toString() : 'raw files not supported',
+ file: fileData,
  mediaType: c.mediaType
  };
  } else if (c.type === 'reasoning') {
@@ -1244,11 +1453,10 @@ const mapVercelOutput = result => {
  if (item.data instanceof URL) {
  fileData = item.data.toString();
  } else if (typeof item.data === 'string') {
- // Check if it's base64 data and potentially large
- if (item.data.startsWith('data:') || item.data.length > 1000) {
+ fileData = redactBase64DataUrl(item.data);
+ // If not redacted and still large, replace with size indicator
+ if (fileData === item.data && item.data.length > 1000) {
  fileData = `[${item.mediaType} file - ${item.data.length} bytes]`;
- } else {
- fileData = item.data;
  }
  } else {
  fileData = `[binary ${item.mediaType} file]`;
@@ -1319,17 +1527,17 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  const latency = (Date.now() - startTime) / 1000;
  const providerMetadata = result.providerMetadata;
  const additionalTokenValues = {
- ...(providerMetadata?.openai?.reasoningTokens ? {
-   reasoningTokens: providerMetadata.openai.reasoningTokens
- } : {}),
- ...(providerMetadata?.openai?.cachedPromptTokens ? {
-   cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens
- } : {}),
  ...(providerMetadata?.anthropic ? {
-   cacheReadInputTokens: providerMetadata.anthropic.cacheReadInputTokens,
  cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
  } : {})
  };
+ const usage = {
+   inputTokens: result.usage.inputTokens,
+   outputTokens: result.usage.outputTokens,
+   reasoningTokens: result.usage.reasoningTokens,
+   cacheReadInputTokens: result.usage.cachedInputTokens,
+   ...additionalTokenValues
+ };
  await sendEventToPosthog({
  client: phClient,
  distinctId: options.posthogDistinctId,
@@ -1342,11 +1550,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  baseURL,
  params: mergedParams,
  httpStatus: 200,
- usage: {
-   inputTokens: result.usage.inputTokens,
-   outputTokens: result.usage.outputTokens,
-   ...additionalTokenValues
- },
+ usage,
  tools: availableTools,
  captureImmediate: options.posthogCaptureImmediate
  });
@@ -1408,22 +1612,19 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  reasoningText += chunk.delta; // New in v5
  }
  if (chunk.type === 'finish') {
+ const providerMetadata = chunk.providerMetadata;
+ const additionalTokenValues = {
+   ...(providerMetadata?.anthropic ? {
+     cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
+   } : {})
+ };
  usage = {
  inputTokens: chunk.usage?.inputTokens,
- outputTokens: chunk.usage?.outputTokens
+ outputTokens: chunk.usage?.outputTokens,
+ reasoningTokens: chunk.usage?.reasoningTokens,
+ cacheReadInputTokens: chunk.usage?.cachedInputTokens,
+ ...additionalTokenValues
  };
- if (chunk.providerMetadata?.openai?.reasoningTokens) {
-   usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
- }
- if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
-   usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens;
- }
- if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
-   usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens;
- }
- if (chunk.providerMetadata?.anthropic?.cacheCreationInputTokens) {
-   usage.cacheCreationInputTokens = chunk.providerMetadata.anthropic.cacheCreationInputTokens;
- }
  }
  controller.enqueue(chunk);
  },
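
Note: after these middleware changes, reasoning and cache-read token counts come from the AI SDK v5 usage object (usage.reasoningTokens, usage.cachedInputTokens) on both the generate and stream paths; only Anthropic's cacheCreationInputTokens is still read from providerMetadata. A sketch with an invented finish chunk:

    const chunk = {
      type: 'finish',
      usage: { inputTokens: 1200, outputTokens: 240, reasoningTokens: 96, cachedInputTokens: 1024 },
      providerMetadata: { anthropic: { cacheCreationInputTokens: 512 } }
    };
    // Mapped as in the hunk above:
    // { inputTokens: 1200, outputTokens: 240, reasoningTokens: 96,
    //   cacheReadInputTokens: 1024, cacheCreationInputTokens: 512 }
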
@@ -1579,7 +1780,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams, 'anthropic'),
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams, 'anthropic')),
  output: [{
  content: accumulatedContent,
  role: 'assistant'
@@ -1600,7 +1801,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams),
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams)),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -1632,7 +1833,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams),
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams)),
  output: formatResponseAnthropic(result),
  latency,
  baseURL: this.baseURL ?? '',
@@ -1656,7 +1857,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams),
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams)),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -1714,7 +1915,7 @@ class WrappedModels {
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
+ input: this.formatInputForPostHog(geminiParams.contents),
  output: formatResponseGemini(response),
  latency,
  baseURL: 'https://generativelanguage.googleapis.com',
@@ -1722,7 +1923,9 @@ class WrappedModels {
  httpStatus: 200,
  usage: {
  inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
- outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0
+ outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
+ reasoningTokens: response.usageMetadata?.thoughtsTokenCount ?? 0,
+ cacheReadInputTokens: response.usageMetadata?.cachedContentTokenCount ?? 0
  },
  tools: availableTools,
  captureImmediate: posthogCaptureImmediate
@@ -1736,7 +1939,7 @@ class WrappedModels {
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
+ input: this.formatInputForPostHog(geminiParams.contents),
  output: [],
  latency,
  baseURL: 'https://generativelanguage.googleapis.com',
@@ -1778,7 +1981,9 @@ class WrappedModels {
  if (chunk.usageMetadata) {
  usage = {
  inputTokens: chunk.usageMetadata.promptTokenCount ?? 0,
- outputTokens: chunk.usageMetadata.candidatesTokenCount ?? 0
+ outputTokens: chunk.usageMetadata.candidatesTokenCount ?? 0,
+ reasoningTokens: chunk.usageMetadata.thoughtsTokenCount ?? 0,
+ cacheReadInputTokens: chunk.usageMetadata.cachedContentTokenCount ?? 0
  };
  }
  yield chunk;
@@ -1791,7 +1996,7 @@ class WrappedModels {
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
+ input: this.formatInputForPostHog(geminiParams.contents),
  output: [{
  content: accumulatedContent,
  role: 'assistant'
@@ -1812,7 +2017,7 @@ class WrappedModels {
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
+ input: this.formatInputForPostHog(geminiParams.contents),
  output: [],
  latency,
  baseURL: 'https://generativelanguage.googleapis.com',
@@ -1857,6 +2062,12 @@ class WrappedModels {
  content: item.content
  };
  }
+ if (item.parts) {
+   return {
+     role: item.role || 'user',
+     content: item.parts.map(part => part.text ? part.text : part)
+   };
+ }
  }
  return {
  role: 'user',
@@ -1883,6 +2094,10 @@ class WrappedModels {
  content: String(contents)
  }];
  }
+ formatInputForPostHog(contents) {
+   const sanitized = sanitizeGemini(contents);
+   return this.formatInput(sanitized);
+ }
  }

  function getDefaultExportFromCjs (x) {
@@ -2574,7 +2789,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  }) || 'generation';
  const generation = {
  name: runNameFound,
- input: messages,
+ input: sanitizeLangChain(messages),
  startTime: Date.now()
  };
  if (extraParams) {
@@ -2837,7 +3052,8 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  ...message.additional_kwargs
  };
  }
- return messageDict;
+ // Sanitize the message content to redact base64 images
+ return sanitizeLangChain(messageDict);
  }
  _parseUsageModel(usage) {
  const conversionList = [['promptTokens', 'input'], ['completionTokens', 'output'], ['input_tokens', 'input'], ['output_tokens', 'output'], ['prompt_token_count', 'input'], ['candidates_token_count', 'output'], ['inputTokenCount', 'input'], ['outputTokenCount', 'output'], ['input_token_count', 'input'], ['generated_token_count', 'output']];
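
Note: both LangChain call sites now share sanitizeLangChain, which covers the OpenAI, Anthropic, and Google content shapes matched earlier in the bundle. One illustrative case (message values invented), exercising the Anthropic-style branch:

    sanitizeLangChain([{
      role: 'user',
      content: [{ type: 'image', source: { type: 'base64', data: 'iVBORw0KGgoAAAANSUhEUg' } }]
    }]);
    // -> source.data becomes '[base64 image redacted]': the string is raw base64
    //    (length > 20, base64 charset, not URL- or path-like), so redactBase64DataUrl redacts it.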