@posthog/ai 6.1.0 → 6.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -258,9 +258,8 @@ const extractAvailableToolCalls = (provider, params) => {
  }
  return null;
  } else if (provider === 'vercel') {
- // Vercel AI SDK stores tools in params.mode.tools when mode type is 'regular'
- if (params.mode?.type === 'regular' && params.mode.tools) {
- return params.mode.tools;
+ if (params.tools) {
+ return params.tools;
  }
  return null;
  }
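
For illustration only (hypothetical params, not part of the diff): with the Vercel AI SDK v5 middleware, tool definitions sit at the top level of the call params rather than under params.mode, so the lookup above becomes a plain property check.

    // Hypothetical call shapes, assuming the v5 params layout:
    extractAvailableToolCalls('vercel', { tools: [{ name: 'getWeather' }] }); // -> the tools array
    extractAvailableToolCalls('vercel', {});                                  // -> null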
@@ -370,6 +369,204 @@ const sendEventToPosthog = async ({
  }
  };

+ // Type guards for safer type checking
+ const isString = value => {
+ return typeof value === 'string';
+ };
+ const isObject = value => {
+ return value !== null && typeof value === 'object' && !Array.isArray(value);
+ };
+
+ const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
+ // ============================================
+ // Base64 Detection Helpers
+ // ============================================
+ const isBase64DataUrl = str => {
+ return /^data:([^;]+);base64,/.test(str);
+ };
+ const isValidUrl = str => {
+ try {
+ new URL(str);
+ return true;
+ } catch {
+ // Not an absolute URL, check if it's a relative URL or path
+ return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
+ }
+ };
+ const isRawBase64 = str => {
+ // Skip if it's a valid URL or path
+ if (isValidUrl(str)) {
+ return false;
+ }
+ // Check if it's a valid base64 string
+ // Base64 images are typically at least a few hundred chars, but we'll be conservative
+ return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
+ };
+ function redactBase64DataUrl(str) {
+ if (!isString(str)) return str;
+ // Check for data URL format
+ if (isBase64DataUrl(str)) {
+ return REDACTED_IMAGE_PLACEHOLDER;
+ }
+ // Check for raw base64 (Vercel sends raw base64 for inline images)
+ if (isRawBase64(str)) {
+ return REDACTED_IMAGE_PLACEHOLDER;
+ }
+ return str;
+ }
+ const processMessages = (messages, transformContent) => {
+ if (!messages) return messages;
+ const processContent = content => {
+ if (typeof content === 'string') return content;
+ if (!content) return content;
+ if (Array.isArray(content)) {
+ return content.map(transformContent);
+ }
+ // Handle single object content
+ return transformContent(content);
+ };
+ const processMessage = msg => {
+ if (!isObject(msg) || !('content' in msg)) return msg;
+ return {
+ ...msg,
+ content: processContent(msg.content)
+ };
+ };
+ // Handle both arrays and single messages
+ if (Array.isArray(messages)) {
+ return messages.map(processMessage);
+ }
+ return processMessage(messages);
+ };
+ // ============================================
+ // Provider-Specific Image Sanitizers
+ // ============================================
+ const sanitizeOpenAIImage = item => {
+ if (!isObject(item)) return item;
+ // Handle image_url format
+ if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+ return {
+ ...item,
+ image_url: {
+ ...item.image_url,
+ url: redactBase64DataUrl(item.image_url.url)
+ }
+ };
+ }
+ return item;
+ };
+ const sanitizeOpenAIResponseImage = item => {
+ if (!isObject(item)) return item;
+ // Handle input_image format
+ if (item.type === 'input_image' && 'image_url' in item) {
+ return {
+ ...item,
+ image_url: redactBase64DataUrl(item.image_url)
+ };
+ }
+ return item;
+ };
+ const sanitizeAnthropicImage = item => {
+ if (!isObject(item)) return item;
+ // Handle Anthropic's image format
+ if (item.type === 'image' && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
+ return {
+ ...item,
+ source: {
+ ...item.source,
+ data: REDACTED_IMAGE_PLACEHOLDER
+ }
+ };
+ }
+ return item;
+ };
+ const sanitizeGeminiPart = part => {
+ if (!isObject(part)) return part;
+ // Handle Gemini's inline data format
+ if ('inlineData' in part && isObject(part.inlineData) && 'data' in part.inlineData) {
+ return {
+ ...part,
+ inlineData: {
+ ...part.inlineData,
+ data: REDACTED_IMAGE_PLACEHOLDER
+ }
+ };
+ }
+ return part;
+ };
+ const processGeminiItem = item => {
+ if (!isObject(item)) return item;
+ // If it has parts, process them
+ if ('parts' in item && item.parts) {
+ const parts = Array.isArray(item.parts) ? item.parts.map(sanitizeGeminiPart) : sanitizeGeminiPart(item.parts);
+ return {
+ ...item,
+ parts
+ };
+ }
+ return item;
+ };
+ const sanitizeLangChainImage = item => {
+ if (!isObject(item)) return item;
+ // OpenAI style
+ if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+ return {
+ ...item,
+ image_url: {
+ ...item.image_url,
+ url: redactBase64DataUrl(item.image_url.url)
+ }
+ };
+ }
+ // Direct image with data field
+ if (item.type === 'image' && 'data' in item) {
+ return {
+ ...item,
+ data: redactBase64DataUrl(item.data)
+ };
+ }
+ // Anthropic style
+ if (item.type === 'image' && 'source' in item && isObject(item.source) && 'data' in item.source) {
+ return {
+ ...item,
+ source: {
+ ...item.source,
+ data: redactBase64DataUrl(item.source.data)
+ }
+ };
+ }
+ // Google style
+ if (item.type === 'media' && 'data' in item) {
+ return {
+ ...item,
+ data: redactBase64DataUrl(item.data)
+ };
+ }
+ return item;
+ };
+ // Export individual sanitizers for tree-shaking
+ const sanitizeOpenAI = data => {
+ return processMessages(data, sanitizeOpenAIImage);
+ };
+ const sanitizeOpenAIResponse = data => {
+ return processMessages(data, sanitizeOpenAIResponseImage);
+ };
+ const sanitizeAnthropic = data => {
+ return processMessages(data, sanitizeAnthropicImage);
+ };
+ const sanitizeGemini = data => {
+ // Gemini has a different structure with 'parts' directly on items instead of 'content'
+ // So we need custom processing instead of using processMessages
+ if (!data) return data;
+ if (Array.isArray(data)) {
+ return data.map(processGeminiItem);
+ }
+ return processGeminiItem(data);
+ };
+ const sanitizeLangChain = data => {
+ return processMessages(data, sanitizeLangChainImage);
+ };
+
  const Chat = openai.OpenAI.Chat;
  const Completions = Chat.Completions;
  const Responses = openai.OpenAI.Responses;
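
For illustration only (hypothetical inputs): the helpers above replace inline image payloads with the placeholder while leaving real URLs and plain text untouched.

    redactBase64DataUrl('data:image/png;base64,iVBORw0KGgo=');  // -> '[base64 image redacted]'
    redactBase64DataUrl('https://example.com/cat.png');         // -> unchanged (valid URL)
    sanitizeOpenAI([{
      role: 'user',
      content: [
        { type: 'text', text: 'What is in this image?' },       // untouched
        { type: 'image_url', image_url: { url: 'data:image/jpeg;base64,AAAA' } } // url redacted
      ]
    }]);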
@@ -417,14 +614,52 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  const [stream1, stream2] = value.tee();
  (async () => {
  try {
+ const contentBlocks = [];
  let accumulatedContent = '';
  let usage = {
  inputTokens: 0,
  outputTokens: 0
  };
+ // Map to track in-progress tool calls
+ const toolCallsInProgress = new Map();
  for await (const chunk of stream1) {
- const delta = chunk?.choices?.[0]?.delta?.content ?? '';
- accumulatedContent += delta;
+ const choice = chunk?.choices?.[0];
+ // Handle text content
+ const deltaContent = choice?.delta?.content;
+ if (deltaContent) {
+ accumulatedContent += deltaContent;
+ }
+ // Handle tool calls
+ const deltaToolCalls = choice?.delta?.tool_calls;
+ if (deltaToolCalls && Array.isArray(deltaToolCalls)) {
+ for (const toolCall of deltaToolCalls) {
+ const index = toolCall.index;
+ if (index !== undefined) {
+ if (!toolCallsInProgress.has(index)) {
+ // New tool call
+ toolCallsInProgress.set(index, {
+ id: toolCall.id || '',
+ name: toolCall.function?.name || '',
+ arguments: ''
+ });
+ }
+ const inProgressCall = toolCallsInProgress.get(index);
+ if (inProgressCall) {
+ // Update tool call data
+ if (toolCall.id) {
+ inProgressCall.id = toolCall.id;
+ }
+ if (toolCall.function?.name) {
+ inProgressCall.name = toolCall.function.name;
+ }
+ if (toolCall.function?.arguments) {
+ inProgressCall.arguments += toolCall.function.arguments;
+ }
+ }
+ }
+ }
+ }
+ // Handle usage information
  if (chunk.usage) {
  usage = {
  inputTokens: chunk.usage.prompt_tokens ?? 0,
@@ -434,6 +669,37 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  };
  }
  }
+ // Build final content blocks
+ if (accumulatedContent) {
+ contentBlocks.push({
+ type: 'text',
+ text: accumulatedContent
+ });
+ }
+ // Add completed tool calls to content blocks
+ for (const toolCall of toolCallsInProgress.values()) {
+ if (toolCall.name) {
+ contentBlocks.push({
+ type: 'function',
+ id: toolCall.id,
+ function: {
+ name: toolCall.name,
+ arguments: toolCall.arguments
+ }
+ });
+ }
+ }
+ // Format output to match non-streaming version
+ const formattedOutput = contentBlocks.length > 0 ? [{
+ role: 'assistant',
+ content: contentBlocks
+ }] : [{
+ role: 'assistant',
+ content: [{
+ type: 'text',
+ text: ''
+ }]
+ }];
  const latency = (Date.now() - startTime) / 1000;
  const availableTools = extractAvailableToolCalls('openai', openAIParams);
  await sendEventToPosthog({
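
For illustration only (hypothetical deltas): OpenAI streams a tool call's arguments as JSON fragments spread across chunks, keyed by index, and the Map above stitches them back together.

    // delta 1: { index: 0, id: 'call_1', function: { name: 'getWeather', arguments: '' } }
    // delta 2: { index: 0, function: { arguments: '{"city":' } }
    // delta 3: { index: 0, function: { arguments: '"Paris"}' } }
    // After the loop: toolCallsInProgress.get(0)
    //   -> { id: 'call_1', name: 'getWeather', arguments: '{"city":"Paris"}' }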
@@ -442,11 +708,8 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
- output: [{
- content: accumulatedContent,
- role: 'assistant'
- }],
+ input: sanitizeOpenAI(openAIParams.messages),
+ output: formattedOutput,
  latency,
  baseURL: this.baseURL ?? '',
  params: body,
@@ -456,18 +719,19 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  captureImmediate: posthogCaptureImmediate
  });
  } catch (error) {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
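
A minimal sketch of the status-extraction pattern the catch blocks now share, with hypothetical error values:

    const toHttpStatus = error =>
      error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
    toHttpStatus({ status: 429 });        // 429
    toHttpStatus({ status: undefined });  // 500 (nullish status falls back)
    toHttpStatus(new Error('boom'));      // 500 (no status property)
    toHttpStatus(null);                   // 500 (not an object)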
@@ -494,7 +758,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: formatResponseOpenAI(result),
  latency,
  baseURL: this.baseURL ?? '',
@@ -512,18 +776,19 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  }
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -591,7 +856,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: finalContent,
  latency,
  baseURL: this.baseURL ?? '',
@@ -602,6 +867,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  captureImmediate: posthogCaptureImmediate
  });
  } catch (error) {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -609,12 +875,12 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -641,7 +907,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: formatResponseOpenAI({
  output: result.output
  }),
@@ -661,6 +927,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  }
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -668,12 +935,12 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -716,7 +983,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: result.output,
  latency,
  baseURL: this.baseURL ?? '',
@@ -732,6 +999,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  });
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -739,12 +1007,12 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -806,14 +1074,52 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  const [stream1, stream2] = value.tee();
  (async () => {
  try {
+ const contentBlocks = [];
  let accumulatedContent = '';
  let usage = {
  inputTokens: 0,
  outputTokens: 0
  };
+ // Map to track in-progress tool calls
+ const toolCallsInProgress = new Map();
  for await (const chunk of stream1) {
- const delta = chunk?.choices?.[0]?.delta?.content ?? '';
- accumulatedContent += delta;
+ const choice = chunk?.choices?.[0];
+ // Handle text content
+ const deltaContent = choice?.delta?.content;
+ if (deltaContent) {
+ accumulatedContent += deltaContent;
+ }
+ // Handle tool calls
+ const deltaToolCalls = choice?.delta?.tool_calls;
+ if (deltaToolCalls && Array.isArray(deltaToolCalls)) {
+ for (const toolCall of deltaToolCalls) {
+ const index = toolCall.index;
+ if (index !== undefined) {
+ if (!toolCallsInProgress.has(index)) {
+ // New tool call
+ toolCallsInProgress.set(index, {
+ id: toolCall.id || '',
+ name: toolCall.function?.name || '',
+ arguments: ''
+ });
+ }
+ const inProgressCall = toolCallsInProgress.get(index);
+ if (inProgressCall) {
+ // Update tool call data
+ if (toolCall.id) {
+ inProgressCall.id = toolCall.id;
+ }
+ if (toolCall.function?.name) {
+ inProgressCall.name = toolCall.function.name;
+ }
+ if (toolCall.function?.arguments) {
+ inProgressCall.arguments += toolCall.function.arguments;
+ }
+ }
+ }
+ }
+ }
+ // Handle usage information
  if (chunk.usage) {
  usage = {
  inputTokens: chunk.usage.prompt_tokens ?? 0,
@@ -823,6 +1129,37 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  };
  }
  }
+ // Build final content blocks
+ if (accumulatedContent) {
+ contentBlocks.push({
+ type: 'text',
+ text: accumulatedContent
+ });
+ }
+ // Add completed tool calls to content blocks
+ for (const toolCall of toolCallsInProgress.values()) {
+ if (toolCall.name) {
+ contentBlocks.push({
+ type: 'function',
+ id: toolCall.id,
+ function: {
+ name: toolCall.name,
+ arguments: toolCall.arguments
+ }
+ });
+ }
+ }
+ // Format output to match non-streaming version
+ const formattedOutput = contentBlocks.length > 0 ? [{
+ role: 'assistant',
+ content: contentBlocks
+ }] : [{
+ role: 'assistant',
+ content: [{
+ type: 'text',
+ text: ''
+ }]
+ }];
  const latency = (Date.now() - startTime) / 1000;
  await sendEventToPosthog({
  client: this.phClient,
@@ -831,10 +1168,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  model: openAIParams.model,
  provider: 'azure',
  input: openAIParams.messages,
- output: [{
- content: accumulatedContent,
- role: 'assistant'
- }],
+ output: formattedOutput,
  latency,
  baseURL: this.baseURL ?? '',
  params: body,
@@ -843,6 +1177,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  captureImmediate: posthogCaptureImmediate
  });
  } catch (error) {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -854,7 +1189,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -897,6 +1232,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  }
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -908,7 +1244,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -985,6 +1321,7 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  captureImmediate: posthogCaptureImmediate
  });
  } catch (error) {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -997,7 +1334,7 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -1040,6 +1377,7 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  }
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -1052,7 +1390,7 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -1163,9 +1501,20 @@ const mapVercelPrompt = messages => {
  text: truncate(c.text)
  };
  } else if (c.type === 'file') {
+ // For file type, check if it's a data URL and redact if needed
+ let fileData;
+ const contentData = c.data;
+ if (contentData instanceof URL) {
+ fileData = contentData.toString();
+ } else if (isString(contentData)) {
+ // Redact base64 data URLs and raw base64 to prevent oversized events
+ fileData = redactBase64DataUrl(contentData);
+ } else {
+ fileData = 'raw files not supported';
+ }
  return {
  type: 'file',
- file: c.data instanceof URL ? c.data.toString() : 'raw files not supported',
+ file: fileData,
  mediaType: c.mediaType
  };
  } else if (c.type === 'reasoning') {
@@ -1264,11 +1613,10 @@ const mapVercelOutput = result => {
  if (item.data instanceof URL) {
  fileData = item.data.toString();
  } else if (typeof item.data === 'string') {
- // Check if it's base64 data and potentially large
- if (item.data.startsWith('data:') || item.data.length > 1000) {
+ fileData = redactBase64DataUrl(item.data);
+ // If not redacted and still large, replace with size indicator
+ if (fileData === item.data && item.data.length > 1000) {
  fileData = `[${item.mediaType} file - ${item.data.length} bytes]`;
- } else {
- fileData = item.data;
  }
  } else {
  fileData = `[binary ${item.mediaType} file]`;
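
For illustration only (hypothetical item.data values): the rewritten branch first applies the base64 redaction and only falls back to a size indicator when the string survived redaction but is still large.

    // 'data:image/png;base64,AAAA'              -> '[base64 image redacted]'
    // 'https://example.com/' + 'a'.repeat(2000) -> '[image/png file - 2020 bytes]' (assuming mediaType 'image/png')
    // 'short caption text'                      -> kept as-is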
@@ -1409,6 +1757,8 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  const provider = options.posthogProviderOverride ?? extractProvider(model);
  const availableTools = extractAvailableToolCalls('vercel', params);
  const baseURL = ''; // cannot currently get baseURL from vercel
+ // Map to track in-progress tool calls
+ const toolCallsInProgress = new Map();
  try {
  const {
  stream,
@@ -1423,6 +1773,34 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  if (chunk.type === 'reasoning-delta') {
  reasoningText += chunk.delta; // New in v5
  }
+ // Handle tool call chunks
+ if (chunk.type === 'tool-input-start') {
+ // Initialize a new tool call
+ toolCallsInProgress.set(chunk.id, {
+ toolCallId: chunk.id,
+ toolName: chunk.toolName,
+ input: ''
+ });
+ }
+ if (chunk.type === 'tool-input-delta') {
+ // Accumulate tool call arguments
+ const toolCall = toolCallsInProgress.get(chunk.id);
+ if (toolCall) {
+ toolCall.input += chunk.delta;
+ }
+ }
+ if (chunk.type === 'tool-input-end') {
+ // Tool call is complete, keep it in the map for final processing
+ // Nothing specific to do here, the tool call is already complete
+ }
+ if (chunk.type === 'tool-call') {
+ // Direct tool call chunk (complete tool call)
+ toolCallsInProgress.set(chunk.toolCallId, {
+ toolCallId: chunk.toolCallId,
+ toolName: chunk.toolName,
+ input: chunk.input
+ });
+ }
  if (chunk.type === 'finish') {
  const providerMetadata = chunk.providerMetadata;
  const additionalTokenValues = {
@@ -1456,6 +1834,19 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  text: truncate(generatedText)
  });
  }
+ // Add completed tool calls to content
+ for (const toolCall of toolCallsInProgress.values()) {
+ if (toolCall.toolName) {
+ content.push({
+ type: 'tool-call',
+ id: toolCall.toolCallId,
+ function: {
+ name: toolCall.toolName,
+ arguments: toolCall.input
+ }
+ });
+ }
+ }
  // Structure output like mapVercelOutput does
  const output = content.length > 0 ? [{
  role: 'assistant',
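
For illustration only (hypothetical stream parts, assuming the v5 part shapes): a streamed tool call arrives as tool-input-start / tool-input-delta parts, while some providers emit one complete tool-call part; both paths land in the same Map.

    // { type: 'tool-input-start', id: 'tc_1', toolName: 'getWeather' }
    // { type: 'tool-input-delta', id: 'tc_1', delta: '{"city":"Paris"}' }
    // { type: 'tool-input-end', id: 'tc_1' }
    // -> toolCallsInProgress.get('tc_1')
    //    = { toolCallId: 'tc_1', toolName: 'getWeather', input: '{"city":"Paris"}' }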
@@ -1558,6 +1949,9 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  if (anthropicParams.stream) {
  return parentPromise.then(value => {
  let accumulatedContent = '';
+ const contentBlocks = [];
+ const toolsInProgress = new Map();
+ let currentTextBlock = null;
  const usage = {
  inputTokens: 0,
  outputTokens: 0,
@@ -1569,10 +1963,70 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  (async () => {
  try {
  for await (const chunk of stream1) {
+ // Handle content block start events
+ if (chunk.type === 'content_block_start') {
+ if (chunk.content_block?.type === 'text') {
+ currentTextBlock = {
+ type: 'text',
+ text: ''
+ };
+ contentBlocks.push(currentTextBlock);
+ } else if (chunk.content_block?.type === 'tool_use') {
+ const toolBlock = {
+ type: 'function',
+ id: chunk.content_block.id,
+ function: {
+ name: chunk.content_block.name,
+ arguments: {}
+ }
+ };
+ contentBlocks.push(toolBlock);
+ toolsInProgress.set(chunk.content_block.id, {
+ block: toolBlock,
+ inputString: ''
+ });
+ currentTextBlock = null;
+ }
+ }
+ // Handle text delta events
  if ('delta' in chunk) {
  if ('text' in chunk.delta) {
  const delta = chunk?.delta?.text ?? '';
  accumulatedContent += delta;
+ if (currentTextBlock) {
+ currentTextBlock.text += delta;
+ }
+ }
+ }
+ // Handle tool input delta events
+ if (chunk.type === 'content_block_delta' && chunk.delta?.type === 'input_json_delta') {
+ const block = chunk.index !== undefined ? contentBlocks[chunk.index] : undefined;
+ const toolId = block?.type === 'function' ? block.id : undefined;
+ if (toolId && toolsInProgress.has(toolId)) {
+ const tool = toolsInProgress.get(toolId);
+ if (tool) {
+ tool.inputString += chunk.delta.partial_json || '';
+ }
+ }
+ }
+ // Handle content block stop events
+ if (chunk.type === 'content_block_stop') {
+ currentTextBlock = null;
+ // Parse accumulated tool input
+ if (chunk.index !== undefined) {
+ const block = contentBlocks[chunk.index];
+ if (block?.type === 'function' && block.id && toolsInProgress.has(block.id)) {
+ const tool = toolsInProgress.get(block.id);
+ if (tool) {
+ try {
+ block.function.arguments = JSON.parse(tool.inputString);
+ } catch (e) {
+ // Keep empty object if parsing fails
+ console.error('Error parsing tool input:', e);
+ }
+ }
+ toolsInProgress.delete(block.id);
+ }
  }
  }
  if (chunk.type == 'message_start') {
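
For illustration only (hypothetical event sequence): Anthropic streams each content block as start / delta / stop events, and tool_use arguments arrive as partial JSON that the handler parses once the block stops.

    // { type: 'content_block_start', index: 0, content_block: { type: 'tool_use', id: 'tu_1', name: 'getWeather' } }
    // { type: 'content_block_delta', index: 0, delta: { type: 'input_json_delta', partial_json: '{"city":' } }
    // { type: 'content_block_delta', index: 0, delta: { type: 'input_json_delta', partial_json: '"Paris"}' } }
    // { type: 'content_block_stop', index: 0 }
    // -> contentBlocks[0].function.arguments = { city: 'Paris' }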
@@ -1586,17 +2040,25 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  }
  const latency = (Date.now() - startTime) / 1000;
  const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
+ // Format output to match non-streaming version
+ const formattedOutput = contentBlocks.length > 0 ? [{
+ role: 'assistant',
+ content: contentBlocks
+ }] : [{
+ role: 'assistant',
+ content: [{
+ type: 'text',
+ text: accumulatedContent
+ }]
+ }];
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams, 'anthropic'),
- output: [{
- content: accumulatedContent,
- role: 'assistant'
- }],
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams, 'anthropic')),
+ output: formattedOutput,
  latency,
  baseURL: this.baseURL ?? '',
  params: body,
@@ -1613,7 +2075,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams),
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams)),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -1645,7 +2107,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams),
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams)),
  output: formatResponseAnthropic(result),
  latency,
  baseURL: this.baseURL ?? '',
@@ -1669,7 +2131,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
  traceId,
  model: anthropicParams.model,
  provider: 'anthropic',
- input: mergeSystemPrompt(anthropicParams),
+ input: sanitizeAnthropic(mergeSystemPrompt(anthropicParams)),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
@@ -1721,23 +2183,24 @@ class WrappedModels {
  const response = await this.client.models.generateContent(geminiParams);
  const latency = (Date.now() - startTime) / 1000;
  const availableTools = extractAvailableToolCalls('gemini', geminiParams);
+ const metadata = response.usageMetadata;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
+ input: this.formatInputForPostHog(geminiParams.contents),
  output: formatResponseGemini(response),
  latency,
  baseURL: 'https://generativelanguage.googleapis.com',
  params: params,
  httpStatus: 200,
  usage: {
- inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
- outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
- reasoningTokens: response.usageMetadata?.thoughtsTokenCount ?? 0,
- cacheReadInputTokens: response.usageMetadata?.cachedContentTokenCount ?? 0
+ inputTokens: metadata?.promptTokenCount ?? 0,
+ outputTokens: metadata?.candidatesTokenCount ?? 0,
+ reasoningTokens: metadata?.thoughtsTokenCount ?? 0,
+ cacheReadInputTokens: metadata?.cachedContentTokenCount ?? 0
  },
  tools: availableTools,
  captureImmediate: posthogCaptureImmediate
@@ -1751,7 +2214,7 @@ class WrappedModels {
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
+ input: this.formatInputForPostHog(geminiParams.contents),
  output: [],
  latency,
  baseURL: 'https://generativelanguage.googleapis.com',
@@ -1779,7 +2242,7 @@ class WrappedModels {
  } = params;
  const traceId = posthogTraceId ?? uuid.v4();
  const startTime = Date.now();
- let accumulatedContent = '';
+ const accumulatedContent = [];
  let usage = {
  inputTokens: 0,
  outputTokens: 0
@@ -1787,32 +2250,74 @@ class WrappedModels {
  try {
  const stream = await this.client.models.generateContentStream(geminiParams);
  for await (const chunk of stream) {
+ // Handle text content
  if (chunk.text) {
- accumulatedContent += chunk.text;
+ // Find if we already have a text item to append to
+ let lastTextItem;
+ for (let i = accumulatedContent.length - 1; i >= 0; i--) {
+ if (accumulatedContent[i].type === 'text') {
+ lastTextItem = accumulatedContent[i];
+ break;
+ }
+ }
+ if (lastTextItem && lastTextItem.type === 'text') {
+ lastTextItem.text += chunk.text;
+ } else {
+ accumulatedContent.push({
+ type: 'text',
+ text: chunk.text
+ });
+ }
  }
+ // Handle function calls from candidates
+ if (chunk.candidates && Array.isArray(chunk.candidates)) {
+ for (const candidate of chunk.candidates) {
+ if (candidate.content && candidate.content.parts) {
+ for (const part of candidate.content.parts) {
+ // Type-safe check for functionCall
+ if ('functionCall' in part) {
+ const funcCall = part.functionCall;
+ if (funcCall?.name) {
+ accumulatedContent.push({
+ type: 'function',
+ function: {
+ name: funcCall.name,
+ arguments: funcCall.args || {}
+ }
+ });
+ }
+ }
+ }
+ }
+ }
+ }
+ // Update usage metadata - handle both old and new field names
  if (chunk.usageMetadata) {
+ const metadata = chunk.usageMetadata;
  usage = {
- inputTokens: chunk.usageMetadata.promptTokenCount ?? 0,
- outputTokens: chunk.usageMetadata.candidatesTokenCount ?? 0,
- reasoningTokens: chunk.usageMetadata.thoughtsTokenCount ?? 0,
- cacheReadInputTokens: chunk.usageMetadata.cachedContentTokenCount ?? 0
+ inputTokens: metadata.promptTokenCount ?? 0,
+ outputTokens: metadata.candidatesTokenCount ?? 0,
+ reasoningTokens: metadata.thoughtsTokenCount ?? 0,
+ cacheReadInputTokens: metadata.cachedContentTokenCount ?? 0
  };
  }
  yield chunk;
  }
  const latency = (Date.now() - startTime) / 1000;
  const availableTools = extractAvailableToolCalls('gemini', geminiParams);
+ // Format output similar to formatResponseGemini
+ const output = accumulatedContent.length > 0 ? [{
+ role: 'assistant',
+ content: accumulatedContent
+ }] : [];
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
- output: [{
- content: accumulatedContent,
- role: 'assistant'
- }],
+ input: this.formatInputForPostHog(geminiParams.contents),
+ output,
  latency,
  baseURL: 'https://generativelanguage.googleapis.com',
  params: params,
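
For illustration only (hypothetical chunks): the Gemini accumulator now builds typed content items instead of one flat string, so interleaved text and function calls both survive into the captured output.

    // chunk 1: { text: 'Checking the weather' }
    // chunk 2: { candidates: [{ content: { parts: [{ functionCall: { name: 'getWeather', args: { city: 'Paris' } } }] } }] }
    // -> accumulatedContent = [
    //      { type: 'text', text: 'Checking the weather' },
    //      { type: 'function', function: { name: 'getWeather', arguments: { city: 'Paris' } } }
    //    ]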
@@ -1829,7 +2334,7 @@ class WrappedModels {
  traceId,
  model: geminiParams.model,
  provider: 'gemini',
- input: this.formatInput(geminiParams.contents),
+ input: this.formatInputForPostHog(geminiParams.contents),
  output: [],
  latency,
  baseURL: 'https://generativelanguage.googleapis.com',
@@ -1862,16 +2367,28 @@ class WrappedModels {
  };
  }
  if (item && typeof item === 'object') {
- if (item.text) {
+ const obj = item;
+ if ('text' in obj && obj.text) {
+ return {
+ role: obj.role || 'user',
+ content: obj.text
+ };
+ }
+ if ('content' in obj && obj.content) {
  return {
- role: item.role || 'user',
- content: item.text
+ role: obj.role || 'user',
+ content: obj.content
  };
  }
- if (item.content) {
+ if ('parts' in obj && Array.isArray(obj.parts)) {
  return {
- role: item.role || 'user',
- content: item.content
+ role: obj.role || 'user',
+ content: obj.parts.map(part => {
+ if (part && typeof part === 'object' && 'text' in part) {
+ return part.text;
+ }
+ return part;
+ })
  };
  }
  }
@@ -1882,16 +2399,17 @@ class WrappedModels {
  });
  }
  if (contents && typeof contents === 'object') {
- if (contents.text) {
+ const obj = contents;
+ if ('text' in obj && obj.text) {
  return [{
  role: 'user',
- content: contents.text
+ content: obj.text
  }];
  }
- if (contents.content) {
+ if ('content' in obj && obj.content) {
  return [{
  role: 'user',
- content: contents.content
+ content: obj.content
  }];
  }
  }
@@ -1900,6 +2418,10 @@ class WrappedModels {
  content: String(contents)
  }];
  }
+ formatInputForPostHog(contents) {
+ const sanitized = sanitizeGemini(contents);
+ return this.formatInput(sanitized);
+ }
  }

  function getDefaultExportFromCjs (x) {
@@ -2591,7 +3113,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  }) || 'generation';
  const generation = {
  name: runNameFound,
- input: messages,
+ input: sanitizeLangChain(messages),
  startTime: Date.now()
  };
  if (extraParams) {
@@ -2854,7 +3376,8 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
  ...message.additional_kwargs
  };
  }
- return messageDict;
+ // Sanitize the message content to redact base64 images
+ return sanitizeLangChain(messageDict);
  }
  _parseUsageModel(usage) {
  const conversionList = [['promptTokens', 'input'], ['completionTokens', 'output'], ['input_tokens', 'input'], ['output_tokens', 'output'], ['prompt_token_count', 'input'], ['candidates_token_count', 'output'], ['inputTokenCount', 'input'], ['outputTokenCount', 'output'], ['input_token_count', 'input'], ['generated_token_count', 'output']];