@posthog/ai 7.5.4 → 7.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,7 +3,7 @@ import { Buffer } from 'buffer';
3
3
  import { v4 } from 'uuid';
4
4
  import { uuidv7 } from '@posthog/core';
5
5
 
6
- var version = "7.5.4";
6
+ var version = "7.6.0";
7
7
 
8
8
  // Type guards for safer type checking
9
9
 
@@ -14,6 +14,139 @@ const isObject = value => {
14
14
  return value !== null && typeof value === 'object' && !Array.isArray(value);
15
15
  };
16
16
 
17
// Placeholder substituted for base64 image/audio payloads so large binary
// blobs are never sent to PostHog. NOTE(review): this same string is reused
// for audio data below — presumably intentional; confirm if a distinct
// audio label is wanted.
const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
18
+
19
// ============================================
// Multimodal Feature Toggle
// ============================================

/**
 * Reads the internal multimodal opt-in flag from the environment.
 * Truthy spellings are 'true', '1', and 'yes' (case-insensitive).
 * @returns {boolean} true when multimodal payloads should pass through unredacted.
 */
const isMultimodalEnabled = () => {
  const flag = (process.env._INTERNAL_LLMA_MULTIMODAL ?? '').toLowerCase();
  return flag === 'true' || flag === '1' || flag === 'yes';
};
27
+
28
// ============================================
// Base64 Detection Helpers
// ============================================

/**
 * True when `str` is a base64 data URL, e.g. "data:image/png;base64,....".
 * @param {string} str - candidate string.
 * @returns {boolean}
 */
const isBase64DataUrl = str => /^data:([^;]+);base64,/.test(str);
35
/**
 * True when `str` is an absolute URL, or looks like a relative URL/path.
 * @param {string} str - candidate string.
 * @returns {boolean}
 */
const isValidUrl = str => {
  try {
    new URL(str);
    return true;
  } catch {
    // Not an absolute URL, check if it's a relative URL or path
    return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
  }
};

/**
 * Heuristic for raw base64 payloads (Vercel sends inline images as bare
 * base64 rather than data URLs).
 *
 * Standard base64 encoder output always has a length that is a multiple of
 * 4 and at most two '=' padding characters; requiring both here avoids the
 * previous false positives where any ordinary alphanumeric string longer
 * than 20 characters (e.g. a 21-letter identifier) was misclassified and
 * redacted. Strings using the url-safe alphabet ('-'/'_') never matched and
 * still do not.
 * @param {string} str - candidate string (assumed already known to be a string).
 * @returns {boolean}
 */
const isRawBase64 = str => {
  // Skip if it's a valid URL or path
  if (isValidUrl(str)) {
    return false;
  }

  // Base64 images are typically at least a few hundred chars, but we'll be
  // conservative and only require > 20.
  return (
    str.length > 20 &&
    str.length % 4 === 0 &&
    /^[A-Za-z0-9+/]+={0,2}$/.test(str)
  );
};
54
/**
 * Replaces base64-encoded image payloads with a short placeholder so large
 * binary blobs are not sent to PostHog. Pass-through when the internal
 * multimodal flag is enabled or when `str` is not a string.
 * @param {*} str - candidate value (usually a content URL/data field).
 * @returns {*} the placeholder, or `str` unchanged.
 */
function redactBase64DataUrl(str) {
  if (isMultimodalEnabled() || !isString(str)) {
    return str;
  }
  // Both data URLs and bare base64 payloads (Vercel sends inline images as
  // raw base64) collapse to the same placeholder.
  const isImagePayload = isBase64DataUrl(str) || isRawBase64(str);
  return isImagePayload ? REDACTED_IMAGE_PLACEHOLDER : str;
}
69
+
70
// ============================================
// Common Message Processing
// ============================================

/**
 * Applies `transformContent` to every content part of one message or an
 * array of messages. String or empty content is passed through untouched,
 * array content is mapped element-wise, and a single content object is
 * transformed directly. Messages without a `content` key (or non-object
 * messages) are returned unchanged.
 * @param {object|object[]|null|undefined} messages - message(s) to process.
 * @param {(part: any) => any} transformContent - per-part sanitizer.
 * @returns the same shape as `messages`, with transformed content.
 */
const processMessages = (messages, transformContent) => {
  if (!messages) return messages;

  const transformMessage = msg => {
    if (!isObject(msg) || !('content' in msg)) {
      return msg;
    }
    const { content } = msg;
    let nextContent;
    if (typeof content === 'string' || !content) {
      nextContent = content; // plain text / empty: leave as-is
    } else if (Array.isArray(content)) {
      nextContent = content.map(transformContent); // multi-part content
    } else {
      nextContent = transformContent(content); // single content object
    }
    return { ...msg, content: nextContent };
  };

  // Handle both arrays and single messages
  return Array.isArray(messages)
    ? messages.map(transformMessage)
    : transformMessage(messages);
};
100
+
101
// ============================================
// Provider-Specific Image Sanitizers
// ============================================

/**
 * Sanitizes one OpenAI Chat Completions content part:
 * - `image_url` parts get their nested `url` run through redactBase64DataUrl
 * - `audio` parts get their `data` replaced with the placeholder, unless the
 *   multimodal flag is enabled
 * Any other part is returned untouched.
 * NOTE(review): audio data reuses the *image* placeholder string —
 * presumably intentional; confirm if a distinct audio label is wanted.
 * @param {*} item - a single content part.
 * @returns {*} the sanitized part (shallow-copied when modified).
 */
const sanitizeOpenAIImage = item => {
  if (!isObject(item)) return item;

  const isImageUrlPart =
    item.type === 'image_url' &&
    'image_url' in item &&
    isObject(item.image_url) &&
    'url' in item.image_url;
  if (isImageUrlPart) {
    const sanitizedUrl = redactBase64DataUrl(item.image_url.url);
    return { ...item, image_url: { ...item.image_url, url: sanitizedUrl } };
  }

  if (item.type === 'audio' && 'data' in item) {
    return isMultimodalEnabled()
      ? item
      : { ...item, data: REDACTED_IMAGE_PLACEHOLDER };
  }

  return item;
};
129
/**
 * Sanitizes one OpenAI Responses API content part: `input_image` parts get
 * their `image_url` string run through redactBase64DataUrl; everything else
 * passes through untouched.
 * @param {*} item - a single content part.
 * @returns {*} the sanitized part (shallow-copied when modified).
 */
const sanitizeOpenAIResponseImage = item => {
  if (!isObject(item)) {
    return item;
  }
  const isInputImagePart = item.type === 'input_image' && 'image_url' in item;
  if (!isInputImagePart) {
    return item;
  }
  return { ...item, image_url: redactBase64DataUrl(item.image_url) };
};
141
+
142
// Export individual sanitizers for tree-shaking

/** Sanitizes OpenAI Chat Completions messages (single message or array). */
const sanitizeOpenAI = data => processMessages(data, sanitizeOpenAIImage);
146
/** Sanitizes OpenAI Responses API input items (single item or array). */
const sanitizeOpenAIResponse = data =>
  processMessages(data, sanitizeOpenAIResponseImage);
149
+
17
150
  const STRING_FORMAT = 'utf8';
18
151
 
19
152
  /**
@@ -409,6 +542,9 @@ const sendEventToPosthog = async ({
409
542
  } : {}),
410
543
  ...(usage.webSearchCount ? {
411
544
  $ai_web_search_count: usage.webSearchCount
545
+ } : {}),
546
+ ...(usage.rawUsage ? {
547
+ $ai_usage: usage.rawUsage
412
548
  } : {})
413
549
  };
414
550
  const properties = {
@@ -498,124 +634,6 @@ function formatOpenAIResponsesInput(input, instructions) {
498
634
  return messages;
499
635
  }
500
636
 
501
- const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
502
-
503
- // ============================================
504
- // Multimodal Feature Toggle
505
- // ============================================
506
-
507
- const isMultimodalEnabled = () => {
508
- const val = process.env._INTERNAL_LLMA_MULTIMODAL || '';
509
- return val.toLowerCase() === 'true' || val === '1' || val.toLowerCase() === 'yes';
510
- };
511
-
512
- // ============================================
513
- // Base64 Detection Helpers
514
- // ============================================
515
-
516
- const isBase64DataUrl = str => {
517
- return /^data:([^;]+);base64,/.test(str);
518
- };
519
- const isValidUrl = str => {
520
- try {
521
- new URL(str);
522
- return true;
523
- } catch {
524
- // Not an absolute URL, check if it's a relative URL or path
525
- return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
526
- }
527
- };
528
- const isRawBase64 = str => {
529
- // Skip if it's a valid URL or path
530
- if (isValidUrl(str)) {
531
- return false;
532
- }
533
-
534
- // Check if it's a valid base64 string
535
- // Base64 images are typically at least a few hundred chars, but we'll be conservative
536
- return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
537
- };
538
- function redactBase64DataUrl(str) {
539
- if (isMultimodalEnabled()) return str;
540
- if (!isString(str)) return str;
541
-
542
- // Check for data URL format
543
- if (isBase64DataUrl(str)) {
544
- return REDACTED_IMAGE_PLACEHOLDER;
545
- }
546
-
547
- // Check for raw base64 (Vercel sends raw base64 for inline images)
548
- if (isRawBase64(str)) {
549
- return REDACTED_IMAGE_PLACEHOLDER;
550
- }
551
- return str;
552
- }
553
-
554
- // ============================================
555
- // Common Message Processing
556
- // ============================================
557
-
558
- const processMessages = (messages, transformContent) => {
559
- if (!messages) return messages;
560
- const processContent = content => {
561
- if (typeof content === 'string') return content;
562
- if (!content) return content;
563
- if (Array.isArray(content)) {
564
- return content.map(transformContent);
565
- }
566
-
567
- // Handle single object content
568
- return transformContent(content);
569
- };
570
- const processMessage = msg => {
571
- if (!isObject(msg) || !('content' in msg)) return msg;
572
- return {
573
- ...msg,
574
- content: processContent(msg.content)
575
- };
576
- };
577
-
578
- // Handle both arrays and single messages
579
- if (Array.isArray(messages)) {
580
- return messages.map(processMessage);
581
- }
582
- return processMessage(messages);
583
- };
584
-
585
- // ============================================
586
- // Provider-Specific Image Sanitizers
587
- // ============================================
588
-
589
- const sanitizeOpenAIImage = item => {
590
- if (!isObject(item)) return item;
591
-
592
- // Handle image_url format
593
- if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
594
- return {
595
- ...item,
596
- image_url: {
597
- ...item.image_url,
598
- url: redactBase64DataUrl(item.image_url.url)
599
- }
600
- };
601
- }
602
-
603
- // Handle audio format
604
- if (item.type === 'audio' && 'data' in item) {
605
- if (isMultimodalEnabled()) return item;
606
- return {
607
- ...item,
608
- data: REDACTED_IMAGE_PLACEHOLDER
609
- };
610
- }
611
- return item;
612
- };
613
-
614
- // Export individual sanitizers for tree-shaking
615
- const sanitizeOpenAI = data => {
616
- return processMessages(data, sanitizeOpenAIImage);
617
- };
618
-
619
637
  const Chat = OpenAI.Chat;
620
638
  const Completions = Chat.Completions;
621
639
  const Responses = OpenAI.Responses;
@@ -680,6 +698,7 @@ class WrappedCompletions extends Completions {
680
698
 
681
699
  // Map to track in-progress tool calls
682
700
  const toolCallsInProgress = new Map();
701
+ let rawUsageData;
683
702
  for await (const chunk of stream1) {
684
703
  // Extract model from chunk (Chat Completions chunks have model field)
685
704
  if (!modelFromResponse && chunk.model) {
@@ -730,6 +749,7 @@ class WrappedCompletions extends Completions {
730
749
 
731
750
  // Handle usage information
732
751
  if (chunk.usage) {
752
+ rawUsageData = chunk.usage;
733
753
  usage = {
734
754
  ...usage,
735
755
  inputTokens: chunk.usage.prompt_tokens ?? 0,
@@ -791,7 +811,8 @@ class WrappedCompletions extends Completions {
791
811
  outputTokens: usage.outputTokens,
792
812
  reasoningTokens: usage.reasoningTokens,
793
813
  cacheReadInputTokens: usage.cacheReadInputTokens,
794
- webSearchCount: usage.webSearchCount
814
+ webSearchCount: usage.webSearchCount,
815
+ rawUsage: rawUsageData
795
816
  },
796
817
  tools: availableTools
797
818
  });
@@ -843,7 +864,8 @@ class WrappedCompletions extends Completions {
843
864
  outputTokens: result.usage?.completion_tokens ?? 0,
844
865
  reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
845
866
  cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0,
846
- webSearchCount: calculateWebSearchCount(result)
867
+ webSearchCount: calculateWebSearchCount(result),
868
+ rawUsage: result.usage
847
869
  },
848
870
  tools: availableTools
849
871
  });
@@ -908,6 +930,7 @@ class WrappedResponses extends Responses {
908
930
  outputTokens: 0,
909
931
  webSearchCount: 0
910
932
  };
933
+ let rawUsageData;
911
934
  for await (const chunk of stream1) {
912
935
  if ('response' in chunk && chunk.response) {
913
936
  // Extract model from response object in chunk (for stored prompts)
@@ -923,6 +946,7 @@ class WrappedResponses extends Responses {
923
946
  finalContent = chunk.response.output;
924
947
  }
925
948
  if ('response' in chunk && chunk.response?.usage) {
949
+ rawUsageData = chunk.response.usage;
926
950
  usage = {
927
951
  ...usage,
928
952
  inputTokens: chunk.response.usage.input_tokens ?? 0,
@@ -939,7 +963,7 @@ class WrappedResponses extends Responses {
939
963
  ...posthogParams,
940
964
  model: openAIParams.model ?? modelFromResponse,
941
965
  provider: 'openai',
942
- input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
966
+ input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
943
967
  output: finalContent,
944
968
  latency,
945
969
  baseURL: this.baseURL,
@@ -950,7 +974,8 @@ class WrappedResponses extends Responses {
950
974
  outputTokens: usage.outputTokens,
951
975
  reasoningTokens: usage.reasoningTokens,
952
976
  cacheReadInputTokens: usage.cacheReadInputTokens,
953
- webSearchCount: usage.webSearchCount
977
+ webSearchCount: usage.webSearchCount,
978
+ rawUsage: rawUsageData
954
979
  },
955
980
  tools: availableTools
956
981
  });
@@ -960,7 +985,7 @@ class WrappedResponses extends Responses {
960
985
  ...posthogParams,
961
986
  model: openAIParams.model,
962
987
  provider: 'openai',
963
- input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
988
+ input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
964
989
  output: [],
965
990
  latency: 0,
966
991
  baseURL: this.baseURL,
@@ -991,7 +1016,7 @@ class WrappedResponses extends Responses {
991
1016
  ...posthogParams,
992
1017
  model: openAIParams.model ?? result.model,
993
1018
  provider: 'openai',
994
- input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
1019
+ input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
995
1020
  output: formattedOutput,
996
1021
  latency,
997
1022
  baseURL: this.baseURL,
@@ -1002,7 +1027,8 @@ class WrappedResponses extends Responses {
1002
1027
  outputTokens: result.usage?.output_tokens ?? 0,
1003
1028
  reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
1004
1029
  cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
1005
- webSearchCount: calculateWebSearchCount(result)
1030
+ webSearchCount: calculateWebSearchCount(result),
1031
+ rawUsage: result.usage
1006
1032
  },
1007
1033
  tools: availableTools
1008
1034
  });
@@ -1015,7 +1041,7 @@ class WrappedResponses extends Responses {
1015
1041
  ...posthogParams,
1016
1042
  model: openAIParams.model,
1017
1043
  provider: 'openai',
1018
- input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
1044
+ input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
1019
1045
  output: [],
1020
1046
  latency: 0,
1021
1047
  baseURL: this.baseURL,
@@ -1051,7 +1077,7 @@ class WrappedResponses extends Responses {
1051
1077
  ...posthogParams,
1052
1078
  model: openAIParams.model ?? result.model,
1053
1079
  provider: 'openai',
1054
- input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
1080
+ input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
1055
1081
  output: result.output,
1056
1082
  latency,
1057
1083
  baseURL: this.baseURL,
@@ -1061,7 +1087,8 @@ class WrappedResponses extends Responses {
1061
1087
  inputTokens: result.usage?.input_tokens ?? 0,
1062
1088
  outputTokens: result.usage?.output_tokens ?? 0,
1063
1089
  reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
1064
- cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0
1090
+ cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
1091
+ rawUsage: result.usage
1065
1092
  }
1066
1093
  });
1067
1094
  return result;
@@ -1071,7 +1098,7 @@ class WrappedResponses extends Responses {
1071
1098
  ...posthogParams,
1072
1099
  model: openAIParams.model,
1073
1100
  provider: 'openai',
1074
- input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
1101
+ input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
1075
1102
  output: [],
1076
1103
  latency: 0,
1077
1104
  baseURL: this.baseURL,
@@ -1120,7 +1147,8 @@ class WrappedEmbeddings extends Embeddings {
1120
1147
  params: body,
1121
1148
  httpStatus: 200,
1122
1149
  usage: {
1123
- inputTokens: result.usage?.prompt_tokens ?? 0
1150
+ inputTokens: result.usage?.prompt_tokens ?? 0,
1151
+ rawUsage: result.usage
1124
1152
  }
1125
1153
  });
1126
1154
  return result;
@@ -1203,7 +1231,8 @@ class WrappedTranscriptions extends Transcriptions {
1203
1231
  if ('usage' in chunk && chunk.usage) {
1204
1232
  usage = {
1205
1233
  inputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.input_tokens ?? 0 : 0,
1206
- outputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.output_tokens ?? 0 : 0
1234
+ outputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.output_tokens ?? 0 : 0,
1235
+ rawUsage: chunk.usage
1207
1236
  };
1208
1237
  }
1209
1238
  }
@@ -1264,7 +1293,8 @@ class WrappedTranscriptions extends Transcriptions {
1264
1293
  httpStatus: 200,
1265
1294
  usage: {
1266
1295
  inputTokens: result.usage?.type === 'tokens' ? result.usage.input_tokens ?? 0 : 0,
1267
- outputTokens: result.usage?.type === 'tokens' ? result.usage.output_tokens ?? 0 : 0
1296
+ outputTokens: result.usage?.type === 'tokens' ? result.usage.output_tokens ?? 0 : 0,
1297
+ rawUsage: result.usage
1268
1298
  }
1269
1299
  });
1270
1300
  return result;