@posthog/ai 7.2.1 → 7.3.0

This diff shows the changes between package versions that have been publicly released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the package content as published.
@@ -6,7 +6,7 @@ var openai = require('openai');
6
6
  var buffer = require('buffer');
7
7
  var uuid = require('uuid');
8
8
 
9
- var version = "7.2.1";
9
+ var version = "7.3.0";
10
10
 
11
11
  // Type guards for safer type checking
12
12
 
@@ -85,6 +85,14 @@ const formatResponseOpenAI = response => {
85
85
  });
86
86
  }
87
87
  }
88
+
89
+ // Handle audio output (gpt-4o-audio-preview)
90
+ if (choice.message.audio) {
91
+ content.push({
92
+ type: 'audio',
93
+ ...choice.message.audio
94
+ });
95
+ }
88
96
  }
89
97
  if (content.length > 0) {
90
98
  output.push({
@@ -466,6 +474,15 @@ function formatOpenAIResponsesInput(input, instructions) {
466
474
 
467
475
  const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
468
476
 
477
+ // ============================================
478
+ // Multimodal Feature Toggle
479
+ // ============================================
480
+
481
+ const isMultimodalEnabled = () => {
482
+ const val = process.env._INTERNAL_LLMA_MULTIMODAL || '';
483
+ return val.toLowerCase() === 'true' || val === '1' || val.toLowerCase() === 'yes';
484
+ };
485
+
469
486
  // ============================================
470
487
  // Base64 Detection Helpers
471
488
  // ============================================
@@ -493,6 +510,7 @@ const isRawBase64 = str => {
493
510
  return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
494
511
  };
495
512
  function redactBase64DataUrl(str) {
513
+ if (isMultimodalEnabled()) return str;
496
514
  if (!isString(str)) return str;
497
515
 
498
516
  // Check for data URL format
@@ -555,6 +573,15 @@ const sanitizeOpenAIImage = item => {
555
573
  }
556
574
  };
557
575
  }
576
+
577
+ // Handle audio format
578
+ if (item.type === 'audio' && 'data' in item) {
579
+ if (isMultimodalEnabled()) return item;
580
+ return {
581
+ ...item,
582
+ data: REDACTED_IMAGE_PLACEHOLDER
583
+ };
584
+ }
558
585
  return item;
559
586
  };
560
587
 
@@ -618,6 +645,7 @@ class WrappedCompletions extends Completions {
618
645
  try {
619
646
  const contentBlocks = [];
620
647
  let accumulatedContent = '';
648
+ let modelFromResponse;
621
649
  let usage = {
622
650
  inputTokens: 0,
623
651
  outputTokens: 0,
@@ -627,6 +655,10 @@ class WrappedCompletions extends Completions {
627
655
  // Map to track in-progress tool calls
628
656
  const toolCallsInProgress = new Map();
629
657
  for await (const chunk of stream1) {
658
+ // Extract model from chunk (Chat Completions chunks have model field)
659
+ if (!modelFromResponse && chunk.model) {
660
+ modelFromResponse = chunk.model;
661
+ }
630
662
  const choice = chunk?.choices?.[0];
631
663
  const chunkWebSearchCount = calculateWebSearchCount(chunk);
632
664
  if (chunkWebSearchCount > 0 && chunkWebSearchCount > (usage.webSearchCount ?? 0)) {
@@ -720,7 +752,7 @@ class WrappedCompletions extends Completions {
720
752
  await sendEventToPosthog({
721
753
  client: this.phClient,
722
754
  ...posthogParams,
723
- model: openAIParams.model,
755
+ model: openAIParams.model ?? modelFromResponse,
724
756
  provider: 'openai',
725
757
  input: sanitizeOpenAI(openAIParams.messages),
726
758
  output: formattedOutput,
@@ -774,7 +806,7 @@ class WrappedCompletions extends Completions {
774
806
  await sendEventToPosthog({
775
807
  client: this.phClient,
776
808
  ...posthogParams,
777
- model: openAIParams.model,
809
+ model: openAIParams.model ?? result.model,
778
810
  provider: 'openai',
779
811
  input: sanitizeOpenAI(openAIParams.messages),
780
812
  output: formattedOutput,
@@ -798,7 +830,7 @@ class WrappedCompletions extends Completions {
798
830
  await sendEventToPosthog({
799
831
  client: this.phClient,
800
832
  ...posthogParams,
801
- model: String(openAIParams.model ?? ''),
833
+ model: openAIParams.model,
802
834
  provider: 'openai',
803
835
  input: sanitizeOpenAI(openAIParams.messages),
804
836
  output: [],
@@ -847,6 +879,7 @@ class WrappedResponses extends Responses {
847
879
  (async () => {
848
880
  try {
849
881
  let finalContent = [];
882
+ let modelFromResponse;
850
883
  let usage = {
851
884
  inputTokens: 0,
852
885
  outputTokens: 0,
@@ -854,6 +887,10 @@ class WrappedResponses extends Responses {
854
887
  };
855
888
  for await (const chunk of stream1) {
856
889
  if ('response' in chunk && chunk.response) {
890
+ // Extract model from response object in chunk (for stored prompts)
891
+ if (!modelFromResponse && chunk.response.model) {
892
+ modelFromResponse = chunk.response.model;
893
+ }
857
894
  const chunkWebSearchCount = calculateWebSearchCount(chunk.response);
858
895
  if (chunkWebSearchCount > 0 && chunkWebSearchCount > (usage.webSearchCount ?? 0)) {
859
896
  usage.webSearchCount = chunkWebSearchCount;
@@ -877,8 +914,7 @@ class WrappedResponses extends Responses {
877
914
  await sendEventToPosthog({
878
915
  client: this.phClient,
879
916
  ...posthogParams,
880
- //@ts-expect-error
881
- model: openAIParams.model,
917
+ model: openAIParams.model ?? modelFromResponse,
882
918
  provider: 'openai',
883
919
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
884
920
  output: finalContent,
@@ -900,7 +936,6 @@ class WrappedResponses extends Responses {
900
936
  await sendEventToPosthog({
901
937
  client: this.phClient,
902
938
  ...posthogParams,
903
- //@ts-expect-error
904
939
  model: openAIParams.model,
905
940
  provider: 'openai',
906
941
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
@@ -933,8 +968,7 @@ class WrappedResponses extends Responses {
933
968
  await sendEventToPosthog({
934
969
  client: this.phClient,
935
970
  ...posthogParams,
936
- //@ts-expect-error
937
- model: openAIParams.model,
971
+ model: openAIParams.model ?? result.model,
938
972
  provider: 'openai',
939
973
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
940
974
  output: formattedOutput,
@@ -958,7 +992,7 @@ class WrappedResponses extends Responses {
958
992
  await sendEventToPosthog({
959
993
  client: this.phClient,
960
994
  ...posthogParams,
961
- model: String(openAIParams.model ?? ''),
995
+ model: openAIParams.model,
962
996
  provider: 'openai',
963
997
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
964
998
  output: [],
@@ -995,7 +1029,7 @@ class WrappedResponses extends Responses {
995
1029
  await sendEventToPosthog({
996
1030
  client: this.phClient,
997
1031
  ...posthogParams,
998
- model: String(openAIParams.model ?? ''),
1032
+ model: openAIParams.model ?? result.model,
999
1033
  provider: 'openai',
1000
1034
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
1001
1035
  output: result.output,
@@ -1016,7 +1050,7 @@ class WrappedResponses extends Responses {
1016
1050
  await sendEventToPosthog({
1017
1051
  client: this.phClient,
1018
1052
  ...posthogParams,
1019
- model: String(openAIParams.model ?? ''),
1053
+ model: openAIParams.model,
1020
1054
  provider: 'openai',
1021
1055
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
1022
1056
  output: [],
@@ -1206,7 +1240,7 @@ class WrappedTranscriptions extends Transcriptions {
1206
1240
  await sendEventToPosthog({
1207
1241
  client: this.phClient,
1208
1242
  ...posthogParams,
1209
- model: String(openAIParams.model ?? ''),
1243
+ model: openAIParams.model,
1210
1244
  provider: 'openai',
1211
1245
  input: openAIParams.prompt,
1212
1246
  output: result.text,
@@ -1226,7 +1260,7 @@ class WrappedTranscriptions extends Transcriptions {
1226
1260
  await sendEventToPosthog({
1227
1261
  client: this.phClient,
1228
1262
  ...posthogParams,
1229
- model: String(openAIParams.model ?? ''),
1263
+ model: openAIParams.model,
1230
1264
  provider: 'openai',
1231
1265
  input: openAIParams.prompt,
1232
1266
  output: [],