@posthog/ai 7.2.1 → 7.3.0

This diff shows the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
package/dist/index.cjs CHANGED
@@ -3,7 +3,6 @@
  var openai = require('openai');
  var buffer = require('buffer');
  var uuid = require('uuid');
- var ai = require('ai');
  var AnthropicOriginal = require('@anthropic-ai/sdk');
  var genai = require('@google/genai');
 
@@ -26,7 +25,7 @@ function _interopNamespaceDefault(e) {
 
  var uuid__namespace = /*#__PURE__*/_interopNamespaceDefault(uuid);
 
- var version = "7.2.1";
+ var version = "7.3.0";
 
  // Type guards for safer type checking
  const isString = value => {
@@ -133,6 +132,13 @@ const formatResponseOpenAI = response => {
  });
  }
  }
+ // Handle audio output (gpt-4o-audio-preview)
+ if (choice.message.audio) {
+ content.push({
+ type: 'audio',
+ ...choice.message.audio
+ });
+ }
  }
  if (content.length > 0) {
  output.push({
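
With this change, a Chat Completions message that carries an audio payload (e.g. from gpt-4o-audio-preview) is preserved as an 'audio' content block rather than dropped. A minimal sketch of the resulting shape; the field values are illustrative assumptions, not taken from the diff:

// Hypothetical response fragment:
const choice = { message: { audio: { id: 'audio_123', data: '<base64>', transcript: 'Hi there' } } };
// formatResponseOpenAI spreads the audio object into the block:
// { type: 'audio', id: 'audio_123', data: '<base64>', transcript: 'Hi there' }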
@@ -214,6 +220,19 @@ const formatResponseGemini = response => {
  arguments: part.functionCall.args
  }
  });
+ } else if (part.inlineData) {
+ // Handle audio/media inline data
+ const mimeType = part.inlineData.mimeType || 'audio/pcm';
+ let data = part.inlineData.data;
+ // Handle binary data (Buffer/Uint8Array -> base64)
+ if (data instanceof Uint8Array || buffer.Buffer.isBuffer(data)) {
+ data = buffer.Buffer.from(data).toString('base64');
+ }
+ content.push({
+ type: 'audio',
+ mime_type: mimeType,
+ data: data
+ });
+ }
  }
  }
  if (content.length > 0) {
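
The Uint8Array/Buffer branch above normalizes binary inline data to base64 before capture, defaulting the MIME type to 'audio/pcm' when Gemini omits it. A self-contained sketch of that conversion:

const { Buffer } = require('buffer');
const raw = new Uint8Array([0x52, 0x49, 0x46, 0x46]); // first bytes of a RIFF/WAV header, purely illustrative
const b64 = Buffer.from(raw).toString('base64'); // 'UklGRg=='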
@@ -618,6 +637,13 @@ function formatOpenAIResponsesInput(input, instructions) {
 
  const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
  // ============================================
+ // Multimodal Feature Toggle
+ // ============================================
+ const isMultimodalEnabled = () => {
+ const val = process.env._INTERNAL_LLMA_MULTIMODAL || '';
+ return val.toLowerCase() === 'true' || val === '1' || val.toLowerCase() === 'yes';
+ };
+ // ============================================
  // Base64 Detection Helpers
  // ============================================
  const isBase64DataUrl = str => {
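
The new toggle gates every base64 sanitizer below on one environment variable; 'true' or 'yes' in any case, or exactly '1', enables raw multimodal capture, and anything else keeps redaction on. A usage sketch (the variable name comes from the diff; the underscore prefix suggests it is internal and subject to change):

// Opt in to capturing raw multimodal payloads instead of redacted placeholders:
process.env._INTERNAL_LLMA_MULTIMODAL = '1';
// Unset it (or set any other value) to restore the default redaction behavior.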
@@ -642,6 +668,7 @@ const isRawBase64 = str => {
  return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
  };
  function redactBase64DataUrl(str) {
+ if (isMultimodalEnabled()) return str;
  if (!isString(str)) return str;
  // Check for data URL format
  if (isBase64DataUrl(str)) {
@@ -692,12 +719,21 @@ const sanitizeOpenAIImage = item => {
  }
  };
  }
+ // Handle audio format
+ if (item.type === 'audio' && 'data' in item) {
+ if (isMultimodalEnabled()) return item;
+ return {
+ ...item,
+ data: REDACTED_IMAGE_PLACEHOLDER
+ };
+ }
  return item;
  };
  const sanitizeAnthropicImage = item => {
+ if (isMultimodalEnabled()) return item;
  if (!isObject(item)) return item;
- // Handle Anthropic's image format
- if (item.type === 'image' && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
+ // Handle Anthropic's image and document formats (same structure, different type field)
+ if ((item.type === 'image' || item.type === 'document') && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
  return {
  ...item,
  source: {
@@ -709,8 +745,9 @@ const sanitizeAnthropicImage = item => {
  return item;
  };
  const sanitizeGeminiPart = part => {
+ if (isMultimodalEnabled()) return part;
  if (!isObject(part)) return part;
- // Handle Gemini's inline data format
+ // Handle Gemini's inline data format (images, audio, PDFs all use inlineData)
  if ('inlineData' in part && isObject(part.inlineData) && 'data' in part.inlineData) {
  return {
  ...part,
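
Taken together, the sanitizers now redact Anthropic 'document' blocks the same way as 'image' blocks, and each sanitizer short-circuits when the multimodal toggle is on. A sketch of the default (toggle off) behavior; the input is illustrative and the output shape assumes the rest of source is preserved, as the spread in the hunk above suggests:

sanitizeAnthropicImage({ type: 'document', source: { type: 'base64', media_type: 'application/pdf', data: 'JVBERi0x...' } });
// -> { type: 'document', source: { type: 'base64', media_type: 'application/pdf', data: '[base64 image redacted]' } }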
@@ -755,6 +792,7 @@ const sanitizeLangChainImage = item => {
  }
  // Anthropic style
  if (item.type === 'image' && 'source' in item && isObject(item.source) && 'data' in item.source) {
+ if (isMultimodalEnabled()) return item;
  return {
  ...item,
  source: {
@@ -840,6 +878,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  try {
  const contentBlocks = [];
  let accumulatedContent = '';
+ let modelFromResponse;
  let usage = {
  inputTokens: 0,
  outputTokens: 0,
@@ -848,6 +887,10 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  // Map to track in-progress tool calls
  const toolCallsInProgress = new Map();
  for await (const chunk of stream1) {
+ // Extract model from chunk (Chat Completions chunks have model field)
+ if (!modelFromResponse && chunk.model) {
+ modelFromResponse = chunk.model;
+ }
  const choice = chunk?.choices?.[0];
  const chunkWebSearchCount = calculateWebSearchCount(chunk);
  if (chunkWebSearchCount > 0 && chunkWebSearchCount > (usage.webSearchCount ?? 0)) {
@@ -935,7 +978,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: openAIParams.model,
+ model: openAIParams.model ?? modelFromResponse,
  provider: 'openai',
  input: sanitizeOpenAI(openAIParams.messages),
  output: formattedOutput,
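
This `openAIParams.model ?? modelFromResponse` pattern recurs throughout the release: the model named in the request wins, and the model observed in the response is the fallback when the request omits it (e.g. stored prompts that resolve the model server-side, per the diff's own comments). A one-line illustration of the nullish coalescing involved:

const reported = undefined ?? 'gpt-4o-mini'; // -> 'gpt-4o-mini'; a request-supplied model would take precedence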
@@ -988,7 +1031,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: openAIParams.model,
+ model: openAIParams.model ?? result.model,
  provider: 'openai',
  input: sanitizeOpenAI(openAIParams.messages),
  output: formattedOutput,
@@ -1012,7 +1055,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model,
  provider: 'openai',
  input: sanitizeOpenAI(openAIParams.messages),
  output: [],
@@ -1054,6 +1097,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  (async () => {
  try {
  let finalContent = [];
+ let modelFromResponse;
  let usage = {
  inputTokens: 0,
  outputTokens: 0,
@@ -1061,6 +1105,10 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  };
  for await (const chunk of stream1) {
  if ('response' in chunk && chunk.response) {
+ // Extract model from response object in chunk (for stored prompts)
+ if (!modelFromResponse && chunk.response.model) {
+ modelFromResponse = chunk.response.model;
+ }
  const chunkWebSearchCount = calculateWebSearchCount(chunk.response);
  if (chunkWebSearchCount > 0 && chunkWebSearchCount > (usage.webSearchCount ?? 0)) {
  usage.webSearchCount = chunkWebSearchCount;
@@ -1084,8 +1132,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- //@ts-expect-error
- model: openAIParams.model,
+ model: openAIParams.model ?? modelFromResponse,
  provider: 'openai',
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: finalContent,
@@ -1107,7 +1154,6 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
@@ -1140,8 +1186,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- //@ts-expect-error
- model: openAIParams.model,
+ model: openAIParams.model ?? result.model,
  provider: 'openai',
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: formattedOutput,
@@ -1165,7 +1210,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model,
  provider: 'openai',
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: [],
@@ -1202,7 +1247,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model ?? result.model,
  provider: 'openai',
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: result.output,
@@ -1223,7 +1268,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model,
  provider: 'openai',
  input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: [],
@@ -1398,7 +1443,7 @@ class WrappedTranscriptions extends Transcriptions {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model,
  provider: 'openai',
  input: openAIParams.prompt,
  output: result.text,
@@ -1418,7 +1463,7 @@ class WrappedTranscriptions extends Transcriptions {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model,
  provider: 'openai',
  input: openAIParams.prompt,
  output: [],
@@ -1480,6 +1525,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  try {
  const contentBlocks = [];
  let accumulatedContent = '';
+ let modelFromResponse;
  let usage = {
  inputTokens: 0,
  outputTokens: 0
@@ -1487,6 +1533,10 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  // Map to track in-progress tool calls
  const toolCallsInProgress = new Map();
  for await (const chunk of stream1) {
+ // Extract model from response if not in params
+ if (!modelFromResponse && chunk.model) {
+ modelFromResponse = chunk.model;
+ }
  const choice = chunk?.choices?.[0];
  // Handle text content
  const deltaContent = choice?.delta?.content;
@@ -1568,7 +1618,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: openAIParams.model,
+ model: openAIParams.model ?? modelFromResponse,
  provider: 'azure',
  input: sanitizeOpenAI(openAIParams.messages),
  output: formattedOutput,
@@ -1612,7 +1662,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: openAIParams.model,
+ model: openAIParams.model ?? result.model,
  provider: 'azure',
  input: openAIParams.messages,
  output: formatResponseOpenAI(result),
@@ -1676,11 +1726,18 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  (async () => {
  try {
  let finalContent = [];
+ let modelFromResponse;
  let usage = {
  inputTokens: 0,
  outputTokens: 0
  };
  for await (const chunk of stream1) {
+ if ('response' in chunk && chunk.response) {
+ // Extract model from response if not in params (for stored prompts)
+ if (!modelFromResponse && chunk.response.model) {
+ modelFromResponse = chunk.response.model;
+ }
+ }
  if (chunk.type === 'response.completed' && 'response' in chunk && chunk.response?.output && chunk.response.output.length > 0) {
  finalContent = chunk.response.output;
  }
@@ -1697,10 +1754,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- //@ts-expect-error
- model: openAIParams.model,
+ model: openAIParams.model ?? modelFromResponse,
  provider: 'azure',
- input: openAIParams.input,
+ input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: finalContent,
  latency,
  baseURL: this.baseURL,
@@ -1713,10 +1769,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- //@ts-expect-error
  model: openAIParams.model,
  provider: 'azure',
- input: openAIParams.input,
+ input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: [],
  latency: 0,
  baseURL: this.baseURL,
@@ -1742,10 +1797,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- //@ts-expect-error
- model: openAIParams.model,
+ model: openAIParams.model ?? result.model,
  provider: 'azure',
- input: openAIParams.input,
+ input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: result.output,
  latency,
  baseURL: this.baseURL,
@@ -1765,10 +1819,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- //@ts-expect-error
  model: openAIParams.model,
  provider: 'azure',
- input: openAIParams.input,
+ input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: [],
  latency: 0,
  baseURL: this.baseURL,
@@ -1798,9 +1851,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model ?? result.model,
  provider: 'azure',
- input: openAIParams.input,
+ input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: result.output,
  latency,
  baseURL: this.baseURL,
@@ -1818,9 +1871,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
  await sendEventToPosthog({
  client: this.phClient,
  ...posthogParams,
- model: String(openAIParams.model ?? ''),
+ model: openAIParams.model,
  provider: 'azure',
- input: openAIParams.input,
+ input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
  output: [],
  latency: 0,
  baseURL: this.baseURL,
@@ -2093,67 +2146,117 @@ const extractProvider = model => {
  const providerName = provider.split('.')[0];
  return providerName;
  };
- const createInstrumentationMiddleware = (phClient, model, options) => {
- const middleware = {
- wrapGenerate: async ({
- doGenerate,
- params
- }) => {
+ // Extract web search count from provider metadata (works for both V2 and V3)
+ const extractWebSearchCount = (providerMetadata, usage) => {
+ // Try Anthropic-specific extraction
+ if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
+ const serverToolUse = providerMetadata.anthropic.server_tool_use;
+ if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
+ return serverToolUse.web_search_requests;
+ }
+ }
+ // Fall back to generic calculation
+ return calculateWebSearchCount({
+ usage,
+ providerMetadata
+ });
+ };
+ // Extract additional token values from provider metadata
+ const extractAdditionalTokenValues = providerMetadata => {
+ if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'cacheCreationInputTokens' in providerMetadata.anthropic) {
+ return {
+ cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
+ };
+ }
+ return {};
+ };
+ // Helper to extract numeric token value from V2 (number) or V3 (object with .total) usage formats
+ const extractTokenCount = value => {
+ if (typeof value === 'number') {
+ return value;
+ }
+ if (value && typeof value === 'object' && 'total' in value && typeof value.total === 'number') {
+ return value.total;
+ }
+ return undefined;
+ };
+ // Helper to extract reasoning tokens from V2 (usage.reasoningTokens) or V3 (usage.outputTokens.reasoning)
+ const extractReasoningTokens = usage => {
+ // V2 style: top-level reasoningTokens
+ if ('reasoningTokens' in usage) {
+ return usage.reasoningTokens;
+ }
+ // V3 style: nested in outputTokens.reasoning
+ if ('outputTokens' in usage && usage.outputTokens && typeof usage.outputTokens === 'object' && 'reasoning' in usage.outputTokens) {
+ return usage.outputTokens.reasoning;
+ }
+ return undefined;
+ };
+ // Helper to extract cached input tokens from V2 (usage.cachedInputTokens) or V3 (usage.inputTokens.cacheRead)
+ const extractCacheReadTokens = usage => {
+ // V2 style: top-level cachedInputTokens
+ if ('cachedInputTokens' in usage) {
+ return usage.cachedInputTokens;
+ }
+ // V3 style: nested in inputTokens.cacheRead
+ if ('inputTokens' in usage && usage.inputTokens && typeof usage.inputTokens === 'object' && 'cacheRead' in usage.inputTokens) {
+ return usage.inputTokens.cacheRead;
+ }
+ return undefined;
+ };
+ /**
+ * Wraps a Vercel AI SDK language model (V2 or V3) with PostHog tracing.
+ * Automatically detects the model version and applies appropriate instrumentation.
+ */
+ const wrapVercelLanguageModel = (model, phClient, options) => {
+ const traceId = options.posthogTraceId ?? uuid.v4();
+ const mergedOptions = {
+ ...options,
+ posthogTraceId: traceId,
+ posthogDistinctId: options.posthogDistinctId,
+ posthogProperties: {
+ ...options.posthogProperties,
+ $ai_framework: 'vercel',
+ $ai_framework_version: model.specificationVersion === 'v3' ? '6' : '5'
+ }
+ };
+ // Create wrapped model that preserves the original type
+ const wrappedModel = {
+ ...model,
+ doGenerate: async params => {
  const startTime = Date.now();
  const mergedParams = {
- ...options,
- ...mapVercelParams(params),
- posthogProperties: {
- ...options.posthogProperties,
- $ai_framework: 'vercel'
- }
+ ...mergedOptions,
+ ...mapVercelParams(params)
  };
  const availableTools = extractAvailableToolCalls('vercel', params);
  try {
- const result = await doGenerate();
- const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
- const provider = options.posthogProviderOverride ?? extractProvider(model);
+ const result = await model.doGenerate(params);
+ const modelId = mergedOptions.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
+ const provider = mergedOptions.posthogProviderOverride ?? extractProvider(model);
  const baseURL = ''; // cannot currently get baseURL from vercel
  const content = mapVercelOutput(result.content);
  const latency = (Date.now() - startTime) / 1000;
  const providerMetadata = result.providerMetadata;
- const additionalTokenValues = {
- ...(providerMetadata?.anthropic ? {
- cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
- } : {})
- };
- // Calculate web search count based on provider
- let webSearchCount = 0;
- if (providerMetadata?.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
- // Anthropic-specific extraction
- const serverToolUse = providerMetadata.anthropic.server_tool_use;
- if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
- webSearchCount = serverToolUse.web_search_requests;
- }
- } else {
- // For other providers through Vercel, pass available metadata to helper
- // Note: Vercel abstracts provider responses, so we may not have access to
- // raw citations/annotations unless Vercel exposes them in usage/metadata
- webSearchCount = calculateWebSearchCount({
- usage: result.usage,
- providerMetadata: providerMetadata
- });
- }
+ const additionalTokenValues = extractAdditionalTokenValues(providerMetadata);
+ const webSearchCount = extractWebSearchCount(providerMetadata, result.usage);
+ // V2 usage has simple numbers, V3 has objects with .total - normalize both
+ const usageObj = result.usage;
  const usage = {
- inputTokens: result.usage.inputTokens,
- outputTokens: result.usage.outputTokens,
- reasoningTokens: result.usage.reasoningTokens,
- cacheReadInputTokens: result.usage.cachedInputTokens,
+ inputTokens: extractTokenCount(result.usage.inputTokens),
+ outputTokens: extractTokenCount(result.usage.outputTokens),
+ reasoningTokens: extractReasoningTokens(usageObj),
+ cacheReadInputTokens: extractCacheReadTokens(usageObj),
  webSearchCount,
  ...additionalTokenValues
  };
  await sendEventToPosthog({
  client: phClient,
- distinctId: options.posthogDistinctId,
- traceId: options.posthogTraceId ?? uuid.v4(),
+ distinctId: mergedOptions.posthogDistinctId,
+ traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
  model: modelId,
  provider: provider,
- input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
+ input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
  output: content,
  latency,
  baseURL,
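
extractTokenCount is what lets a single code path read both AI SDK usage shapes. Per the branches in the hunk above, it accepts either form (values illustrative):

extractTokenCount(42); // V2-style usage: plain number -> 42
extractTokenCount({ total: 42, reasoning: 10 }); // V3-style usage: object with numeric .total -> 42
extractTokenCount(undefined); // anything else -> undefined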
@@ -2161,18 +2264,18 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  httpStatus: 200,
  usage,
  tools: availableTools,
- captureImmediate: options.posthogCaptureImmediate
+ captureImmediate: mergedOptions.posthogCaptureImmediate
  });
  return result;
  } catch (error) {
  const modelId = model.modelId;
  await sendEventToPosthog({
  client: phClient,
- distinctId: options.posthogDistinctId,
- traceId: options.posthogTraceId ?? uuid.v4(),
+ distinctId: mergedOptions.posthogDistinctId,
+ traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
  model: modelId,
  provider: model.provider,
- input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
+ input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
  output: [],
  latency: 0,
  baseURL: '',
@@ -2185,30 +2288,23 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  isError: true,
  error: truncate(JSON.stringify(error)),
  tools: availableTools,
- captureImmediate: options.posthogCaptureImmediate
+ captureImmediate: mergedOptions.posthogCaptureImmediate
  });
  throw error;
  }
  },
- wrapStream: async ({
- doStream,
- params
- }) => {
+ doStream: async params => {
  const startTime = Date.now();
  let generatedText = '';
  let reasoningText = '';
  let usage = {};
  let providerMetadata = undefined;
  const mergedParams = {
- ...options,
- ...mapVercelParams(params),
- posthogProperties: {
- ...options.posthogProperties,
- $ai_framework: 'vercel'
- }
+ ...mergedOptions,
+ ...mapVercelParams(params)
  };
- const modelId = options.posthogModelOverride ?? model.modelId;
- const provider = options.posthogProviderOverride ?? extractProvider(model);
+ const modelId = mergedOptions.posthogModelOverride ?? model.modelId;
+ const provider = mergedOptions.posthogProviderOverride ?? extractProvider(model);
  const availableTools = extractAvailableToolCalls('vercel', params);
  const baseURL = ''; // cannot currently get baseURL from vercel
  // Map to track in-progress tool calls
@@ -2217,15 +2313,15 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  const {
  stream,
  ...rest
- } = await doStream();
+ } = await model.doStream(params);
  const transformStream = new TransformStream({
  transform(chunk, controller) {
- // Handle new v5 streaming patterns
+ // Handle streaming patterns - compatible with both V2 and V3
  if (chunk.type === 'text-delta') {
  generatedText += chunk.delta;
  }
  if (chunk.type === 'reasoning-delta') {
- reasoningText += chunk.delta; // New in v5
+ reasoningText += chunk.delta;
  }
  // Handle tool call chunks
  if (chunk.type === 'tool-input-start') {
@@ -2245,7 +2341,6 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  }
  if (chunk.type === 'tool-input-end') {
  // Tool call is complete, keep it in the map for final processing
- // Nothing specific to do here, the tool call is already complete
  }
  if (chunk.type === 'tool-call') {
  // Direct tool call chunk (complete tool call)
@@ -2257,14 +2352,13 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  }
  if (chunk.type === 'finish') {
  providerMetadata = chunk.providerMetadata;
- const additionalTokenValues = providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'cacheCreationInputTokens' in providerMetadata.anthropic ? {
- cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
- } : {};
+ const additionalTokenValues = extractAdditionalTokenValues(providerMetadata);
+ const chunkUsage = chunk.usage || {};
  usage = {
- inputTokens: chunk.usage?.inputTokens,
- outputTokens: chunk.usage?.outputTokens,
- reasoningTokens: chunk.usage?.reasoningTokens,
- cacheReadInputTokens: chunk.usage?.cachedInputTokens,
+ inputTokens: extractTokenCount(chunk.usage?.inputTokens),
+ outputTokens: extractTokenCount(chunk.usage?.outputTokens),
+ reasoningTokens: extractReasoningTokens(chunkUsage),
+ cacheReadInputTokens: extractCacheReadTokens(chunkUsage),
  ...additionalTokenValues
  };
  }
@@ -2304,23 +2398,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  role: 'assistant',
  content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
  }] : [];
- // Calculate web search count based on provider
- let webSearchCount = 0;
- if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
- // Anthropic-specific extraction
- const serverToolUse = providerMetadata.anthropic.server_tool_use;
- if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
- webSearchCount = serverToolUse.web_search_requests;
- }
- } else {
- // For other providers through Vercel, pass available metadata to helper
- // Note: Vercel abstracts provider responses, so we may not have access to
- // raw citations/annotations unless Vercel exposes them in usage/metadata
- webSearchCount = calculateWebSearchCount({
- usage: usage,
- providerMetadata: providerMetadata
- });
- }
+ const webSearchCount = extractWebSearchCount(providerMetadata, usage);
  // Update usage with web search count
  const finalUsage = {
  ...usage,
@@ -2328,11 +2406,11 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  };
  await sendEventToPosthog({
  client: phClient,
- distinctId: options.posthogDistinctId,
- traceId: options.posthogTraceId ?? uuid.v4(),
+ distinctId: mergedOptions.posthogDistinctId,
+ traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
  model: modelId,
  provider: provider,
- input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
+ input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
  output: output,
  latency,
  baseURL,
@@ -2340,7 +2418,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  httpStatus: 200,
  usage: finalUsage,
  tools: availableTools,
- captureImmediate: options.posthogCaptureImmediate
+ captureImmediate: mergedOptions.posthogCaptureImmediate
  });
  }
  });
@@ -2351,11 +2429,11 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  } catch (error) {
  await sendEventToPosthog({
  client: phClient,
- distinctId: options.posthogDistinctId,
- traceId: options.posthogTraceId ?? uuid.v4(),
+ distinctId: mergedOptions.posthogDistinctId,
+ traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
  model: modelId,
  provider: provider,
- input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
+ input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
  output: [],
  latency: 0,
  baseURL: '',
@@ -2368,25 +2446,12 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
  isError: true,
  error: truncate(JSON.stringify(error)),
  tools: availableTools,
- captureImmediate: options.posthogCaptureImmediate
+ captureImmediate: mergedOptions.posthogCaptureImmediate
  });
  throw error;
  }
  }
  };
- return middleware;
- };
- const wrapVercelLanguageModel = (model, phClient, options) => {
- const traceId = options.posthogTraceId ?? uuid.v4();
- const middleware = createInstrumentationMiddleware(phClient, model, {
- ...options,
- posthogTraceId: traceId,
- posthogDistinctId: options.posthogDistinctId
- });
- const wrappedModel = ai.wrapLanguageModel({
- model,
- middleware
- });
  return wrappedModel;
  };
 
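The net effect of this refactor: the wrapper no longer routes through ai.wrapLanguageModel (which is why the top-level require('ai') was dropped); instead it spreads the original model and overrides doGenerate/doStream directly, so one code path serves both specificationVersion 'v2' and 'v3' models. A hedged usage sketch; the provider setup and client are assumptions for illustration, only wrapVercelLanguageModel itself comes from the diff:

// Assumes a Vercel AI SDK provider (e.g. @ai-sdk/openai) and an initialized PostHog client.
const wrapped = wrapVercelLanguageModel(openai('gpt-4o-mini'), phClient, {
  posthogDistinctId: 'user_123',
  posthogProperties: { team: 'growth' }
});
// wrapped behaves like the original model; passing it to generateText/streamText also captures
// a PostHog event tagged $ai_framework_version '6' for v3 models and '5' otherwise.
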
@@ -2810,6 +2875,39 @@ class WrappedModels {
  throw error;
  }
  }
+ formatPartsAsContentBlocks(parts) {
+ const blocks = [];
+ for (const part of parts) {
+ // Handle dict/object with text field
+ if (part && typeof part === 'object' && 'text' in part && part.text) {
+ blocks.push({
+ type: 'text',
+ text: String(part.text)
+ });
+ }
+ // Handle string parts
+ else if (typeof part === 'string') {
+ blocks.push({
+ type: 'text',
+ text: part
+ });
+ }
+ // Handle inlineData (images, audio, PDFs)
+ else if (part && typeof part === 'object' && 'inlineData' in part) {
+ const inlineData = part.inlineData;
+ const mimeType = inlineData.mimeType || inlineData.mime_type || '';
+ const contentType = mimeType.startsWith('image/') ? 'image' : 'document';
+ blocks.push({
+ type: contentType,
+ inline_data: {
+ data: inlineData.data,
+ mime_type: mimeType
+ }
+ });
+ }
+ }
+ return blocks;
+ }
  formatInput(contents) {
  if (typeof contents === 'string') {
  return [{
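
formatPartsAsContentBlocks maps Gemini message parts onto the content-block schema PostHog captures. Tracing the three branches above on an illustrative input:

const parts = ['hello', { text: 'world' }, { inlineData: { mimeType: 'image/png', data: '<base64>' } }];
// -> [
//   { type: 'text', text: 'hello' },
//   { type: 'text', text: 'world' },
//   { type: 'image', inline_data: { data: '<base64>', mime_type: 'image/png' } }
// ]
// Non-image MIME types (e.g. 'application/pdf', 'audio/wav') yield type 'document' instead.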
@@ -2834,20 +2932,24 @@ class WrappedModels {
  };
  }
  if ('content' in obj && obj.content) {
+ // If content is a list, format it as content blocks
+ if (Array.isArray(obj.content)) {
+ const contentBlocks = this.formatPartsAsContentBlocks(obj.content);
+ return {
+ role: isString(obj.role) ? obj.role : 'user',
+ content: contentBlocks
+ };
+ }
  return {
  role: isString(obj.role) ? obj.role : 'user',
  content: obj.content
  };
  }
  if ('parts' in obj && Array.isArray(obj.parts)) {
+ const contentBlocks = this.formatPartsAsContentBlocks(obj.parts);
  return {
  role: isString(obj.role) ? obj.role : 'user',
- content: obj.parts.map(part => {
- if (part && typeof part === 'object' && 'text' in part) {
- return part.text;
- }
- return part;
- })
+ content: contentBlocks
  };
  }
  }
@@ -3425,7 +3527,7 @@ var BaseCallbackHandler = class extends BaseCallbackHandlerMethodsClass {
  }
  static fromMethods(methods) {
  class Handler extends BaseCallbackHandler {
- name = uuid__namespace.v4();
+ name = uuid__namespace.v7();
  constructor() {
  super();
  Object.assign(this, methods);
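
The generated handler name moves from uuid v4 (fully random) to uuid v7, which places a Unix-millisecond timestamp in the high bits so names created in sequence sort chronologically. A quick sketch; this assumes a uuid package version that ships v7 (uuid >= 10):

const { v7: uuidv7 } = require('uuid');
const a = uuidv7();
const b = uuidv7();
console.log(a < b); // true for IDs minted at different milliseconds, since v7 strings sort by time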