@posthog/ai 7.2.2 → 7.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.cjs +1 -1
- package/dist/anthropic/index.cjs.map +1 -1
- package/dist/anthropic/index.mjs +1 -1
- package/dist/anthropic/index.mjs.map +1 -1
- package/dist/gemini/index.cjs +1 -1
- package/dist/gemini/index.cjs.map +1 -1
- package/dist/gemini/index.d.ts +2 -7
- package/dist/gemini/index.mjs +1 -1
- package/dist/gemini/index.mjs.map +1 -1
- package/dist/index.cjs +167 -141
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +9 -9
- package/dist/index.mjs +167 -141
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +1 -1
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.mjs +1 -1
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +21 -14
- package/dist/openai/index.cjs.map +1 -1
- package/dist/openai/index.mjs +21 -14
- package/dist/openai/index.mjs.map +1 -1
- package/dist/vercel/index.cjs +131 -113
- package/dist/vercel/index.cjs.map +1 -1
- package/dist/vercel/index.d.ts +7 -2
- package/dist/vercel/index.mjs +131 -113
- package/dist/vercel/index.mjs.map +1 -1
- package/package.json +11 -6
package/dist/index.cjs
CHANGED

@@ -3,7 +3,6 @@
 var openai = require('openai');
 var buffer = require('buffer');
 var uuid = require('uuid');
-var ai = require('ai');
 var AnthropicOriginal = require('@anthropic-ai/sdk');
 var genai = require('@google/genai');
 
@@ -26,7 +25,7 @@ function _interopNamespaceDefault(e) {
 
 var uuid__namespace = /*#__PURE__*/_interopNamespaceDefault(uuid);
 
-var version = "7.2.2";
+var version = "7.3.1";
 
 // Type guards for safer type checking
 const isString = value => {
@@ -879,6 +878,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
 try {
 const contentBlocks = [];
 let accumulatedContent = '';
+let modelFromResponse;
 let usage = {
 inputTokens: 0,
 outputTokens: 0,
@@ -887,6 +887,10 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
 // Map to track in-progress tool calls
 const toolCallsInProgress = new Map();
 for await (const chunk of stream1) {
+// Extract model from chunk (Chat Completions chunks have model field)
+if (!modelFromResponse && chunk.model) {
+modelFromResponse = chunk.model;
+}
 const choice = chunk?.choices?.[0];
 const chunkWebSearchCount = calculateWebSearchCount(chunk);
 if (chunkWebSearchCount > 0 && chunkWebSearchCount > (usage.webSearchCount ?? 0)) {
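The recurring change in this file: each OpenAI/Azure capture site now falls back to the model name reported by the response when the request params omit it (e.g. stored prompts or server-side defaults). A minimal sketch of the pattern, with hypothetical `params`, `stream`, and `capture` standing in for the SDK internals above:

    // Sketch only - mirrors the fallback added in this diff, not the package's exact code.
    async function traceChatStream(params, stream, capture) {
      let modelFromResponse; // the first chunk that carries a model field wins
      for await (const chunk of stream) {
        if (!modelFromResponse && chunk.model) {
          modelFromResponse = chunk.model;
        }
        // ...accumulate content, usage and tool calls as above...
      }
      // Prefer the caller-supplied model; otherwise use what the API reported.
      await capture({ model: params.model ?? modelFromResponse });
    }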
@@ -974,7 +978,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model: openAIParams.model,
+model: openAIParams.model ?? modelFromResponse,
 provider: 'openai',
 input: sanitizeOpenAI(openAIParams.messages),
 output: formattedOutput,
@@ -1027,7 +1031,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model: openAIParams.model,
+model: openAIParams.model ?? result.model,
 provider: 'openai',
 input: sanitizeOpenAI(openAIParams.messages),
 output: formattedOutput,
@@ -1051,7 +1055,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model,
 provider: 'openai',
 input: sanitizeOpenAI(openAIParams.messages),
 output: [],
@@ -1093,6 +1097,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 (async () => {
 try {
 let finalContent = [];
+let modelFromResponse;
 let usage = {
 inputTokens: 0,
 outputTokens: 0,
@@ -1100,6 +1105,10 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 };
 for await (const chunk of stream1) {
 if ('response' in chunk && chunk.response) {
+// Extract model from response object in chunk (for stored prompts)
+if (!modelFromResponse && chunk.response.model) {
+modelFromResponse = chunk.response.model;
+}
 const chunkWebSearchCount = calculateWebSearchCount(chunk.response);
 if (chunkWebSearchCount > 0 && chunkWebSearchCount > (usage.webSearchCount ?? 0)) {
 usage.webSearchCount = chunkWebSearchCount;
@@ -1123,8 +1132,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-
-model: openAIParams.model,
+model: openAIParams.model ?? modelFromResponse,
 provider: 'openai',
 input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: finalContent,
@@ -1146,7 +1154,6 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-//@ts-expect-error
 model: openAIParams.model,
 provider: 'openai',
 input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
@@ -1179,8 +1186,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-
-model: openAIParams.model,
+model: openAIParams.model ?? result.model,
 provider: 'openai',
 input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: formattedOutput,
@@ -1204,7 +1210,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model,
 provider: 'openai',
 input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: [],
@@ -1241,7 +1247,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model ?? result.model,
 provider: 'openai',
 input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: result.output,
@@ -1262,7 +1268,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model,
 provider: 'openai',
 input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: [],
@@ -1437,7 +1443,7 @@ class WrappedTranscriptions extends Transcriptions {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model,
 provider: 'openai',
 input: openAIParams.prompt,
 output: result.text,
@@ -1457,7 +1463,7 @@ class WrappedTranscriptions extends Transcriptions {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model,
 provider: 'openai',
 input: openAIParams.prompt,
 output: [],
@@ -1519,6 +1525,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
 try {
 const contentBlocks = [];
 let accumulatedContent = '';
+let modelFromResponse;
 let usage = {
 inputTokens: 0,
 outputTokens: 0
@@ -1526,6 +1533,10 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
 // Map to track in-progress tool calls
 const toolCallsInProgress = new Map();
 for await (const chunk of stream1) {
+// Extract model from response if not in params
+if (!modelFromResponse && chunk.model) {
+modelFromResponse = chunk.model;
+}
 const choice = chunk?.choices?.[0];
 // Handle text content
 const deltaContent = choice?.delta?.content;
@@ -1607,7 +1618,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model: openAIParams.model,
+model: openAIParams.model ?? modelFromResponse,
 provider: 'azure',
 input: sanitizeOpenAI(openAIParams.messages),
 output: formattedOutput,
@@ -1651,7 +1662,7 @@ class WrappedCompletions extends openai.AzureOpenAI.Chat.Completions {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model: openAIParams.model,
+model: openAIParams.model ?? result.model,
 provider: 'azure',
 input: openAIParams.messages,
 output: formatResponseOpenAI(result),
@@ -1715,11 +1726,18 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 (async () => {
 try {
 let finalContent = [];
+let modelFromResponse;
 let usage = {
 inputTokens: 0,
 outputTokens: 0
 };
 for await (const chunk of stream1) {
+if ('response' in chunk && chunk.response) {
+// Extract model from response if not in params (for stored prompts)
+if (!modelFromResponse && chunk.response.model) {
+modelFromResponse = chunk.response.model;
+}
+}
 if (chunk.type === 'response.completed' && 'response' in chunk && chunk.response?.output && chunk.response.output.length > 0) {
 finalContent = chunk.response.output;
 }
@@ -1736,10 +1754,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-
-model: openAIParams.model,
+model: openAIParams.model ?? modelFromResponse,
 provider: 'azure',
-input: openAIParams.input,
+input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: finalContent,
 latency,
 baseURL: this.baseURL,
@@ -1752,10 +1769,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-//@ts-expect-error
 model: openAIParams.model,
 provider: 'azure',
-input: openAIParams.input,
+input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: [],
 latency: 0,
 baseURL: this.baseURL,
@@ -1781,10 +1797,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-
-model: openAIParams.model,
+model: openAIParams.model ?? result.model,
 provider: 'azure',
-input: openAIParams.input,
+input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: result.output,
 latency,
 baseURL: this.baseURL,
@@ -1804,10 +1819,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-//@ts-expect-error
 model: openAIParams.model,
 provider: 'azure',
-input: openAIParams.input,
+input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: [],
 latency: 0,
 baseURL: this.baseURL,
@@ -1837,9 +1851,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model ?? result.model,
 provider: 'azure',
-input: openAIParams.input,
+input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: result.output,
 latency,
 baseURL: this.baseURL,
@@ -1857,9 +1871,9 @@ class WrappedResponses extends openai.AzureOpenAI.Responses {
 await sendEventToPosthog({
 client: this.phClient,
 ...posthogParams,
-model:
+model: openAIParams.model,
 provider: 'azure',
-input: openAIParams.input,
+input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
 output: [],
 latency: 0,
 baseURL: this.baseURL,
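The Azure Responses paths above also stop sending `openAIParams.input` raw and route it through the same `formatOpenAIResponsesInput(input, instructions)` helper as the non-Azure paths, so `instructions` are captured alongside the input. The helper's body is not part of this diff; a plausible shape, assuming it merely prepends instructions as a system-style message:

    // Assumption: illustrative reconstruction only - the real helper is defined
    // elsewhere in the package and may differ.
    function formatOpenAIResponsesInput(input, instructions) {
      const items = typeof input === 'string' ? [{ role: 'user', content: input }] : input ?? [];
      return instructions ? [{ role: 'system', content: instructions }, ...items] : items;
    }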
@@ -2132,67 +2146,117 @@ const extractProvider = model => {
 const providerName = provider.split('.')[0];
 return providerName;
 };
-
-
-
-
-
-
+// Extract web search count from provider metadata (works for both V2 and V3)
+const extractWebSearchCount = (providerMetadata, usage) => {
+// Try Anthropic-specific extraction
+if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
+const serverToolUse = providerMetadata.anthropic.server_tool_use;
+if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
+return serverToolUse.web_search_requests;
+}
+}
+// Fall back to generic calculation
+return calculateWebSearchCount({
+usage,
+providerMetadata
+});
+};
+// Extract additional token values from provider metadata
+const extractAdditionalTokenValues = providerMetadata => {
+if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'cacheCreationInputTokens' in providerMetadata.anthropic) {
+return {
+cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
+};
+}
+return {};
+};
+// Helper to extract numeric token value from V2 (number) or V3 (object with .total) usage formats
+const extractTokenCount = value => {
+if (typeof value === 'number') {
+return value;
+}
+if (value && typeof value === 'object' && 'total' in value && typeof value.total === 'number') {
+return value.total;
+}
+return undefined;
+};
+// Helper to extract reasoning tokens from V2 (usage.reasoningTokens) or V3 (usage.outputTokens.reasoning)
+const extractReasoningTokens = usage => {
+// V2 style: top-level reasoningTokens
+if ('reasoningTokens' in usage) {
+return usage.reasoningTokens;
+}
+// V3 style: nested in outputTokens.reasoning
+if ('outputTokens' in usage && usage.outputTokens && typeof usage.outputTokens === 'object' && 'reasoning' in usage.outputTokens) {
+return usage.outputTokens.reasoning;
+}
+return undefined;
+};
+// Helper to extract cached input tokens from V2 (usage.cachedInputTokens) or V3 (usage.inputTokens.cacheRead)
+const extractCacheReadTokens = usage => {
+// V2 style: top-level cachedInputTokens
+if ('cachedInputTokens' in usage) {
+return usage.cachedInputTokens;
+}
+// V3 style: nested in inputTokens.cacheRead
+if ('inputTokens' in usage && usage.inputTokens && typeof usage.inputTokens === 'object' && 'cacheRead' in usage.inputTokens) {
+return usage.inputTokens.cacheRead;
+}
+return undefined;
+};
+/**
+ * Wraps a Vercel AI SDK language model (V2 or V3) with PostHog tracing.
+ * Automatically detects the model version and applies appropriate instrumentation.
+ */
+const wrapVercelLanguageModel = (model, phClient, options) => {
+const traceId = options.posthogTraceId ?? uuid.v4();
+const mergedOptions = {
+...options,
+posthogTraceId: traceId,
+posthogDistinctId: options.posthogDistinctId,
+posthogProperties: {
+...options.posthogProperties,
+$ai_framework: 'vercel',
+$ai_framework_version: model.specificationVersion === 'v3' ? '6' : '5'
+}
+};
+// Create wrapped model that preserves the original type
+const wrappedModel = {
+...model,
+doGenerate: async params => {
 const startTime = Date.now();
 const mergedParams = {
-...
-...mapVercelParams(params)
-posthogProperties: {
-...options.posthogProperties,
-$ai_framework: 'vercel'
-}
+...mergedOptions,
+...mapVercelParams(params)
 };
 const availableTools = extractAvailableToolCalls('vercel', params);
 try {
-const result = await doGenerate();
-const modelId =
-const provider =
+const result = await model.doGenerate(params);
+const modelId = mergedOptions.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
+const provider = mergedOptions.posthogProviderOverride ?? extractProvider(model);
 const baseURL = ''; // cannot currently get baseURL from vercel
 const content = mapVercelOutput(result.content);
 const latency = (Date.now() - startTime) / 1000;
 const providerMetadata = result.providerMetadata;
-const additionalTokenValues =
-
-
-
-};
-// Calculate web search count based on provider
-let webSearchCount = 0;
-if (providerMetadata?.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
-// Anthropic-specific extraction
-const serverToolUse = providerMetadata.anthropic.server_tool_use;
-if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
-webSearchCount = serverToolUse.web_search_requests;
-}
-} else {
-// For other providers through Vercel, pass available metadata to helper
-// Note: Vercel abstracts provider responses, so we may not have access to
-// raw citations/annotations unless Vercel exposes them in usage/metadata
-webSearchCount = calculateWebSearchCount({
-usage: result.usage,
-providerMetadata: providerMetadata
-});
-}
+const additionalTokenValues = extractAdditionalTokenValues(providerMetadata);
+const webSearchCount = extractWebSearchCount(providerMetadata, result.usage);
+// V2 usage has simple numbers, V3 has objects with .total - normalize both
+const usageObj = result.usage;
 const usage = {
-inputTokens: result.usage.inputTokens,
-outputTokens: result.usage.outputTokens,
-reasoningTokens:
-cacheReadInputTokens:
+inputTokens: extractTokenCount(result.usage.inputTokens),
+outputTokens: extractTokenCount(result.usage.outputTokens),
+reasoningTokens: extractReasoningTokens(usageObj),
+cacheReadInputTokens: extractCacheReadTokens(usageObj),
 webSearchCount,
 ...additionalTokenValues
 };
 await sendEventToPosthog({
 client: phClient,
-distinctId:
-traceId:
+distinctId: mergedOptions.posthogDistinctId,
+traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
 model: modelId,
 provider: provider,
-input:
+input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
 output: content,
 latency,
 baseURL,
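The new helpers give one code path for both Vercel AI SDK usage shapes: V2 reports plain numbers with top-level `reasoningTokens`/`cachedInputTokens`, while V3 nests objects with `.total`, `inputTokens.cacheRead`, and `outputTokens.reasoning`. A quick check against the helpers above, with illustrative values:

    // Both shapes normalize to the same numbers via the helpers added in this diff.
    const v2Usage = { inputTokens: 12, outputTokens: 40, reasoningTokens: 8, cachedInputTokens: 3 };
    const v3Usage = { inputTokens: { total: 12, cacheRead: 3 }, outputTokens: { total: 40, reasoning: 8 } };
    for (const usage of [v2Usage, v3Usage]) {
      console.log(
        extractTokenCount(usage.inputTokens),  // 12
        extractTokenCount(usage.outputTokens), // 40
        extractReasoningTokens(usage),         // 8
        extractCacheReadTokens(usage)          // 3
      );
    }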
@@ -2200,18 +2264,18 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 httpStatus: 200,
 usage,
 tools: availableTools,
-captureImmediate:
+captureImmediate: mergedOptions.posthogCaptureImmediate
 });
 return result;
 } catch (error) {
 const modelId = model.modelId;
 await sendEventToPosthog({
 client: phClient,
-distinctId:
-traceId:
+distinctId: mergedOptions.posthogDistinctId,
+traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
 model: modelId,
 provider: model.provider,
-input:
+input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
 output: [],
 latency: 0,
 baseURL: '',
@@ -2224,30 +2288,23 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 isError: true,
 error: truncate(JSON.stringify(error)),
 tools: availableTools,
-captureImmediate:
+captureImmediate: mergedOptions.posthogCaptureImmediate
 });
 throw error;
 }
 },
-
-doStream,
-params
-}) => {
+doStream: async params => {
 const startTime = Date.now();
 let generatedText = '';
 let reasoningText = '';
 let usage = {};
 let providerMetadata = undefined;
 const mergedParams = {
-...
-...mapVercelParams(params)
-posthogProperties: {
-...options.posthogProperties,
-$ai_framework: 'vercel'
-}
+...mergedOptions,
+...mapVercelParams(params)
 };
-const modelId =
-const provider =
+const modelId = mergedOptions.posthogModelOverride ?? model.modelId;
+const provider = mergedOptions.posthogProviderOverride ?? extractProvider(model);
 const availableTools = extractAvailableToolCalls('vercel', params);
 const baseURL = ''; // cannot currently get baseURL from vercel
 // Map to track in-progress tool calls
@@ -2256,15 +2313,15 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 const {
 stream,
 ...rest
-} = await doStream();
+} = await model.doStream(params);
 const transformStream = new TransformStream({
 transform(chunk, controller) {
-// Handle
+// Handle streaming patterns - compatible with both V2 and V3
 if (chunk.type === 'text-delta') {
 generatedText += chunk.delta;
 }
 if (chunk.type === 'reasoning-delta') {
-reasoningText += chunk.delta;
+reasoningText += chunk.delta;
 }
 // Handle tool call chunks
 if (chunk.type === 'tool-input-start') {
@@ -2284,7 +2341,6 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 }
 if (chunk.type === 'tool-input-end') {
 // Tool call is complete, keep it in the map for final processing
-// Nothing specific to do here, the tool call is already complete
 }
 if (chunk.type === 'tool-call') {
 // Direct tool call chunk (complete tool call)
@@ -2296,14 +2352,13 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 }
 if (chunk.type === 'finish') {
 providerMetadata = chunk.providerMetadata;
-const additionalTokenValues = providerMetadata
-
-} : {};
+const additionalTokenValues = extractAdditionalTokenValues(providerMetadata);
+const chunkUsage = chunk.usage || {};
 usage = {
-inputTokens: chunk.usage?.inputTokens,
-outputTokens: chunk.usage?.outputTokens,
-reasoningTokens:
-cacheReadInputTokens:
+inputTokens: extractTokenCount(chunk.usage?.inputTokens),
+outputTokens: extractTokenCount(chunk.usage?.outputTokens),
+reasoningTokens: extractReasoningTokens(chunkUsage),
+cacheReadInputTokens: extractCacheReadTokens(chunkUsage),
 ...additionalTokenValues
 };
 }
@@ -2343,23 +2398,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 role: 'assistant',
 content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
 }] : [];
-
-let webSearchCount = 0;
-if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
-// Anthropic-specific extraction
-const serverToolUse = providerMetadata.anthropic.server_tool_use;
-if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
-webSearchCount = serverToolUse.web_search_requests;
-}
-} else {
-// For other providers through Vercel, pass available metadata to helper
-// Note: Vercel abstracts provider responses, so we may not have access to
-// raw citations/annotations unless Vercel exposes them in usage/metadata
-webSearchCount = calculateWebSearchCount({
-usage: usage,
-providerMetadata: providerMetadata
-});
-}
+const webSearchCount = extractWebSearchCount(providerMetadata, usage);
 // Update usage with web search count
 const finalUsage = {
 ...usage,
@@ -2367,11 +2406,11 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 };
 await sendEventToPosthog({
 client: phClient,
-distinctId:
-traceId:
+distinctId: mergedOptions.posthogDistinctId,
+traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
 model: modelId,
 provider: provider,
-input:
+input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
 output: output,
 latency,
 baseURL,
@@ -2379,7 +2418,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 httpStatus: 200,
 usage: finalUsage,
 tools: availableTools,
-captureImmediate:
+captureImmediate: mergedOptions.posthogCaptureImmediate
 });
 }
 });
@@ -2390,11 +2429,11 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 } catch (error) {
 await sendEventToPosthog({
 client: phClient,
-distinctId:
-traceId:
+distinctId: mergedOptions.posthogDistinctId,
+traceId: mergedOptions.posthogTraceId ?? uuid.v4(),
 model: modelId,
 provider: provider,
-input:
+input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
 output: [],
 latency: 0,
 baseURL: '',
@@ -2407,25 +2446,12 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
 isError: true,
 error: truncate(JSON.stringify(error)),
 tools: availableTools,
-captureImmediate:
+captureImmediate: mergedOptions.posthogCaptureImmediate
 });
 throw error;
 }
 }
 };
-return middleware;
-};
-const wrapVercelLanguageModel = (model, phClient, options) => {
-const traceId = options.posthogTraceId ?? uuid.v4();
-const middleware = createInstrumentationMiddleware(phClient, model, {
-...options,
-posthogTraceId: traceId,
-posthogDistinctId: options.posthogDistinctId
-});
-const wrappedModel = ai.wrapLanguageModel({
-model,
-middleware
-});
 return wrappedModel;
 };
 
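Net effect for the Vercel integration: `wrapVercelLanguageModel` no longer builds middleware through `ai.wrapLanguageModel` (hence the dropped `require('ai')` at the top of the file); it spreads the original model and overrides `doGenerate`/`doStream` directly, tagging events with `$ai_framework_version` '6' for V3 models and '5' for V2. A hedged usage sketch of the compiled function above (the public export name and module path may differ), assuming an existing `phClient` and a Vercel AI SDK `model`:

    // Every option key below appears in this diff; the values are illustrative.
    const wrapped = wrapVercelLanguageModel(model, phClient, {
      posthogDistinctId: 'user_123',
      posthogPrivacyMode: false,            // when true, captured input becomes ''
      posthogProperties: { team: 'search' } // merged with the $ai_framework tags
      // posthogTraceId is generated via uuid.v4() when omitted
    });
    // wrapped behaves like the original model; doGenerate/doStream additionally
    // send generation events (model, provider, usage, latency) to PostHog.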