@posthog/ai 5.2.3 → 6.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.cjs +37 -5
- package/dist/anthropic/index.cjs.map +1 -1
- package/dist/anthropic/index.mjs +37 -5
- package/dist/anthropic/index.mjs.map +1 -1
- package/dist/gemini/index.cjs +67 -25
- package/dist/gemini/index.cjs.map +1 -1
- package/dist/gemini/index.d.ts +0 -1
- package/dist/gemini/index.mjs +67 -25
- package/dist/gemini/index.mjs.map +1 -1
- package/dist/index.cjs +329 -123
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +3 -3
- package/dist/index.mjs +330 -124
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +29 -9
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.d.ts +1 -0
- package/dist/langchain/index.mjs +29 -9
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +106 -5
- package/dist/openai/index.cjs.map +1 -1
- package/dist/openai/index.mjs +106 -5
- package/dist/openai/index.mjs.map +1 -1
- package/dist/vercel/index.cjs +117 -80
- package/dist/vercel/index.cjs.map +1 -1
- package/dist/vercel/index.d.ts +2 -2
- package/dist/vercel/index.mjs +118 -81
- package/dist/vercel/index.mjs.map +1 -1
- package/package.json +6 -5
package/dist/index.mjs
CHANGED
@@ -2,7 +2,7 @@ import { OpenAI, AzureOpenAI } from 'openai';
 import * as uuid from 'uuid';
 import { v4 } from 'uuid';
 import { Buffer } from 'buffer';
-import {
+import { wrapLanguageModel } from 'ai';
 import AnthropicOriginal from '@anthropic-ai/sdk';
 import { GoogleGenAI } from '@google/genai';
 
@@ -23,30 +23,171 @@ const getModelParams = params => {
   return modelParams;
 };
 const formatResponseAnthropic = response => {
-  // Example approach if "response.content" holds array of text segments, etc.
   const output = [];
+  const content = [];
   for (const choice of response.content ?? []) {
-    if (choice?.text) {
-
-
-
+    if (choice?.type === 'text' && choice?.text) {
+      content.push({
+        type: 'text',
+        text: choice.text
+      });
+    } else if (choice?.type === 'tool_use' && choice?.name && choice?.id) {
+      content.push({
+        type: 'function',
+        id: choice.id,
+        function: {
+          name: choice.name,
+          arguments: choice.input || {}
+        }
       });
     }
   }
+  if (content.length > 0) {
+    output.push({
+      role: 'assistant',
+      content
+    });
+  }
   return output;
 };
 const formatResponseOpenAI = response => {
   const output = [];
-
-
+  if (response.choices) {
+    for (const choice of response.choices) {
+      const content = [];
+      let role = 'assistant';
+      if (choice.message) {
+        if (choice.message.role) {
+          role = choice.message.role;
+        }
+        if (choice.message.content) {
+          content.push({
+            type: 'text',
+            text: choice.message.content
+          });
+        }
+        if (choice.message.tool_calls) {
+          for (const toolCall of choice.message.tool_calls) {
+            content.push({
+              type: 'function',
+              id: toolCall.id,
+              function: {
+                name: toolCall.function.name,
+                arguments: toolCall.function.arguments
+              }
+            });
+          }
+        }
+      }
+      if (content.length > 0) {
+        output.push({
+          role,
+          content
+        });
+      }
+    }
+  }
+  // Handle Responses API format
+  if (response.output) {
+    const content = [];
+    let role = 'assistant';
+    for (const item of response.output) {
+      if (item.type === 'message') {
+        role = item.role;
+        if (item.content && Array.isArray(item.content)) {
+          for (const contentItem of item.content) {
+            if (contentItem.type === 'output_text' && contentItem.text) {
+              content.push({
+                type: 'text',
+                text: contentItem.text
+              });
+            } else if (contentItem.text) {
+              content.push({
+                type: 'text',
+                text: contentItem.text
+              });
+            } else if (contentItem.type === 'input_image' && contentItem.image_url) {
+              content.push({
+                type: 'image',
+                image: contentItem.image_url
+              });
+            }
+          }
+        } else if (item.content) {
+          content.push({
+            type: 'text',
+            text: String(item.content)
+          });
+        }
+      } else if (item.type === 'function_call') {
+        content.push({
+          type: 'function',
+          id: item.call_id || item.id || '',
+          function: {
+            name: item.name,
+            arguments: item.arguments || {}
+          }
+        });
+      }
+    }
+    if (content.length > 0) {
       output.push({
-        role
-        content
+        role,
+        content
       });
     }
   }
   return output;
 };
+const formatResponseGemini = response => {
+  const output = [];
+  if (response.candidates && Array.isArray(response.candidates)) {
+    for (const candidate of response.candidates) {
+      if (candidate.content && candidate.content.parts) {
+        const content = [];
+        for (const part of candidate.content.parts) {
+          if (part.text) {
+            content.push({
+              type: 'text',
+              text: part.text
+            });
+          } else if (part.functionCall) {
+            content.push({
+              type: 'function',
+              function: {
+                name: part.functionCall.name,
+                arguments: part.functionCall.args
+              }
+            });
+          }
+        }
+        if (content.length > 0) {
+          output.push({
+            role: 'assistant',
+            content
+          });
+        }
+      } else if (candidate.text) {
+        output.push({
+          role: 'assistant',
+          content: [{
+            type: 'text',
+            text: candidate.text
+          }]
+        });
+      }
+    }
+  } else if (response.text) {
+    output.push({
+      role: 'assistant',
+      content: [{
+        type: 'text',
+        text: response.text
+      }]
+    });
+  }
+  return output;
+};
 const mergeSystemPrompt = (params, provider) => {
   {
     const messages = params.messages || [];
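Note: the formatters above normalize each provider's response into a shared message shape. A minimal sketch of what formatResponseAnthropic would produce, using a hypothetical Anthropic Messages API response (not part of the package):

  const response = {
    content: [
      { type: 'text', text: 'Checking the weather now.' },
      { type: 'tool_use', id: 'toolu_123', name: 'get_weather', input: { city: 'Berlin' } }
    ]
  };
  // Per the code above, formatResponseAnthropic(response) returns:
  // [{
  //   role: 'assistant',
  //   content: [
  //     { type: 'text', text: 'Checking the weather now.' },
  //     { type: 'function', id: 'toolu_123', function: { name: 'get_weather', arguments: { city: 'Berlin' } } }
  //   ]
  // }]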
@@ -76,6 +217,35 @@ const truncate = str => {
     return str;
   }
 };
+/**
+ * Extract available tool calls from the request parameters.
+ * These are the tools provided to the LLM, not the tool calls in the response.
+ */
+const extractAvailableToolCalls = (provider, params) => {
+  if (provider === 'anthropic') {
+    if (params.tools) {
+      return params.tools;
+    }
+    return null;
+  } else if (provider === 'gemini') {
+    if (params.config && params.config.tools) {
+      return params.config.tools;
+    }
+    return null;
+  } else if (provider === 'openai') {
+    if (params.tools) {
+      return params.tools;
+    }
+    return null;
+  } else if (provider === 'vercel') {
+    // Vercel AI SDK stores tools in params.mode.tools when mode type is 'regular'
+    if (params.mode?.type === 'regular' && params.mode.tools) {
+      return params.mode.tools;
+    }
+    return null;
+  }
+  return null;
+};
 function sanitizeValues(obj) {
   if (obj === undefined || obj === null) {
     return obj;
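Note: a quick sketch of the new helper's per-provider behavior; the sample params are hypothetical:

  // OpenAI and Anthropic read params.tools, Gemini reads params.config.tools,
  // and the Vercel path reads params.mode.tools when mode.type === 'regular'.
  extractAvailableToolCalls('openai', { tools: [{ type: 'function', function: { name: 'get_weather' } }] });
  // -> [{ type: 'function', function: { name: 'get_weather' } }]
  extractAvailableToolCalls('gemini', { config: { tools: [{ functionDeclarations: [] }] } });
  // -> [{ functionDeclarations: [] }]
  extractAvailableToolCalls('vercel', { mode: { type: 'regular', tools: [] } });
  // -> []
  extractAvailableToolCalls('anthropic', {});
  // -> null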
@@ -245,6 +415,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
         }
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('openai', openAIParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -261,6 +432,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
         params: body,
         httpStatus: 200,
         usage,
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
     } catch (error) {
@@ -295,6 +467,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
     const wrappedPromise = parentPromise.then(async result => {
       if ('choices' in result) {
         const latency = (Date.now() - startTime) / 1000;
+        const availableTools = extractAvailableToolCalls('openai', openAIParams);
         await sendEventToPosthog({
           client: this.phClient,
           distinctId: posthogDistinctId,
@@ -313,6 +486,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
             reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
             cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0
           },
+          tools: availableTools,
           captureImmediate: posthogCaptureImmediate
         });
       }
@@ -389,6 +563,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         }
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('openai', openAIParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -403,6 +578,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         params: body,
         httpStatus: 200,
         usage,
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
     } catch (error) {
@@ -437,6 +613,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
     const wrappedPromise = parentPromise.then(async result => {
       if ('output' in result) {
         const latency = (Date.now() - startTime) / 1000;
+        const availableTools = extractAvailableToolCalls('openai', openAIParams);
         await sendEventToPosthog({
           client: this.phClient,
           distinctId: posthogDistinctId,
@@ -445,7 +622,9 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           model: openAIParams.model,
           provider: 'openai',
           input: openAIParams.input,
-          output:
+          output: formatResponseOpenAI({
+            output: result.output
+          }),
           latency,
           baseURL: this.baseURL ?? '',
           params: body,
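Note: Responses API results are now routed through formatResponseOpenAI, which handles output items of type 'message' and 'function_call'. A sketch with a hypothetical result.output:

  // result.output = [
  //   { type: 'message', role: 'assistant', content: [{ type: 'output_text', text: 'Hi' }] },
  //   { type: 'function_call', call_id: 'call_1', name: 'get_weather', arguments: '{"city":"Berlin"}' }
  // ];
  // formatResponseOpenAI({ output: result.output }) ->
  // [{ role: 'assistant', content: [
  //   { type: 'text', text: 'Hi' },
  //   { type: 'function', id: 'call_1', function: { name: 'get_weather', arguments: '{"city":"Berlin"}' } }
  // ] }]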
@@ -456,6 +635,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
             reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
             cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0
           },
+          tools: availableTools,
           captureImmediate: posthogCaptureImmediate
         });
       }
@@ -935,7 +1115,7 @@ class WrappedResponses extends AzureOpenAI.Responses {
 const mapVercelParams = params => {
   return {
     temperature: params.temperature,
-
+    max_output_tokens: params.maxOutputTokens,
     top_p: params.topP,
     frequency_penalty: params.frequencyPenalty,
     presence_penalty: params.presencePenalty,
@@ -943,78 +1123,67 @@ const mapVercelParams = params => {
     stream: params.stream
   };
 };
-const mapVercelPrompt =
-  // normalize single inputs into an array of messages
-  let promptsArray;
-  if (typeof prompt === 'string') {
-    promptsArray = [{
-      role: 'user',
-      content: prompt
-    }];
-  } else if (!Array.isArray(prompt)) {
-    promptsArray = [prompt];
-  } else {
-    promptsArray = prompt;
-  }
+const mapVercelPrompt = messages => {
   // Map and truncate individual content
-  const inputs =
-  let content
-
-
-
-
-
-
-
-
-
-
-
-
-
-    }
-  }
-
-
-          type: 'file',
-          content: {
+  const inputs = messages.map(message => {
+    let content;
+    // Handle system role which has string content
+    if (message.role === 'system') {
+      content = [{
+        type: 'text',
+        text: truncate(String(message.content))
+      }];
+    } else {
+      // Handle other roles which have array content
+      if (Array.isArray(message.content)) {
+        content = message.content.map(c => {
+          if (c.type === 'text') {
+            return {
+              type: 'text',
+              text: truncate(c.text)
+            };
+          } else if (c.type === 'file') {
+            return {
+              type: 'file',
               file: c.data instanceof URL ? c.data.toString() : 'raw files not supported',
-
-          }
-        }
-
-
-
-
+              mediaType: c.mediaType
+            };
+          } else if (c.type === 'reasoning') {
+            return {
+              type: 'reasoning',
+              text: truncate(c.reasoning)
+            };
+          } else if (c.type === 'tool-call') {
+            return {
+              type: 'tool-call',
               toolCallId: c.toolCallId,
               toolName: c.toolName,
-
-          }
-        }
-
-
-          type: 'tool-result',
-          content: {
+              input: c.input
+            };
+          } else if (c.type === 'tool-result') {
+            return {
+              type: 'tool-result',
               toolCallId: c.toolCallId,
               toolName: c.toolName,
-
+              output: c.output,
               isError: c.isError
-          }
+            };
+          }
+          return {
+            type: 'text',
+            text: ''
           };
-        }
-
-
-
-
-
-
-
-          text: truncate(p.content)
-        };
+        });
+      } else {
+        // Fallback for non-array content
+        content = [{
+          type: 'text',
+          text: truncate(String(message.content))
+        }];
+      }
     }
     return {
-      role:
+      role: message.role,
       content
     };
   });
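Note: the rewrite follows the AI SDK v5 prompt shape, where params.prompt is already an array of messages, system content is a plain string, and other roles carry typed parts ('text', 'file', 'reasoning', 'tool-call', 'tool-result'). A sketch with a hypothetical v5 prompt:

  const prompt = [
    { role: 'system', content: 'You are terse.' },
    { role: 'user', content: [{ type: 'text', text: 'Weather in Berlin?' }] },
    { role: 'assistant', content: [{ type: 'tool-call', toolCallId: 'c1', toolName: 'get_weather', input: { city: 'Berlin' } }] }
  ];
  // mapVercelPrompt(prompt) keeps each role and maps parts one-to-one (truncating text),
  // e.g. the assistant entry maps to:
  // { role: 'assistant', content: [{ type: 'tool-call', toolCallId: 'c1', toolName: 'get_weather', input: { city: 'Berlin' } }] }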
@@ -1046,7 +1215,32 @@ const mapVercelPrompt = prompt => {
   return inputs;
 };
 const mapVercelOutput = result => {
-
+  const content = [];
+  if (result.text) {
+    content.push({
+      type: 'text',
+      text: truncate(result.text)
+    });
+  }
+  if (result.toolCalls && Array.isArray(result.toolCalls)) {
+    for (const toolCall of result.toolCalls) {
+      content.push({
+        type: 'function',
+        id: toolCall.toolCallId,
+        function: {
+          name: toolCall.toolName,
+          arguments: typeof toolCall.args === 'string' ? toolCall.args : JSON.stringify(toolCall.args)
+        }
+      });
+    }
+  }
+  if (content.length > 0) {
+    return [{
+      role: 'assistant',
+      content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
+    }];
+  }
+  // Fallback to original behavior for other result types TODO: check if we can remove this
   const normalizedResult = typeof result === 'string' ? {
     text: result
   } : result;
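Note: mapVercelOutput now builds structured content first and only falls back to the old normalization. A sketch with hypothetical generate results:

  // { text: 'It is sunny.' }
  //   -> [{ role: 'assistant', content: 'It is sunny.' }]
  //      (a single text item is flattened to a plain string)
  // { text: 'Sure.', toolCalls: [{ toolCallId: 't1', toolName: 'get_weather', args: { city: 'Berlin' } }] }
  //   -> [{ role: 'assistant', content: [
  //        { type: 'text', text: 'Sure.' },
  //        { type: 'function', id: 't1', function: { name: 'get_weather', arguments: '{"city":"Berlin"}' } }
  //      ] }]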
@@ -1057,8 +1251,8 @@ const mapVercelOutput = result => {
     ...(normalizedResult.object ? {
       object: normalizedResult.object
     } : {}),
-    ...(normalizedResult.
-      reasoning: normalizedResult.
+    ...(normalizedResult.reasoningText ? {
+      reasoning: normalizedResult.reasoningText
     } : {}),
     ...(normalizedResult.response ? {
       response: normalizedResult.response
@@ -1117,14 +1311,14 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
         ...options,
         ...mapVercelParams(params)
       };
+      const availableTools = extractAvailableToolCalls('vercel', params);
       try {
         const result = await doGenerate();
-        const latency = (Date.now() - startTime) / 1000;
         const modelId = options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
         const provider = options.posthogProviderOverride ?? extractProvider(model);
         const baseURL = ''; // cannot currently get baseURL from vercel
         const content = mapVercelOutput(result);
-
+        const latency = (Date.now() - startTime) / 1000;
         const providerMetadata = result.providerMetadata;
         const additionalTokenValues = {
           ...(providerMetadata?.openai?.reasoningTokens ? {
@@ -1145,19 +1339,17 @@
           model: modelId,
           provider: provider,
           input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
-          output:
-            content,
-            role: 'assistant'
-          }],
+          output: content,
           latency,
           baseURL,
           params: mergedParams,
           httpStatus: 200,
           usage: {
-            inputTokens: result.usage.
-            outputTokens: result.usage.
+            inputTokens: result.usage.inputTokens,
+            outputTokens: result.usage.outputTokens,
             ...additionalTokenValues
           },
+          tools: availableTools,
           captureImmediate: options.posthogCaptureImmediate
         });
         return result;
@@ -1181,6 +1373,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           },
           isError: true,
           error: truncate(JSON.stringify(error)),
+          tools: availableTools,
           captureImmediate: options.posthogCaptureImmediate
         });
         throw error;
@@ -1192,6 +1385,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
     }) => {
       const startTime = Date.now();
       let generatedText = '';
+      let reasoningText = '';
       let usage = {};
       const mergedParams = {
         ...options,
@@ -1199,6 +1393,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
       };
       const modelId = options.posthogModelOverride ?? model.modelId;
       const provider = options.posthogProviderOverride ?? extractProvider(model);
+      const availableTools = extractAvailableToolCalls('vercel', params);
       const baseURL = ''; // cannot currently get baseURL from vercel
       try {
         const {
@@ -1207,13 +1402,17 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
         } = await doStream();
         const transformStream = new TransformStream({
           transform(chunk, controller) {
+            // Handle new v5 streaming patterns
             if (chunk.type === 'text-delta') {
-              generatedText += chunk.
+              generatedText += chunk.delta;
+            }
+            if (chunk.type === 'reasoning-delta') {
+              reasoningText += chunk.delta; // New in v5
             }
             if (chunk.type === 'finish') {
               usage = {
-                inputTokens: chunk.usage?.
-                outputTokens: chunk.usage?.
+                inputTokens: chunk.usage?.inputTokens,
+                outputTokens: chunk.usage?.outputTokens
               };
               if (chunk.providerMetadata?.openai?.reasoningTokens) {
                 usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens;
@@ -1232,6 +1431,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           },
           flush: async () => {
             const latency = (Date.now() - startTime) / 1000;
+            const outputContent = reasoningText ? `${reasoningText}\n\n${generatedText}` : generatedText;
             await sendEventToPosthog({
               client: phClient,
               distinctId: options.posthogDistinctId,
@@ -1240,7 +1440,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
               provider: provider,
               input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
               output: [{
-                content:
+                content: outputContent,
                 role: 'assistant'
               }],
               latency,
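Note: the streaming path now accumulates chunk.delta for both 'text-delta' and the v5 'reasoning-delta' parts, and flush() prepends the reasoning to the captured output. A sketch with hypothetical stream parts:

  // { type: 'reasoning-delta', delta: 'Considering units...' }
  // { type: 'text-delta', delta: 'It is ' }
  // { type: 'text-delta', delta: '21C.' }
  // { type: 'finish', usage: { inputTokens: 12, outputTokens: 5 } }
  // flush() then reports output 'Considering units...\n\nIt is 21C.'
  // with usage { inputTokens: 12, outputTokens: 5 }.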
@@ -1248,6 +1448,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
               params: mergedParams,
               httpStatus: 200,
               usage,
+              tools: availableTools,
               captureImmediate: options.posthogCaptureImmediate
             });
           }
@@ -1275,6 +1476,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           },
           isError: true,
           error: truncate(JSON.stringify(error)),
+          tools: availableTools,
           captureImmediate: options.posthogCaptureImmediate
         });
         throw error;
@@ -1290,7 +1492,7 @@ const wrapVercelLanguageModel = (model, phClient, options) => {
     posthogTraceId: traceId,
     posthogDistinctId: options.posthogDistinctId
   });
-  const wrappedModel =
+  const wrappedModel = wrapLanguageModel({
     model,
     middleware
   });
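Note: together with the new top-of-file import, this moves model wrapping onto the AI SDK v5 wrapLanguageModel API. The resulting pattern, as a sketch:

  import { wrapLanguageModel } from 'ai';
  const wrappedModel = wrapLanguageModel({
    model,      // the original Vercel AI SDK language model
    middleware  // the instrumentation middleware from createInstrumentationMiddleware
  });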
@@ -1357,6 +1559,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
         }
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -1373,6 +1576,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
         params: body,
         httpStatus: 200,
         usage,
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
     } catch (error) {
@@ -1408,6 +1612,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
     const wrappedPromise = parentPromise.then(async result => {
       if ('content' in result) {
         const latency = (Date.now() - startTime) / 1000;
+        const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
         await sendEventToPosthog({
           client: this.phClient,
           distinctId: posthogDistinctId,
@@ -1426,6 +1631,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
             cacheCreationInputTokens: result.usage.cache_creation_input_tokens ?? 0,
             cacheReadInputTokens: result.usage.cache_read_input_tokens ?? 0
           },
+          tools: availableTools,
           captureImmediate: posthogCaptureImmediate
         });
       }
@@ -1488,6 +1694,7 @@ class WrappedModels {
     try {
       const response = await this.client.models.generateContent(geminiParams);
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('gemini', geminiParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -1495,7 +1702,7 @@
         model: geminiParams.model,
         provider: 'gemini',
         input: this.formatInput(geminiParams.contents),
-        output:
+        output: formatResponseGemini(response),
         latency,
         baseURL: 'https://generativelanguage.googleapis.com',
         params: params,
@@ -1504,6 +1711,7 @@
           inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
           outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0
         },
+        tools: availableTools,
         captureImmediate: posthogCaptureImmediate
       });
       return response;
@@ -1563,6 +1771,7 @@
         yield chunk;
       }
       const latency = (Date.now() - startTime) / 1000;
+      const availableTools = extractAvailableToolCalls('gemini', geminiParams);
       await sendEventToPosthog({
         client: this.phClient,
         distinctId: posthogDistinctId,
@@ -1579,6 +1788,7 @@
       params: params,
       httpStatus: 200,
       usage,
+      tools: availableTools,
       captureImmediate: posthogCaptureImmediate
     });
   } catch (error) {
@@ -1660,30 +1870,6 @@ class WrappedModels {
       content: String(contents)
     }];
   }
-  formatOutput(response) {
-    if (response.text) {
-      return [{
-        role: 'assistant',
-        content: response.text
-      }];
-    }
-    if (response.candidates && Array.isArray(response.candidates)) {
-      return response.candidates.map(candidate => {
-        if (candidate.content && candidate.content.parts) {
-          const text = candidate.content.parts.filter(part => part.text).map(part => part.text).join('');
-          return {
-            role: 'assistant',
-            content: text
-          };
-        }
-        return {
-          role: 'assistant',
-          content: String(candidate)
-        };
-      });
-    }
-    return [];
-  }
 }
 
 function getDefaultExportFromCjs (x) {
@@ -2380,7 +2566,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     };
     if (extraParams) {
       generation.modelParams = getModelParams(extraParams.invocation_params);
-      if (extraParams.invocation_params.tools) {
+      if (extraParams.invocation_params && extraParams.invocation_params.tools) {
         generation.tools = extraParams.invocation_params.tools;
       }
     }
@@ -2489,7 +2675,7 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       $ai_base_url: run.baseUrl
     };
     if (run.tools) {
-      eventProperties['$ai_tools'] =
+      eventProperties['$ai_tools'] = run.tools;
     }
     if (output instanceof Error) {
       eventProperties['$ai_http_status'] = output.status || 500;
@@ -2511,13 +2697,20 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
     let completions;
     if (output.generations && Array.isArray(output.generations)) {
       const lastGeneration = output.generations[output.generations.length - 1];
-      if (Array.isArray(lastGeneration)) {
-
-
-
-
-
-
+      if (Array.isArray(lastGeneration) && lastGeneration.length > 0) {
+        // Check if this is a ChatGeneration by looking at the first item
+        const isChatGeneration = 'message' in lastGeneration[0] && lastGeneration[0].message;
+        if (isChatGeneration) {
+          // For ChatGeneration, convert messages to dict format
+          completions = lastGeneration.map(gen => {
+            return this._convertMessageToDict(gen.message);
+          });
+        } else {
+          // For non-ChatGeneration, extract raw response
+          completions = lastGeneration.map(gen => {
+            return this._extractRawResponse(gen);
+          });
+        }
       }
     }
     if (completions) {
@@ -2568,6 +2761,19 @@ class LangChainCallbackHandler extends BaseCallbackHandler {
       }
     }));
   }
+  _extractRawResponse(generation) {
+    // Extract the response from the last response of the LLM call
+    // We return the text of the response if not empty
+    if (generation.text != null && generation.text.trim() !== '') {
+      return generation.text.trim();
+    } else if (generation.message) {
+      // Additional kwargs contains the response in case of tool usage
+      return generation.message.additional_kwargs || generation.message.additionalKwargs || {};
+    } else {
+      // Not tool usage, some LLM responses can be simply empty
+      return '';
+    }
+  }
   _convertMessageToDict(message) {
     let messageDict = {};
     const messageType = message.getType();