@posthog/ai 7.5.4 → 7.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.cjs +71 -63
- package/dist/anthropic/index.cjs.map +1 -1
- package/dist/anthropic/index.mjs +71 -63
- package/dist/anthropic/index.mjs.map +1 -1
- package/dist/gemini/index.cjs +106 -54
- package/dist/gemini/index.cjs.map +1 -1
- package/dist/gemini/index.mjs +106 -54
- package/dist/gemini/index.mjs.map +1 -1
- package/dist/index.cjs +308 -223
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +308 -223
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +161 -136
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.mjs +161 -136
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +163 -133
- package/dist/openai/index.cjs.map +1 -1
- package/dist/openai/index.mjs +163 -133
- package/dist/openai/index.mjs.map +1 -1
- package/dist/vercel/index.cjs +82 -57
- package/dist/vercel/index.cjs.map +1 -1
- package/dist/vercel/index.mjs +82 -57
- package/dist/vercel/index.mjs.map +1 -1
- package/package.json +7 -7
package/dist/index.mjs
CHANGED
@@ -6,7 +6,7 @@ import { uuidv7 } from '@posthog/core';
 import AnthropicOriginal from '@anthropic-ai/sdk';
 import { GoogleGenAI } from '@google/genai';
 
-var version = "7.5.4";
+var version = "7.6.1";
 
 // Type guards for safer type checking
 const isString = value => {
@@ -16,6 +16,215 @@ const isObject = value => {
   return value !== null && typeof value === 'object' && !Array.isArray(value);
 };
 
+const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
+// ============================================
+// Multimodal Feature Toggle
+// ============================================
+const isMultimodalEnabled = () => {
+  const val = process.env._INTERNAL_LLMA_MULTIMODAL || '';
+  return val.toLowerCase() === 'true' || val === '1' || val.toLowerCase() === 'yes';
+};
+// ============================================
+// Base64 Detection Helpers
+// ============================================
+const isBase64DataUrl = str => {
+  return /^data:([^;]+);base64,/.test(str);
+};
+const isValidUrl = str => {
+  try {
+    new URL(str);
+    return true;
+  } catch {
+    // Not an absolute URL, check if it's a relative URL or path
+    return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
+  }
+};
+const isRawBase64 = str => {
+  // Skip if it's a valid URL or path
+  if (isValidUrl(str)) {
+    return false;
+  }
+  // Check if it's a valid base64 string
+  // Base64 images are typically at least a few hundred chars, but we'll be conservative
+  return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
+};
+function redactBase64DataUrl(str) {
+  if (isMultimodalEnabled()) return str;
+  if (!isString(str)) return str;
+  // Check for data URL format
+  if (isBase64DataUrl(str)) {
+    return REDACTED_IMAGE_PLACEHOLDER;
+  }
+  // Check for raw base64 (Vercel sends raw base64 for inline images)
+  if (isRawBase64(str)) {
+    return REDACTED_IMAGE_PLACEHOLDER;
+  }
+  return str;
+}
+const processMessages = (messages, transformContent) => {
+  if (!messages) return messages;
+  const processContent = content => {
+    if (typeof content === 'string') return content;
+    if (!content) return content;
+    if (Array.isArray(content)) {
+      return content.map(transformContent);
+    }
+    // Handle single object content
+    return transformContent(content);
+  };
+  const processMessage = msg => {
+    if (!isObject(msg) || !('content' in msg)) return msg;
+    return {
+      ...msg,
+      content: processContent(msg.content)
+    };
+  };
+  // Handle both arrays and single messages
+  if (Array.isArray(messages)) {
+    return messages.map(processMessage);
+  }
+  return processMessage(messages);
+};
+// ============================================
+// Provider-Specific Image Sanitizers
+// ============================================
+const sanitizeOpenAIImage = item => {
+  if (!isObject(item)) return item;
+  // Handle image_url format
+  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+    return {
+      ...item,
+      image_url: {
+        ...item.image_url,
+        url: redactBase64DataUrl(item.image_url.url)
+      }
+    };
+  }
+  // Handle audio format
+  if (item.type === 'audio' && 'data' in item) {
+    if (isMultimodalEnabled()) return item;
+    return {
+      ...item,
+      data: REDACTED_IMAGE_PLACEHOLDER
+    };
+  }
+  return item;
+};
+const sanitizeOpenAIResponseImage = item => {
+  if (!isObject(item)) return item;
+  // Handle input_image format
+  if (item.type === 'input_image' && 'image_url' in item) {
+    return {
+      ...item,
+      image_url: redactBase64DataUrl(item.image_url)
+    };
+  }
+  return item;
+};
+const sanitizeAnthropicImage = item => {
+  if (isMultimodalEnabled()) return item;
+  if (!isObject(item)) return item;
+  // Handle Anthropic's image and document formats (same structure, different type field)
+  if ((item.type === 'image' || item.type === 'document') && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
+    return {
+      ...item,
+      source: {
+        ...item.source,
+        data: REDACTED_IMAGE_PLACEHOLDER
+      }
+    };
+  }
+  return item;
+};
+const sanitizeGeminiPart = part => {
+  if (isMultimodalEnabled()) return part;
+  if (!isObject(part)) return part;
+  // Handle Gemini's inline data format (images, audio, PDFs all use inlineData)
+  if ('inlineData' in part && isObject(part.inlineData) && 'data' in part.inlineData) {
+    return {
+      ...part,
+      inlineData: {
+        ...part.inlineData,
+        data: REDACTED_IMAGE_PLACEHOLDER
+      }
+    };
+  }
+  return part;
+};
+const processGeminiItem = item => {
+  if (!isObject(item)) return item;
+  // If it has parts, process them
+  if ('parts' in item && item.parts) {
+    const parts = Array.isArray(item.parts) ? item.parts.map(sanitizeGeminiPart) : sanitizeGeminiPart(item.parts);
+    return {
+      ...item,
+      parts
+    };
+  }
+  return item;
+};
+const sanitizeLangChainImage = item => {
+  if (!isObject(item)) return item;
+  // OpenAI style
+  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+    return {
+      ...item,
+      image_url: {
+        ...item.image_url,
+        url: redactBase64DataUrl(item.image_url.url)
+      }
+    };
+  }
+  // Direct image with data field
+  if (item.type === 'image' && 'data' in item) {
+    return {
+      ...item,
+      data: redactBase64DataUrl(item.data)
+    };
+  }
+  // Anthropic style
+  if (item.type === 'image' && 'source' in item && isObject(item.source) && 'data' in item.source) {
+    if (isMultimodalEnabled()) return item;
+    return {
+      ...item,
+      source: {
+        ...item.source,
+        data: redactBase64DataUrl(item.source.data)
+      }
+    };
+  }
+  // Google style
+  if (item.type === 'media' && 'data' in item) {
+    return {
+      ...item,
+      data: redactBase64DataUrl(item.data)
+    };
+  }
+  return item;
+};
+// Export individual sanitizers for tree-shaking
+const sanitizeOpenAI = data => {
+  return processMessages(data, sanitizeOpenAIImage);
+};
+const sanitizeOpenAIResponse = data => {
+  return processMessages(data, sanitizeOpenAIResponseImage);
+};
+const sanitizeAnthropic = data => {
+  return processMessages(data, sanitizeAnthropicImage);
+};
+const sanitizeGemini = data => {
+  // Gemini has a different structure with 'parts' directly on items instead of 'content'
+  // So we need custom processing instead of using processMessages
+  if (!data) return data;
+  if (Array.isArray(data)) {
+    return data.map(processGeminiItem);
+  }
+  return processGeminiItem(data);
+};
+const sanitizeLangChain = data => {
+  return processMessages(data, sanitizeLangChainImage);
+};
+
 // limit large outputs by truncating to 200kb (approx 200k bytes)
 const MAX_OUTPUT_SIZE = 200000;
 const STRING_FORMAT = 'utf8';
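Taken together, the new helpers drop base64 payloads from captured messages unless the internal `_INTERNAL_LLMA_MULTIMODAL` flag is set to `true`, `1`, or `yes`. A standalone restatement of the detection rules above, runnable in isolation (the shipped `redactBase64DataUrl` additionally consults the env toggle and the bundle's `isString` guard):

```js
// Mirrors the detection logic added in the hunk above; names match the bundle.
const REDACTED = '[base64 image redacted]';

const isBase64DataUrl = (s) => /^data:([^;]+);base64,/.test(s);

const isValidUrl = (s) => {
  try {
    new URL(s); // absolute URLs parse cleanly
    return true;
  } catch {
    // relative URLs and paths are also treated as "not base64"
    return s.startsWith('/') || s.startsWith('./') || s.startsWith('../');
  }
};

const isRawBase64 = (s) =>
  !isValidUrl(s) && s.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(s);

const redact = (s) =>
  typeof s === 'string' && (isBase64DataUrl(s) || isRawBase64(s)) ? REDACTED : s;

console.log(redact('data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==')); // redacted: data URL
console.log(redact('https://example.com/cat.png'));                    // kept: absolute URL
console.log(redact('./images/cat.png'));                               // kept: relative path
console.log(redact('iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB'));                // redacted: raw base64
```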
@@ -209,6 +418,8 @@ const formatResponseGemini = response => {
         if (data instanceof Uint8Array || Buffer.isBuffer(data)) {
           data = Buffer.from(data).toString('base64');
         }
+        // Sanitize base64 data for images and other large inline data
+        data = redactBase64DataUrl(data);
         content.push({
           type: 'audio',
           mime_type: mimeType,
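On the response side, Gemini inline audio is base64-encoded before the new redaction runs, so the placeholder replaces the encoded string, not the raw bytes. A small Node sketch of that order of operations (the byte payload is invented; it encodes to 24 base64 characters, enough to trip the >20-character raw-base64 check):

```js
// Invented 16-byte payload standing in for inline audio data.
let data = new Uint8Array(16).fill(65);
if (data instanceof Uint8Array || Buffer.isBuffer(data)) {
  data = Buffer.from(data).toString('base64');
}
console.log(data); // 'QUFBQUFBQUFBQUFBQUFBQQ==' (24 chars)
// redactBase64DataUrl(data) would now return '[base64 image redacted]'
// unless _INTERNAL_LLMA_MULTIMODAL is set to 'true', '1', or 'yes'.
```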
@@ -557,6 +768,9 @@ const sendEventToPosthog = async ({
     } : {}),
     ...(usage.webSearchCount ? {
       $ai_web_search_count: usage.webSearchCount
+    } : {}),
+    ...(usage.rawUsage ? {
+      $ai_usage: usage.rawUsage
     } : {})
   };
   const properties = {
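With this change, captured events can carry the provider's raw usage object under `$ai_usage` next to the normalized token counts. A minimal sketch of the conditional-spread pattern used here (the usage values are invented; the real property list in `sendEventToPosthog` is much longer):

```js
// Hypothetical usage values; only the two properties shown in the hunk are real.
const usage = {
  webSearchCount: 2,
  rawUsage: { prompt_tokens: 10, completion_tokens: 42 },
};

// Each spread contributes its property only when the value is truthy.
const properties = {
  ...(usage.webSearchCount ? { $ai_web_search_count: usage.webSearchCount } : {}),
  ...(usage.rawUsage ? { $ai_usage: usage.rawUsage } : {}),
};

console.log(properties);
// { '$ai_web_search_count': 2, '$ai_usage': { prompt_tokens: 10, completion_tokens: 42 } }
```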
@@ -645,201 +859,6 @@ function formatOpenAIResponsesInput(input, instructions) {
   return messages;
 }
 
-const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
-// ============================================
-// Multimodal Feature Toggle
-// ============================================
-const isMultimodalEnabled = () => {
-  const val = process.env._INTERNAL_LLMA_MULTIMODAL || '';
-  return val.toLowerCase() === 'true' || val === '1' || val.toLowerCase() === 'yes';
-};
-// ============================================
-// Base64 Detection Helpers
-// ============================================
-const isBase64DataUrl = str => {
-  return /^data:([^;]+);base64,/.test(str);
-};
-const isValidUrl = str => {
-  try {
-    new URL(str);
-    return true;
-  } catch {
-    // Not an absolute URL, check if it's a relative URL or path
-    return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
-  }
-};
-const isRawBase64 = str => {
-  // Skip if it's a valid URL or path
-  if (isValidUrl(str)) {
-    return false;
-  }
-  // Check if it's a valid base64 string
-  // Base64 images are typically at least a few hundred chars, but we'll be conservative
-  return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
-};
-function redactBase64DataUrl(str) {
-  if (isMultimodalEnabled()) return str;
-  if (!isString(str)) return str;
-  // Check for data URL format
-  if (isBase64DataUrl(str)) {
-    return REDACTED_IMAGE_PLACEHOLDER;
-  }
-  // Check for raw base64 (Vercel sends raw base64 for inline images)
-  if (isRawBase64(str)) {
-    return REDACTED_IMAGE_PLACEHOLDER;
-  }
-  return str;
-}
-const processMessages = (messages, transformContent) => {
-  if (!messages) return messages;
-  const processContent = content => {
-    if (typeof content === 'string') return content;
-    if (!content) return content;
-    if (Array.isArray(content)) {
-      return content.map(transformContent);
-    }
-    // Handle single object content
-    return transformContent(content);
-  };
-  const processMessage = msg => {
-    if (!isObject(msg) || !('content' in msg)) return msg;
-    return {
-      ...msg,
-      content: processContent(msg.content)
-    };
-  };
-  // Handle both arrays and single messages
-  if (Array.isArray(messages)) {
-    return messages.map(processMessage);
-  }
-  return processMessage(messages);
-};
-// ============================================
-// Provider-Specific Image Sanitizers
-// ============================================
-const sanitizeOpenAIImage = item => {
-  if (!isObject(item)) return item;
-  // Handle image_url format
-  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
-    return {
-      ...item,
-      image_url: {
-        ...item.image_url,
-        url: redactBase64DataUrl(item.image_url.url)
-      }
-    };
-  }
-  // Handle audio format
-  if (item.type === 'audio' && 'data' in item) {
-    if (isMultimodalEnabled()) return item;
-    return {
-      ...item,
-      data: REDACTED_IMAGE_PLACEHOLDER
-    };
-  }
-  return item;
-};
-const sanitizeAnthropicImage = item => {
-  if (isMultimodalEnabled()) return item;
-  if (!isObject(item)) return item;
-  // Handle Anthropic's image and document formats (same structure, different type field)
-  if ((item.type === 'image' || item.type === 'document') && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
-    return {
-      ...item,
-      source: {
-        ...item.source,
-        data: REDACTED_IMAGE_PLACEHOLDER
-      }
-    };
-  }
-  return item;
-};
-const sanitizeGeminiPart = part => {
-  if (isMultimodalEnabled()) return part;
-  if (!isObject(part)) return part;
-  // Handle Gemini's inline data format (images, audio, PDFs all use inlineData)
-  if ('inlineData' in part && isObject(part.inlineData) && 'data' in part.inlineData) {
-    return {
-      ...part,
-      inlineData: {
-        ...part.inlineData,
-        data: REDACTED_IMAGE_PLACEHOLDER
-      }
-    };
-  }
-  return part;
-};
-const processGeminiItem = item => {
-  if (!isObject(item)) return item;
-  // If it has parts, process them
-  if ('parts' in item && item.parts) {
-    const parts = Array.isArray(item.parts) ? item.parts.map(sanitizeGeminiPart) : sanitizeGeminiPart(item.parts);
-    return {
-      ...item,
-      parts
-    };
-  }
-  return item;
-};
-const sanitizeLangChainImage = item => {
-  if (!isObject(item)) return item;
-  // OpenAI style
-  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
-    return {
-      ...item,
-      image_url: {
-        ...item.image_url,
-        url: redactBase64DataUrl(item.image_url.url)
-      }
-    };
-  }
-  // Direct image with data field
-  if (item.type === 'image' && 'data' in item) {
-    return {
-      ...item,
-      data: redactBase64DataUrl(item.data)
-    };
-  }
-  // Anthropic style
-  if (item.type === 'image' && 'source' in item && isObject(item.source) && 'data' in item.source) {
-    if (isMultimodalEnabled()) return item;
-    return {
-      ...item,
-      source: {
-        ...item.source,
-        data: redactBase64DataUrl(item.source.data)
-      }
-    };
-  }
-  // Google style
-  if (item.type === 'media' && 'data' in item) {
-    return {
-      ...item,
-      data: redactBase64DataUrl(item.data)
-    };
-  }
-  return item;
-};
-// Export individual sanitizers for tree-shaking
-const sanitizeOpenAI = data => {
-  return processMessages(data, sanitizeOpenAIImage);
-};
-const sanitizeAnthropic = data => {
-  return processMessages(data, sanitizeAnthropicImage);
-};
-const sanitizeGemini = data => {
-  // Gemini has a different structure with 'parts' directly on items instead of 'content'
-  // So we need custom processing instead of using processMessages
-  if (!data) return data;
-  if (Array.isArray(data)) {
-    return data.map(processGeminiItem);
-  }
-  return processGeminiItem(data);
-};
-const sanitizeLangChain = data => {
-  return processMessages(data, sanitizeLangChainImage);
-};
-
 const Chat = OpenAI.Chat;
 const Completions = Chat.Completions;
 const Responses = OpenAI.Responses;
@@ -896,6 +915,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
           };
           // Map to track in-progress tool calls
           const toolCallsInProgress = new Map();
+          let rawUsageData;
           for await (const chunk of stream1) {
             // Extract model from chunk (Chat Completions chunks have model field)
             if (!modelFromResponse && chunk.model) {
@@ -943,6 +963,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
             }
             // Handle usage information
             if (chunk.usage) {
+              rawUsageData = chunk.usage;
               usage = {
                 ...usage,
                 inputTokens: chunk.usage.prompt_tokens ?? 0,
@@ -1001,7 +1022,8 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
               outputTokens: usage.outputTokens,
               reasoningTokens: usage.reasoningTokens,
               cacheReadInputTokens: usage.cacheReadInputTokens,
-              webSearchCount: usage.webSearchCount
+              webSearchCount: usage.webSearchCount,
+              rawUsage: rawUsageData
             },
             tools: availableTools
           });
@@ -1052,7 +1074,8 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
           outputTokens: result.usage?.completion_tokens ?? 0,
           reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
           cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0,
-          webSearchCount: calculateWebSearchCount(result)
+          webSearchCount: calculateWebSearchCount(result),
+          rawUsage: result.usage
         },
         tools: availableTools
       });
@@ -1110,6 +1133,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           outputTokens: 0,
           webSearchCount: 0
         };
+        let rawUsageData;
        for await (const chunk of stream1) {
          if ('response' in chunk && chunk.response) {
            // Extract model from response object in chunk (for stored prompts)
@@ -1125,6 +1149,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
            finalContent = chunk.response.output;
          }
          if ('response' in chunk && chunk.response?.usage) {
+           rawUsageData = chunk.response.usage;
            usage = {
              ...usage,
              inputTokens: chunk.response.usage.input_tokens ?? 0,
@@ -1141,7 +1166,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
          ...posthogParams,
          model: openAIParams.model ?? modelFromResponse,
          provider: 'openai',
-         input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+         input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
          output: finalContent,
          latency,
          baseURL: this.baseURL,
@@ -1152,7 +1177,8 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
            outputTokens: usage.outputTokens,
            reasoningTokens: usage.reasoningTokens,
            cacheReadInputTokens: usage.cacheReadInputTokens,
-           webSearchCount: usage.webSearchCount
+           webSearchCount: usage.webSearchCount,
+           rawUsage: rawUsageData
          },
          tools: availableTools
        });
@@ -1162,7 +1188,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
          ...posthogParams,
          model: openAIParams.model,
          provider: 'openai',
-         input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+         input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
          output: [],
          latency: 0,
          baseURL: this.baseURL,
@@ -1193,7 +1219,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
        ...posthogParams,
        model: openAIParams.model ?? result.model,
        provider: 'openai',
-       input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+       input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
        output: formattedOutput,
        latency,
        baseURL: this.baseURL,
@@ -1204,7 +1230,8 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
          outputTokens: result.usage?.output_tokens ?? 0,
          reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
          cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
-         webSearchCount: calculateWebSearchCount(result)
+         webSearchCount: calculateWebSearchCount(result),
+         rawUsage: result.usage
        },
        tools: availableTools
      });
@@ -1217,7 +1244,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
        ...posthogParams,
        model: openAIParams.model,
        provider: 'openai',
-       input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+       input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
        output: [],
        latency: 0,
        baseURL: this.baseURL,
@@ -1253,7 +1280,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
        ...posthogParams,
        model: openAIParams.model ?? result.model,
        provider: 'openai',
-       input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+       input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
        output: result.output,
        latency,
        baseURL: this.baseURL,
@@ -1263,7 +1290,8 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
        inputTokens: result.usage?.input_tokens ?? 0,
        outputTokens: result.usage?.output_tokens ?? 0,
        reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
-       cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0
+       cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
+       rawUsage: result.usage
      }
    });
    return result;
@@ -1273,7 +1301,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
        ...posthogParams,
        model: openAIParams.model,
        provider: 'openai',
-       input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+       input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
        output: [],
        latency: 0,
        baseURL: this.baseURL,
@@ -1322,7 +1350,8 @@ let WrappedEmbeddings$1 = class WrappedEmbeddings extends Embeddings {
        params: body,
        httpStatus: 200,
        usage: {
-         inputTokens: result.usage?.prompt_tokens ?? 0
+         inputTokens: result.usage?.prompt_tokens ?? 0,
+         rawUsage: result.usage
        }
      });
      return result;
@@ -1390,7 +1419,8 @@ class WrappedTranscriptions extends Transcriptions {
          if ('usage' in chunk && chunk.usage) {
            usage = {
              inputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.input_tokens ?? 0 : 0,
-             outputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.output_tokens ?? 0 : 0
+             outputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.output_tokens ?? 0 : 0,
+             rawUsage: chunk.usage
            };
          }
        }
@@ -1451,7 +1481,8 @@ class WrappedTranscriptions extends Transcriptions {
        httpStatus: 200,
        usage: {
          inputTokens: result.usage?.type === 'tokens' ? result.usage.input_tokens ?? 0 : 0,
-         outputTokens: result.usage?.type === 'tokens' ? result.usage.output_tokens ?? 0 : 0
+         outputTokens: result.usage?.type === 'tokens' ? result.usage.output_tokens ?? 0 : 0,
+         rawUsage: result.usage
        }
      });
      return result;
@@ -2247,13 +2278,29 @@ const wrapVercelLanguageModel = (model, phClient, options) => {
      const webSearchCount = extractWebSearchCount(providerMetadata, result.usage);
      // V2 usage has simple numbers, V3 has objects with .total - normalize both
      const usageObj = result.usage;
+     // Extract raw response for providers that include detailed usage metadata
+     // For Gemini, candidatesTokensDetails is in result.response.body.usageMetadata
+     const rawUsageData = {
+       usage: result.usage,
+       providerMetadata
+     };
+     // Include response body usageMetadata if it contains detailed token breakdown (e.g., candidatesTokensDetails)
+     if (result.response && typeof result.response === 'object') {
+       const responseBody = result.response.body;
+       if (responseBody && typeof responseBody === 'object' && 'usageMetadata' in responseBody) {
+         rawUsageData.rawResponse = {
+           usageMetadata: responseBody.usageMetadata
+         };
+       }
+     }
      const usage = {
        inputTokens: extractTokenCount(result.usage.inputTokens),
        outputTokens: extractTokenCount(result.usage.outputTokens),
        reasoningTokens: extractReasoningTokens(usageObj),
        cacheReadInputTokens: extractCacheReadTokens(usageObj),
        webSearchCount,
-       ...additionalTokenValues
+       ...additionalTokenValues,
+       rawUsage: rawUsageData
      };
      adjustAnthropicV3CacheTokens(model, provider, usage);
      await sendEventToPosthog({
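For the Vercel wrapper, `rawUsage` is a composite assembled from several places rather than a single provider field. A standalone restatement of the construction above, with an invented Gemini-style `result` object to show when `rawResponse` appears:

```js
// Mirrors the extraction in the hunk above; the example inputs are made up.
function buildRawUsage(result, providerMetadata) {
  const rawUsageData = { usage: result.usage, providerMetadata };
  // Attach the response body's usageMetadata only when present
  // (e.g., Gemini's detailed token breakdown).
  if (result.response && typeof result.response === 'object') {
    const responseBody = result.response.body;
    if (responseBody && typeof responseBody === 'object' && 'usageMetadata' in responseBody) {
      rawUsageData.rawResponse = { usageMetadata: responseBody.usageMetadata };
    }
  }
  return rawUsageData;
}

console.log(buildRawUsage(
  {
    usage: { inputTokens: 12, outputTokens: 34 },
    response: { body: { usageMetadata: { candidatesTokenCount: 34 } } },
  },
  { google: {} }
));
// { usage: {...}, providerMetadata: { google: {} },
//   rawResponse: { usageMetadata: { candidatesTokenCount: 34 } } }
```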
@@ -2408,10 +2455,14 @@ const wrapVercelLanguageModel = (model, phClient, options) => {
        content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
      }] : [];
      const webSearchCount = extractWebSearchCount(providerMetadata, usage);
-     // Update usage with web search count
+     // Update usage with web search count and raw metadata
      const finalUsage = {
        ...usage,
-       webSearchCount
+       webSearchCount,
+       rawUsage: {
+         usage,
+         providerMetadata
+       }
      };
      adjustAnthropicV3CacheTokens(model, provider, finalUsage);
      await sendEventToPosthog({
@@ -2504,6 +2555,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
      cacheReadInputTokens: 0,
      webSearchCount: 0
    };
+   let lastRawUsage;
    if ('tee' in value) {
      const [stream1, stream2] = value.tee();
      (async () => {
@@ -2576,12 +2628,14 @@ class WrappedMessages extends AnthropicOriginal.Messages {
            }
          }
          if (chunk.type == 'message_start') {
+           lastRawUsage = chunk.message.usage;
            usage.inputTokens = chunk.message.usage.input_tokens ?? 0;
            usage.cacheCreationInputTokens = chunk.message.usage.cache_creation_input_tokens ?? 0;
            usage.cacheReadInputTokens = chunk.message.usage.cache_read_input_tokens ?? 0;
            usage.webSearchCount = chunk.message.usage.server_tool_use?.web_search_requests ?? 0;
          }
          if ('usage' in chunk) {
+           lastRawUsage = chunk.usage;
            usage.outputTokens = chunk.usage.output_tokens ?? 0;
            // Update web search count if present in delta
            if (chunk.usage.server_tool_use?.web_search_requests !== undefined) {
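In the Anthropic stream, usage arrives in two shapes: `message_start` carries the input-side counts, and later chunks carry a top-level `usage` delta with output counts, so `lastRawUsage` ends up holding whichever payload arrived last. A toy replay of that bookkeeping (the chunk objects are invented stand-ins for the SDK's stream events):

```js
// Invented chunk sequence mirroring Anthropic's streaming shape.
const chunks = [
  { type: 'message_start', message: { usage: { input_tokens: 9, output_tokens: 0 } } },
  { type: 'content_block_delta' },
  { type: 'message_delta', usage: { output_tokens: 17 } },
];

let lastRawUsage;
for (const chunk of chunks) {
  if (chunk.type === 'message_start') lastRawUsage = chunk.message.usage;
  if ('usage' in chunk) lastRawUsage = chunk.usage;
}
console.log(lastRawUsage); // { output_tokens: 17 } — the final delta payload wins
```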
@@ -2589,6 +2643,7 @@ class WrappedMessages extends AnthropicOriginal.Messages {
            }
          }
        }
+       usage.rawUsage = lastRawUsage;
        const latency = (Date.now() - startTime) / 1000;
        const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
        // Format output to match non-streaming version
@@ -2662,7 +2717,8 @@ class WrappedMessages extends AnthropicOriginal.Messages {
          outputTokens: result.usage.output_tokens ?? 0,
          cacheCreationInputTokens: result.usage.cache_creation_input_tokens ?? 0,
          cacheReadInputTokens: result.usage.cache_read_input_tokens ?? 0,
-         webSearchCount: result.usage.server_tool_use?.web_search_requests ?? 0
+         webSearchCount: result.usage.server_tool_use?.web_search_requests ?? 0,
+         rawUsage: result.usage
        },
        tools: availableTools
      });
@@ -2736,7 +2792,8 @@ class WrappedModels {
          outputTokens: metadata?.candidatesTokenCount ?? 0,
          reasoningTokens: metadata?.thoughtsTokenCount ?? 0,
          cacheReadInputTokens: metadata?.cachedContentTokenCount ?? 0,
-         webSearchCount: calculateGoogleWebSearchCount(response)
+         webSearchCount: calculateGoogleWebSearchCount(response),
+         rawUsage: metadata
        },
        tools: availableTools
      });
@@ -2772,7 +2829,8 @@ class WrappedModels {
    let usage = {
      inputTokens: 0,
      outputTokens: 0,
-     webSearchCount: 0
+     webSearchCount: 0,
+     rawUsage: undefined
    };
    try {
      const stream = await this.client.models.generateContentStream(geminiParams);
@@ -2830,7 +2888,8 @@ class WrappedModels {
            outputTokens: metadata.candidatesTokenCount ?? 0,
            reasoningTokens: metadata.thoughtsTokenCount ?? 0,
            cacheReadInputTokens: metadata.cachedContentTokenCount ?? 0,
-           webSearchCount: usage.webSearchCount
+           webSearchCount: usage.webSearchCount,
+           rawUsage: metadata
          };
        }
        yield chunk;
@@ -2855,7 +2914,8 @@ class WrappedModels {
        httpStatus: 200,
        usage: {
          ...usage,
-         webSearchCount: usage.webSearchCount
+         webSearchCount: usage.webSearchCount,
+         rawUsage: usage.rawUsage
        },
        tools: availableTools
      });
@@ -3293,6 +3353,20 @@ function isSerializableLike(obj) {
   return obj !== null && typeof obj === "object" && "lc_serializable" in obj && typeof obj.toJSON === "function";
 }
 /**
+ * Create a "not_implemented" serialization result for objects that cannot be serialized.
+ */
+function createNotImplemented(obj) {
+  let id;
+  if (obj !== null && typeof obj === "object") if ("lc_id" in obj && Array.isArray(obj.lc_id)) id = obj.lc_id;
+  else id = [obj.constructor?.name ?? "Object"];
+  else id = [typeof obj];
+  return {
+    lc: 1,
+    type: "not_implemented",
+    id
+  };
+}
+/**
  * Escape a value if it needs escaping (contains `lc` key).
  *
  * This is a simpler version of `serializeValue` that doesn't handle Serializable
@@ -3300,18 +3374,27 @@ function isSerializableLike(obj) {
  * processed by `toJSON()`.
  *
  * @param value - The value to potentially escape.
+ * @param pathSet - WeakSet to track ancestor objects in the current path to detect circular references.
+ *                  Objects are removed after processing to allow shared references (same object in
+ *                  multiple places) while still detecting true circular references (ancestor in descendant).
  * @returns The value with any `lc`-containing objects wrapped in escape markers.
  */
-function escapeIfNeeded(value) {
+function escapeIfNeeded(value, pathSet = /* @__PURE__ */ new WeakSet()) {
   if (value !== null && typeof value === "object" && !Array.isArray(value)) {
+    if (pathSet.has(value)) return createNotImplemented(value);
     if (isSerializableLike(value)) return value;
+    pathSet.add(value);
     const record = value;
-    if (needsEscaping(record)) return escapeObject(record);
+    if (needsEscaping(record)) {
+      pathSet.delete(value);
+      return escapeObject(record);
+    }
     const result = {};
-    for (const [key, val] of Object.entries(record)) result[key] = escapeIfNeeded(val);
+    for (const [key, val] of Object.entries(record)) result[key] = escapeIfNeeded(val, pathSet);
+    pathSet.delete(value);
     return result;
   }
-  if (Array.isArray(value)) return value.map((item) => escapeIfNeeded(item));
+  if (Array.isArray(value)) return value.map((item) => escapeIfNeeded(item, pathSet));
   return value;
 }
 
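The `pathSet` threading distinguishes a true cycle (an ancestor object reappearing inside its own subtree) from a harmless shared reference, which is re-added and re-removed per branch. A self-contained check of that distinction, with stub versions of the bundle's `isSerializableLike`/`needsEscaping`/`escapeObject` that never fire for these plain objects:

```js
// Stubs standing in for the bundle's helpers; the traversal below matches
// the escapeIfNeeded shown in the hunk above.
const isSerializableLike = () => false;
const needsEscaping = () => false;
const escapeObject = (o) => o;
const createNotImplemented = (obj) => ({
  lc: 1,
  type: 'not_implemented',
  id: [obj?.constructor?.name ?? 'Object'],
});

function escapeIfNeeded(value, pathSet = new WeakSet()) {
  if (value !== null && typeof value === 'object' && !Array.isArray(value)) {
    if (pathSet.has(value)) return createNotImplemented(value); // ancestor seen: cycle
    if (isSerializableLike(value)) return value;
    pathSet.add(value);
    const record = value;
    if (needsEscaping(record)) {
      pathSet.delete(value);
      return escapeObject(record);
    }
    const result = {};
    for (const [key, val] of Object.entries(record)) result[key] = escapeIfNeeded(val, pathSet);
    pathSet.delete(value); // leaving this branch: shared references stay legal
    return result;
  }
  if (Array.isArray(value)) return value.map((item) => escapeIfNeeded(item, pathSet));
  return value;
}

const shared = { x: 1 };
console.log(escapeIfNeeded({ a: shared, b: shared }));
// { a: { x: 1 }, b: { x: 1 } } — shared references survive

const cyclic = { name: 'loop' };
cyclic.self = cyclic;
console.log(escapeIfNeeded(cyclic));
// { name: 'loop', self: { lc: 1, type: 'not_implemented', id: ['Object'] } }
```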
@@ -3437,7 +3520,9 @@ var Serializable = class Serializable {
       if (last in read && read[last] !== void 0) write[last] = write[last] || read[last];
     });
     const escapedKwargs = {};
-    for (const [key, value] of Object.entries(kwargs)) escapedKwargs[key] = escapeIfNeeded(value);
+    const pathSet = /* @__PURE__ */ new WeakSet();
+    pathSet.add(this);
+    for (const [key, value] of Object.entries(kwargs)) escapedKwargs[key] = escapeIfNeeded(value, pathSet);
     const kwargsWithSecrets = Object.keys(secrets).length ? replaceSecrets(escapedKwargs, secrets) : escapedKwargs;
     const processedKwargs = mapKeys(kwargsWithSecrets, keyToJson, aliases);
     return {