@posthog/ai 7.5.4 → 7.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.cjs +71 -63
- package/dist/anthropic/index.cjs.map +1 -1
- package/dist/anthropic/index.mjs +71 -63
- package/dist/anthropic/index.mjs.map +1 -1
- package/dist/gemini/index.cjs +106 -54
- package/dist/gemini/index.cjs.map +1 -1
- package/dist/gemini/index.mjs +106 -54
- package/dist/gemini/index.mjs.map +1 -1
- package/dist/index.cjs +308 -223
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +308 -223
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +161 -136
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.mjs +161 -136
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +163 -133
- package/dist/openai/index.cjs.map +1 -1
- package/dist/openai/index.mjs +163 -133
- package/dist/openai/index.mjs.map +1 -1
- package/dist/vercel/index.cjs +82 -57
- package/dist/vercel/index.cjs.map +1 -1
- package/dist/vercel/index.mjs +82 -57
- package/dist/vercel/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.cjs
CHANGED
@@ -30,7 +30,7 @@ function _interopNamespace(e) {
 var uuid__namespace = /*#__PURE__*/_interopNamespace(uuid);
 var AnthropicOriginal__default = /*#__PURE__*/_interopDefault(AnthropicOriginal);
 
-var version = "7.5.4";
+var version = "7.6.0";
 
 // Type guards for safer type checking
 const isString = value => {
@@ -40,6 +40,215 @@ const isObject = value => {
   return value !== null && typeof value === 'object' && !Array.isArray(value);
 };
 
+const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
+// ============================================
+// Multimodal Feature Toggle
+// ============================================
+const isMultimodalEnabled = () => {
+  const val = process.env._INTERNAL_LLMA_MULTIMODAL || '';
+  return val.toLowerCase() === 'true' || val === '1' || val.toLowerCase() === 'yes';
+};
+// ============================================
+// Base64 Detection Helpers
+// ============================================
+const isBase64DataUrl = str => {
+  return /^data:([^;]+);base64,/.test(str);
+};
+const isValidUrl = str => {
+  try {
+    new URL(str);
+    return true;
+  } catch {
+    // Not an absolute URL, check if it's a relative URL or path
+    return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
+  }
+};
+const isRawBase64 = str => {
+  // Skip if it's a valid URL or path
+  if (isValidUrl(str)) {
+    return false;
+  }
+  // Check if it's a valid base64 string
+  // Base64 images are typically at least a few hundred chars, but we'll be conservative
+  return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
+};
+function redactBase64DataUrl(str) {
+  if (isMultimodalEnabled()) return str;
+  if (!isString(str)) return str;
+  // Check for data URL format
+  if (isBase64DataUrl(str)) {
+    return REDACTED_IMAGE_PLACEHOLDER;
+  }
+  // Check for raw base64 (Vercel sends raw base64 for inline images)
+  if (isRawBase64(str)) {
+    return REDACTED_IMAGE_PLACEHOLDER;
+  }
+  return str;
+}
+const processMessages = (messages, transformContent) => {
+  if (!messages) return messages;
+  const processContent = content => {
+    if (typeof content === 'string') return content;
+    if (!content) return content;
+    if (Array.isArray(content)) {
+      return content.map(transformContent);
+    }
+    // Handle single object content
+    return transformContent(content);
+  };
+  const processMessage = msg => {
+    if (!isObject(msg) || !('content' in msg)) return msg;
+    return {
+      ...msg,
+      content: processContent(msg.content)
+    };
+  };
+  // Handle both arrays and single messages
+  if (Array.isArray(messages)) {
+    return messages.map(processMessage);
+  }
+  return processMessage(messages);
+};
+// ============================================
+// Provider-Specific Image Sanitizers
+// ============================================
+const sanitizeOpenAIImage = item => {
+  if (!isObject(item)) return item;
+  // Handle image_url format
+  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+    return {
+      ...item,
+      image_url: {
+        ...item.image_url,
+        url: redactBase64DataUrl(item.image_url.url)
+      }
+    };
+  }
+  // Handle audio format
+  if (item.type === 'audio' && 'data' in item) {
+    if (isMultimodalEnabled()) return item;
+    return {
+      ...item,
+      data: REDACTED_IMAGE_PLACEHOLDER
+    };
+  }
+  return item;
+};
+const sanitizeOpenAIResponseImage = item => {
+  if (!isObject(item)) return item;
+  // Handle input_image format
+  if (item.type === 'input_image' && 'image_url' in item) {
+    return {
+      ...item,
+      image_url: redactBase64DataUrl(item.image_url)
+    };
+  }
+  return item;
+};
+const sanitizeAnthropicImage = item => {
+  if (isMultimodalEnabled()) return item;
+  if (!isObject(item)) return item;
+  // Handle Anthropic's image and document formats (same structure, different type field)
+  if ((item.type === 'image' || item.type === 'document') && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
+    return {
+      ...item,
+      source: {
+        ...item.source,
+        data: REDACTED_IMAGE_PLACEHOLDER
+      }
+    };
+  }
+  return item;
+};
+const sanitizeGeminiPart = part => {
+  if (isMultimodalEnabled()) return part;
+  if (!isObject(part)) return part;
+  // Handle Gemini's inline data format (images, audio, PDFs all use inlineData)
+  if ('inlineData' in part && isObject(part.inlineData) && 'data' in part.inlineData) {
+    return {
+      ...part,
+      inlineData: {
+        ...part.inlineData,
+        data: REDACTED_IMAGE_PLACEHOLDER
+      }
+    };
+  }
+  return part;
+};
+const processGeminiItem = item => {
+  if (!isObject(item)) return item;
+  // If it has parts, process them
+  if ('parts' in item && item.parts) {
+    const parts = Array.isArray(item.parts) ? item.parts.map(sanitizeGeminiPart) : sanitizeGeminiPart(item.parts);
+    return {
+      ...item,
+      parts
+    };
+  }
+  return item;
+};
+const sanitizeLangChainImage = item => {
+  if (!isObject(item)) return item;
+  // OpenAI style
+  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+    return {
+      ...item,
+      image_url: {
+        ...item.image_url,
+        url: redactBase64DataUrl(item.image_url.url)
+      }
+    };
+  }
+  // Direct image with data field
+  if (item.type === 'image' && 'data' in item) {
+    return {
+      ...item,
+      data: redactBase64DataUrl(item.data)
+    };
+  }
+  // Anthropic style
+  if (item.type === 'image' && 'source' in item && isObject(item.source) && 'data' in item.source) {
+    if (isMultimodalEnabled()) return item;
+    return {
+      ...item,
+      source: {
+        ...item.source,
+        data: redactBase64DataUrl(item.source.data)
+      }
+    };
+  }
+  // Google style
+  if (item.type === 'media' && 'data' in item) {
+    return {
+      ...item,
+      data: redactBase64DataUrl(item.data)
+    };
+  }
+  return item;
+};
+// Export individual sanitizers for tree-shaking
+const sanitizeOpenAI = data => {
+  return processMessages(data, sanitizeOpenAIImage);
+};
+const sanitizeOpenAIResponse = data => {
+  return processMessages(data, sanitizeOpenAIResponseImage);
+};
+const sanitizeAnthropic = data => {
+  return processMessages(data, sanitizeAnthropicImage);
+};
+const sanitizeGemini = data => {
+  // Gemini has a different structure with 'parts' directly on items instead of 'content'
+  // So we need custom processing instead of using processMessages
+  if (!data) return data;
+  if (Array.isArray(data)) {
+    return data.map(processGeminiItem);
+  }
+  return processGeminiItem(data);
+};
+const sanitizeLangChain = data => {
+  return processMessages(data, sanitizeLangChainImage);
+};
+
 // limit large outputs by truncating to 200kb (approx 200k bytes)
 const MAX_OUTPUT_SIZE = 200000;
 const STRING_FORMAT = 'utf8';
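Net effect of this block: unless the internal `_INTERNAL_LLMA_MULTIMODAL` toggle opts in (`true`, `1`, or `yes`, case-insensitive), base64 payloads in captured messages are replaced with the placeholder before events leave the process. A sketch of the behavior; these helpers are module-private in `dist/index.cjs`, so assume they are in scope, and the message is invented:

```js
// Hypothetical OpenAI-style message containing an inline base64 image.
const messages = [{
  role: 'user',
  content: [
    { type: 'text', text: 'Describe this image' },
    { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0KGgoAAA...' } }
  ]
}];

const [text, image] = sanitizeOpenAI(messages)[0].content;
console.log(text.text);           // 'Describe this image' (untouched)
console.log(image.image_url.url); // '[base64 image redacted]'

// URLs and paths pass through; only data: URLs and long raw base64 are redacted.
redactBase64DataUrl('https://example.com/cat.png'); // => unchanged
redactBase64DataUrl('./assets/cat.png');            // => unchanged
```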
@@ -233,6 +442,8 @@ const formatResponseGemini = response => {
     if (data instanceof Uint8Array || buffer.Buffer.isBuffer(data)) {
       data = buffer.Buffer.from(data).toString('base64');
     }
+    // Sanitize base64 data for images and other large inline data
+    data = redactBase64DataUrl(data);
     content.push({
       type: 'audio',
       mime_type: mimeType,
@@ -581,6 +792,9 @@ const sendEventToPosthog = async ({
     } : {}),
     ...(usage.webSearchCount ? {
       $ai_web_search_count: usage.webSearchCount
+    } : {}),
+    ...(usage.rawUsage ? {
+      $ai_usage: usage.rawUsage
     } : {})
   };
   const properties = {
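This is where the new `rawUsage` field surfaces on events: when a wrapper supplies it, the event carries an `$ai_usage` property with the provider's usage object verbatim, alongside the normalized counts. A sketch of the resulting properties for an OpenAI chat completion; values are invented, and the `$ai_input_tokens`/`$ai_output_tokens` names are assumed from PostHog's LLM analytics conventions rather than shown in this hunk:

```js
// Hypothetical event properties after this change (OpenAI chat completion).
const exampleProperties = {
  $ai_input_tokens: 12,   // assumed normalized property name
  $ai_output_tokens: 48,  // assumed normalized property name
  $ai_web_search_count: 1,
  $ai_usage: {            // new in 7.6.0: the provider's usage object, unnormalized
    prompt_tokens: 12,
    completion_tokens: 48,
    total_tokens: 60
  }
};
```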
@@ -669,201 +883,6 @@ function formatOpenAIResponsesInput(input, instructions) {
   return messages;
 }
 
-const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
-// ============================================
-// Multimodal Feature Toggle
-// ============================================
-const isMultimodalEnabled = () => {
-  const val = process.env._INTERNAL_LLMA_MULTIMODAL || '';
-  return val.toLowerCase() === 'true' || val === '1' || val.toLowerCase() === 'yes';
-};
-// ============================================
-// Base64 Detection Helpers
-// ============================================
-const isBase64DataUrl = str => {
-  return /^data:([^;]+);base64,/.test(str);
-};
-const isValidUrl = str => {
-  try {
-    new URL(str);
-    return true;
-  } catch {
-    // Not an absolute URL, check if it's a relative URL or path
-    return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
-  }
-};
-const isRawBase64 = str => {
-  // Skip if it's a valid URL or path
-  if (isValidUrl(str)) {
-    return false;
-  }
-  // Check if it's a valid base64 string
-  // Base64 images are typically at least a few hundred chars, but we'll be conservative
-  return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
-};
-function redactBase64DataUrl(str) {
-  if (isMultimodalEnabled()) return str;
-  if (!isString(str)) return str;
-  // Check for data URL format
-  if (isBase64DataUrl(str)) {
-    return REDACTED_IMAGE_PLACEHOLDER;
-  }
-  // Check for raw base64 (Vercel sends raw base64 for inline images)
-  if (isRawBase64(str)) {
-    return REDACTED_IMAGE_PLACEHOLDER;
-  }
-  return str;
-}
-const processMessages = (messages, transformContent) => {
-  if (!messages) return messages;
-  const processContent = content => {
-    if (typeof content === 'string') return content;
-    if (!content) return content;
-    if (Array.isArray(content)) {
-      return content.map(transformContent);
-    }
-    // Handle single object content
-    return transformContent(content);
-  };
-  const processMessage = msg => {
-    if (!isObject(msg) || !('content' in msg)) return msg;
-    return {
-      ...msg,
-      content: processContent(msg.content)
-    };
-  };
-  // Handle both arrays and single messages
-  if (Array.isArray(messages)) {
-    return messages.map(processMessage);
-  }
-  return processMessage(messages);
-};
-// ============================================
-// Provider-Specific Image Sanitizers
-// ============================================
-const sanitizeOpenAIImage = item => {
-  if (!isObject(item)) return item;
-  // Handle image_url format
-  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
-    return {
-      ...item,
-      image_url: {
-        ...item.image_url,
-        url: redactBase64DataUrl(item.image_url.url)
-      }
-    };
-  }
-  // Handle audio format
-  if (item.type === 'audio' && 'data' in item) {
-    if (isMultimodalEnabled()) return item;
-    return {
-      ...item,
-      data: REDACTED_IMAGE_PLACEHOLDER
-    };
-  }
-  return item;
-};
-const sanitizeAnthropicImage = item => {
-  if (isMultimodalEnabled()) return item;
-  if (!isObject(item)) return item;
-  // Handle Anthropic's image and document formats (same structure, different type field)
-  if ((item.type === 'image' || item.type === 'document') && 'source' in item && isObject(item.source) && item.source.type === 'base64' && 'data' in item.source) {
-    return {
-      ...item,
-      source: {
-        ...item.source,
-        data: REDACTED_IMAGE_PLACEHOLDER
-      }
-    };
-  }
-  return item;
-};
-const sanitizeGeminiPart = part => {
-  if (isMultimodalEnabled()) return part;
-  if (!isObject(part)) return part;
-  // Handle Gemini's inline data format (images, audio, PDFs all use inlineData)
-  if ('inlineData' in part && isObject(part.inlineData) && 'data' in part.inlineData) {
-    return {
-      ...part,
-      inlineData: {
-        ...part.inlineData,
-        data: REDACTED_IMAGE_PLACEHOLDER
-      }
-    };
-  }
-  return part;
-};
-const processGeminiItem = item => {
-  if (!isObject(item)) return item;
-  // If it has parts, process them
-  if ('parts' in item && item.parts) {
-    const parts = Array.isArray(item.parts) ? item.parts.map(sanitizeGeminiPart) : sanitizeGeminiPart(item.parts);
-    return {
-      ...item,
-      parts
-    };
-  }
-  return item;
-};
-const sanitizeLangChainImage = item => {
-  if (!isObject(item)) return item;
-  // OpenAI style
-  if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
-    return {
-      ...item,
-      image_url: {
-        ...item.image_url,
-        url: redactBase64DataUrl(item.image_url.url)
-      }
-    };
-  }
-  // Direct image with data field
-  if (item.type === 'image' && 'data' in item) {
-    return {
-      ...item,
-      data: redactBase64DataUrl(item.data)
-    };
-  }
-  // Anthropic style
-  if (item.type === 'image' && 'source' in item && isObject(item.source) && 'data' in item.source) {
-    if (isMultimodalEnabled()) return item;
-    return {
-      ...item,
-      source: {
-        ...item.source,
-        data: redactBase64DataUrl(item.source.data)
-      }
-    };
-  }
-  // Google style
-  if (item.type === 'media' && 'data' in item) {
-    return {
-      ...item,
-      data: redactBase64DataUrl(item.data)
-    };
-  }
-  return item;
-};
-// Export individual sanitizers for tree-shaking
-const sanitizeOpenAI = data => {
-  return processMessages(data, sanitizeOpenAIImage);
-};
-const sanitizeAnthropic = data => {
-  return processMessages(data, sanitizeAnthropicImage);
-};
-const sanitizeGemini = data => {
-  // Gemini has a different structure with 'parts' directly on items instead of 'content'
-  // So we need custom processing instead of using processMessages
-  if (!data) return data;
-  if (Array.isArray(data)) {
-    return data.map(processGeminiItem);
-  }
-  return processGeminiItem(data);
-};
-const sanitizeLangChain = data => {
-  return processMessages(data, sanitizeLangChainImage);
-};
-
 const Chat = openai.OpenAI.Chat;
 const Completions = Chat.Completions;
 const Responses = openai.OpenAI.Responses;
@@ -920,6 +939,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
         };
         // Map to track in-progress tool calls
         const toolCallsInProgress = new Map();
+        let rawUsageData;
         for await (const chunk of stream1) {
           // Extract model from chunk (Chat Completions chunks have model field)
           if (!modelFromResponse && chunk.model) {
@@ -967,6 +987,7 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
           }
           // Handle usage information
           if (chunk.usage) {
+            rawUsageData = chunk.usage;
             usage = {
               ...usage,
               inputTokens: chunk.usage.prompt_tokens ?? 0,
@@ -1025,7 +1046,8 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
             outputTokens: usage.outputTokens,
             reasoningTokens: usage.reasoningTokens,
             cacheReadInputTokens: usage.cacheReadInputTokens,
-            webSearchCount: usage.webSearchCount
+            webSearchCount: usage.webSearchCount,
+            rawUsage: rawUsageData
           },
           tools: availableTools
         });
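Every streaming wrapper in this release follows the same capture pattern: remember the most recent raw `usage` payload seen on the teed stream, then attach it as `rawUsage` when the event is sent. The essence, as a self-contained sketch (not the SDK's exact code):

```js
// Minimal sketch of the streaming rawUsage capture pattern.
async function collectUsage(stream) {
  let rawUsageData;                                // provider's usage object, verbatim
  const usage = { inputTokens: 0, outputTokens: 0 };
  for await (const chunk of stream) {
    if (chunk.usage) {
      // OpenAI emits usage once, on the final chunk (when requested via
      // stream_options: { include_usage: true }), so the last assignment wins.
      rawUsageData = chunk.usage;
      usage.inputTokens = chunk.usage.prompt_tokens ?? 0;
      usage.outputTokens = chunk.usage.completion_tokens ?? 0;
    }
  }
  return { ...usage, rawUsage: rawUsageData };
}
```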
@@ -1076,7 +1098,8 @@ let WrappedCompletions$1 = class WrappedCompletions extends Completions {
           outputTokens: result.usage?.completion_tokens ?? 0,
           reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
           cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0,
-          webSearchCount: calculateWebSearchCount(result)
+          webSearchCount: calculateWebSearchCount(result),
+          rawUsage: result.usage
         },
         tools: availableTools
       });
@@ -1134,6 +1157,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           outputTokens: 0,
           webSearchCount: 0
         };
+        let rawUsageData;
        for await (const chunk of stream1) {
           if ('response' in chunk && chunk.response) {
             // Extract model from response object in chunk (for stored prompts)
@@ -1149,6 +1173,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
             finalContent = chunk.response.output;
           }
           if ('response' in chunk && chunk.response?.usage) {
+            rawUsageData = chunk.response.usage;
             usage = {
               ...usage,
               inputTokens: chunk.response.usage.input_tokens ?? 0,
@@ -1165,7 +1190,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           ...posthogParams,
           model: openAIParams.model ?? modelFromResponse,
           provider: 'openai',
-          input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+          input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
           output: finalContent,
           latency,
           baseURL: this.baseURL,
@@ -1176,7 +1201,8 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
             outputTokens: usage.outputTokens,
             reasoningTokens: usage.reasoningTokens,
             cacheReadInputTokens: usage.cacheReadInputTokens,
-            webSearchCount: usage.webSearchCount
+            webSearchCount: usage.webSearchCount,
+            rawUsage: rawUsageData
           },
           tools: availableTools
         });
@@ -1186,7 +1212,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           ...posthogParams,
           model: openAIParams.model,
           provider: 'openai',
-          input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+          input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
           output: [],
           latency: 0,
           baseURL: this.baseURL,
@@ -1217,7 +1243,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         ...posthogParams,
         model: openAIParams.model ?? result.model,
         provider: 'openai',
-        input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+        input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
         output: formattedOutput,
         latency,
         baseURL: this.baseURL,
@@ -1228,7 +1254,8 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           outputTokens: result.usage?.output_tokens ?? 0,
           reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
           cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
-          webSearchCount: calculateWebSearchCount(result)
+          webSearchCount: calculateWebSearchCount(result),
+          rawUsage: result.usage
         },
         tools: availableTools
       });
@@ -1241,7 +1268,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         ...posthogParams,
         model: openAIParams.model,
         provider: 'openai',
-        input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+        input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
         output: [],
         latency: 0,
         baseURL: this.baseURL,
@@ -1277,7 +1304,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         ...posthogParams,
         model: openAIParams.model ?? result.model,
         provider: 'openai',
-        input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+        input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
         output: result.output,
         latency,
         baseURL: this.baseURL,
@@ -1287,7 +1314,8 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
           inputTokens: result.usage?.input_tokens ?? 0,
           outputTokens: result.usage?.output_tokens ?? 0,
           reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
-          cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0
+          cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
+          rawUsage: result.usage
         }
       });
       return result;
@@ -1297,7 +1325,7 @@ let WrappedResponses$1 = class WrappedResponses extends Responses {
         ...posthogParams,
         model: openAIParams.model,
         provider: 'openai',
-        input: formatOpenAIResponsesInput(openAIParams.input, openAIParams.instructions),
+        input: formatOpenAIResponsesInput(sanitizeOpenAIResponse(openAIParams.input), openAIParams.instructions),
         output: [],
         latency: 0,
         baseURL: this.baseURL,
@@ -1346,7 +1374,8 @@ let WrappedEmbeddings$1 = class WrappedEmbeddings extends Embeddings {
         params: body,
         httpStatus: 200,
         usage: {
-          inputTokens: result.usage?.prompt_tokens ?? 0
+          inputTokens: result.usage?.prompt_tokens ?? 0,
+          rawUsage: result.usage
         }
       });
       return result;
@@ -1414,7 +1443,8 @@ class WrappedTranscriptions extends Transcriptions {
           if ('usage' in chunk && chunk.usage) {
             usage = {
               inputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.input_tokens ?? 0 : 0,
-              outputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.output_tokens ?? 0 : 0
+              outputTokens: chunk.usage?.type === 'tokens' ? chunk.usage.output_tokens ?? 0 : 0,
+              rawUsage: chunk.usage
             };
           }
         }
@@ -1475,7 +1505,8 @@ class WrappedTranscriptions extends Transcriptions {
         httpStatus: 200,
         usage: {
           inputTokens: result.usage?.type === 'tokens' ? result.usage.input_tokens ?? 0 : 0,
-          outputTokens: result.usage?.type === 'tokens' ? result.usage.output_tokens ?? 0 : 0
+          outputTokens: result.usage?.type === 'tokens' ? result.usage.output_tokens ?? 0 : 0,
+          rawUsage: result.usage
         }
       });
       return result;
@@ -2271,13 +2302,29 @@ const wrapVercelLanguageModel = (model, phClient, options) => {
       const webSearchCount = extractWebSearchCount(providerMetadata, result.usage);
       // V2 usage has simple numbers, V3 has objects with .total - normalize both
       const usageObj = result.usage;
+      // Extract raw response for providers that include detailed usage metadata
+      // For Gemini, candidatesTokensDetails is in result.response.body.usageMetadata
+      const rawUsageData = {
+        usage: result.usage,
+        providerMetadata
+      };
+      // Include response body usageMetadata if it contains detailed token breakdown (e.g., candidatesTokensDetails)
+      if (result.response && typeof result.response === 'object') {
+        const responseBody = result.response.body;
+        if (responseBody && typeof responseBody === 'object' && 'usageMetadata' in responseBody) {
+          rawUsageData.rawResponse = {
+            usageMetadata: responseBody.usageMetadata
+          };
+        }
+      }
       const usage = {
         inputTokens: extractTokenCount(result.usage.inputTokens),
         outputTokens: extractTokenCount(result.usage.outputTokens),
         reasoningTokens: extractReasoningTokens(usageObj),
         cacheReadInputTokens: extractCacheReadTokens(usageObj),
         webSearchCount,
-        ...additionalTokenValues
+        ...additionalTokenValues,
+        rawUsage: rawUsageData
       };
       adjustAnthropicV3CacheTokens(model, provider, usage);
       await sendEventToPosthog({
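In the Vercel wrapper, `rawUsage` is a composite, because the AI SDK has already normalized usage and the provider's own numbers live elsewhere. A sketch of the object this hunk assembles for a Gemini call; field names follow the code above, values are invented:

```js
// Hypothetical usage.rawUsage for a Gemini call through the Vercel AI SDK.
const rawUsageData = {
  usage: { inputTokens: 104, outputTokens: 33, totalTokens: 137 }, // SDK-normalized
  providerMetadata: { google: { /* provider-specific metadata */ } },
  // Present only when the response body carries a detailed breakdown:
  rawResponse: {
    usageMetadata: {
      promptTokenCount: 104,
      candidatesTokenCount: 33,
      candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 33 }]
    }
  }
};
```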
@@ -2432,10 +2479,14 @@ const wrapVercelLanguageModel = (model, phClient, options) => {
         content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
       }] : [];
       const webSearchCount = extractWebSearchCount(providerMetadata, usage);
-      // Update usage with web search count
+      // Update usage with web search count and raw metadata
       const finalUsage = {
         ...usage,
-        webSearchCount
+        webSearchCount,
+        rawUsage: {
+          usage,
+          providerMetadata
+        }
       };
       adjustAnthropicV3CacheTokens(model, provider, finalUsage);
       await sendEventToPosthog({
@@ -2528,6 +2579,7 @@ class WrappedMessages extends AnthropicOriginal__default.default.Messages {
         cacheReadInputTokens: 0,
         webSearchCount: 0
       };
+      let lastRawUsage;
       if ('tee' in value) {
         const [stream1, stream2] = value.tee();
         (async () => {
@@ -2600,12 +2652,14 @@ class WrappedMessages extends AnthropicOriginal__default.default.Messages {
               }
             }
             if (chunk.type == 'message_start') {
+              lastRawUsage = chunk.message.usage;
               usage.inputTokens = chunk.message.usage.input_tokens ?? 0;
               usage.cacheCreationInputTokens = chunk.message.usage.cache_creation_input_tokens ?? 0;
               usage.cacheReadInputTokens = chunk.message.usage.cache_read_input_tokens ?? 0;
               usage.webSearchCount = chunk.message.usage.server_tool_use?.web_search_requests ?? 0;
             }
             if ('usage' in chunk) {
+              lastRawUsage = chunk.usage;
               usage.outputTokens = chunk.usage.output_tokens ?? 0;
               // Update web search count if present in delta
               if (chunk.usage.server_tool_use?.web_search_requests !== undefined) {
@@ -2613,6 +2667,7 @@ class WrappedMessages extends AnthropicOriginal__default.default.Messages {
               }
             }
           }
+          usage.rawUsage = lastRawUsage;
           const latency = (Date.now() - startTime) / 1000;
           const availableTools = extractAvailableToolCalls('anthropic', anthropicParams);
           // Format output to match non-streaming version
@@ -2686,7 +2741,8 @@ class WrappedMessages extends AnthropicOriginal__default.default.Messages {
           outputTokens: result.usage.output_tokens ?? 0,
           cacheCreationInputTokens: result.usage.cache_creation_input_tokens ?? 0,
           cacheReadInputTokens: result.usage.cache_read_input_tokens ?? 0,
-          webSearchCount: result.usage.server_tool_use?.web_search_requests ?? 0
+          webSearchCount: result.usage.server_tool_use?.web_search_requests ?? 0,
+          rawUsage: result.usage
         },
         tools: availableTools
       });
@@ -2760,7 +2816,8 @@ class WrappedModels {
           outputTokens: metadata?.candidatesTokenCount ?? 0,
           reasoningTokens: metadata?.thoughtsTokenCount ?? 0,
           cacheReadInputTokens: metadata?.cachedContentTokenCount ?? 0,
-          webSearchCount: calculateGoogleWebSearchCount(response)
+          webSearchCount: calculateGoogleWebSearchCount(response),
+          rawUsage: metadata
         },
         tools: availableTools
       });
@@ -2796,7 +2853,8 @@ class WrappedModels {
     let usage = {
       inputTokens: 0,
       outputTokens: 0,
-      webSearchCount: 0
+      webSearchCount: 0,
+      rawUsage: undefined
     };
     try {
       const stream = await this.client.models.generateContentStream(geminiParams);
@@ -2854,7 +2912,8 @@ class WrappedModels {
             outputTokens: metadata.candidatesTokenCount ?? 0,
             reasoningTokens: metadata.thoughtsTokenCount ?? 0,
             cacheReadInputTokens: metadata.cachedContentTokenCount ?? 0,
-            webSearchCount: usage.webSearchCount
+            webSearchCount: usage.webSearchCount,
+            rawUsage: metadata
           };
         }
         yield chunk;
@@ -2879,7 +2938,8 @@ class WrappedModels {
         httpStatus: 200,
         usage: {
           ...usage,
-          webSearchCount: usage.webSearchCount
+          webSearchCount: usage.webSearchCount,
+          rawUsage: usage.rawUsage
         },
         tools: availableTools
       });
@@ -3317,6 +3377,20 @@ function isSerializableLike(obj) {
   return obj !== null && typeof obj === "object" && "lc_serializable" in obj && typeof obj.toJSON === "function";
 }
 /**
+ * Create a "not_implemented" serialization result for objects that cannot be serialized.
+ */
+function createNotImplemented(obj) {
+  let id;
+  if (obj !== null && typeof obj === "object") if ("lc_id" in obj && Array.isArray(obj.lc_id)) id = obj.lc_id;
+  else id = [obj.constructor?.name ?? "Object"];
+  else id = [typeof obj];
+  return {
+    lc: 1,
+    type: "not_implemented",
+    id
+  };
+}
+/**
  * Escape a value if it needs escaping (contains `lc` key).
  *
  * This is a simpler version of `serializeValue` that doesn't handle Serializable
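`createNotImplemented` follows LangChain's serialization convention for values the serializer gives up on, with `id` falling back from `lc_id` to the constructor name to the primitive type. Assuming the function above is in scope:

```js
createNotImplemented({ lc_id: ['langchain', 'schema', 'Foo'] });
// => { lc: 1, type: 'not_implemented', id: ['langchain', 'schema', 'Foo'] }

createNotImplemented(new Map());
// => { lc: 1, type: 'not_implemented', id: ['Map'] }

createNotImplemented(42);
// => { lc: 1, type: 'not_implemented', id: ['number'] }
```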
@@ -3324,18 +3398,27 @@ function isSerializableLike(obj) {
  * processed by `toJSON()`.
  *
  * @param value - The value to potentially escape.
+ * @param pathSet - WeakSet to track ancestor objects in the current path to detect circular references.
+ * Objects are removed after processing to allow shared references (same object in
+ * multiple places) while still detecting true circular references (ancestor in descendant).
  * @returns The value with any `lc`-containing objects wrapped in escape markers.
  */
-function escapeIfNeeded(value) {
+function escapeIfNeeded(value, pathSet = /* @__PURE__ */ new WeakSet()) {
   if (value !== null && typeof value === "object" && !Array.isArray(value)) {
+    if (pathSet.has(value)) return createNotImplemented(value);
     if (isSerializableLike(value)) return value;
+    pathSet.add(value);
     const record = value;
-    if (needsEscaping(record)) return escapeObject(record);
+    if (needsEscaping(record)) {
+      pathSet.delete(value);
+      return escapeObject(record);
+    }
     const result = {};
-    for (const [key, val] of Object.entries(record)) result[key] = escapeIfNeeded(val);
+    for (const [key, val] of Object.entries(record)) result[key] = escapeIfNeeded(val, pathSet);
+    pathSet.delete(value);
     return result;
   }
-  if (Array.isArray(value)) return value.map((item) => escapeIfNeeded(item));
+  if (Array.isArray(value)) return value.map((item) => escapeIfNeeded(item, pathSet));
   return value;
 }
 
@@ -3461,7 +3544,9 @@ var Serializable = class Serializable {
       if (last in read && read[last] !== void 0) write[last] = write[last] || read[last];
     });
     const escapedKwargs = {};
-    for (const [key, value] of Object.entries(kwargs)) escapedKwargs[key] = escapeIfNeeded(value);
+    const pathSet = /* @__PURE__ */ new WeakSet();
+    pathSet.add(this);
+    for (const [key, value] of Object.entries(kwargs)) escapedKwargs[key] = escapeIfNeeded(value, pathSet);
     const kwargsWithSecrets = Object.keys(secrets).length ? replaceSecrets(escapedKwargs, secrets) : escapedKwargs;
     const processedKwargs = mapKeys(kwargsWithSecrets, keyToJson, aliases);
     return {
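The add-then-delete discipline on `pathSet` (add an object on the way down, delete it on the way back up) is what lets the serializer tell a true cycle apart from the same object merely appearing twice; `Serializable.toJSON` additionally seeds the set with `this`, so an instance referenced from its own kwargs is cut off the same way. A sketch, assuming `escapeIfNeeded` is in scope:

```js
const shared = { a: 1 };
const ok = { left: shared, right: shared }; // shared reference, not a cycle
escapeIfNeeded(ok);                          // both branches processed normally

const cyclic = { name: 'loop' };
cyclic.self = cyclic;                        // descendant points back at an ancestor
escapeIfNeeded(cyclic);
// => { name: 'loop', self: { lc: 1, type: 'not_implemented', id: ['Object'] } }
// Before this change the recursion had no cycle guard, so a self-referential
// kwarg would recurse without bound.
```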