langsmith 0.3.73 → 0.3.74
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client.cjs +3 -0
- package/dist/client.d.ts +3 -0
- package/dist/client.js +3 -0
- package/dist/index.cjs +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/wrappers/openai.cjs +54 -26
- package/dist/wrappers/openai.js +54 -26
- package/package.json +1 -1
package/dist/client.cjs
CHANGED
|
@@ -1558,6 +1558,9 @@ class Client {
|
|
|
1558
1558
|
is_root: isRoot,
|
|
1559
1559
|
order,
|
|
1560
1560
|
};
|
|
1561
|
+
if (body.select.includes("child_run_ids")) {
|
|
1562
|
+
(0, warn_js_1.warnOnce)("Deprecated: 'child_run_ids' in the listRuns select parameter is deprecated and will be removed in a future version.");
|
|
1563
|
+
}
|
|
1561
1564
|
let runsYielded = 0;
|
|
1562
1565
|
for await (const runs of this._getCursorPaginatedList("/runs/query", body)) {
|
|
1563
1566
|
if (limit) {
|
package/dist/client.d.ts
CHANGED
|
@@ -131,6 +131,9 @@ interface ListRunsParams {
|
|
|
131
131
|
treeFilter?: string;
|
|
132
132
|
/**
|
|
133
133
|
* The values to include in the response.
|
|
134
|
+
*
|
|
135
|
+
* Note: The 'child_run_ids' value is deprecated and will be removed in a future version.
|
|
136
|
+
* This field is no longer populated by the API.
|
|
134
137
|
*/
|
|
135
138
|
select?: string[];
|
|
136
139
|
}
|
package/dist/client.js
CHANGED
|
@@ -1520,6 +1520,9 @@ export class Client {
|
|
|
1520
1520
|
is_root: isRoot,
|
|
1521
1521
|
order,
|
|
1522
1522
|
};
|
|
1523
|
+
if (body.select.includes("child_run_ids")) {
|
|
1524
|
+
warnOnce("Deprecated: 'child_run_ids' in the listRuns select parameter is deprecated and will be removed in a future version.");
|
|
1525
|
+
}
|
|
1523
1526
|
let runsYielded = 0;
|
|
1524
1527
|
for await (const runs of this._getCursorPaginatedList("/runs/query", body)) {
|
|
1525
1528
|
if (limit) {
|
package/dist/index.cjs
CHANGED
|
@@ -10,4 +10,4 @@ Object.defineProperty(exports, "overrideFetchImplementation", { enumerable: true
|
|
|
10
10
|
var project_js_1 = require("./utils/project.cjs");
|
|
11
11
|
Object.defineProperty(exports, "getDefaultProjectName", { enumerable: true, get: function () { return project_js_1.getDefaultProjectName; } });
|
|
12
12
|
// Update using yarn bump-version
|
|
13
|
-
exports.__version__ = "0.3.73";
|
|
13
|
+
exports.__version__ = "0.3.74";
|
package/dist/index.d.ts
CHANGED
|
@@ -3,4 +3,4 @@ export type { Dataset, Example, TracerSession, Run, Feedback, RetrieverOutput, }
|
|
|
3
3
|
export { RunTree, type RunTreeConfig } from "./run_trees.js";
|
|
4
4
|
export { overrideFetchImplementation } from "./singletons/fetch.js";
|
|
5
5
|
export { getDefaultProjectName } from "./utils/project.js";
|
|
6
|
-
export declare const __version__ = "0.3.73";
|
|
6
|
+
export declare const __version__ = "0.3.74";
|
package/dist/index.js
CHANGED
|
@@ -3,4 +3,4 @@ export { RunTree } from "./run_trees.js";
|
|
|
3
3
|
export { overrideFetchImplementation } from "./singletons/fetch.js";
|
|
4
4
|
export { getDefaultProjectName } from "./utils/project.js";
|
|
5
5
|
// Update using yarn bump-version
|
|
6
|
-
export const __version__ = "0.3.73";
|
|
6
|
+
export const __version__ = "0.3.74";
|
package/dist/wrappers/openai.cjs
CHANGED
|
@@ -127,49 +127,77 @@ const textAggregator = (allChunks
|
|
|
127
127
|
];
|
|
128
128
|
return aggregatedOutput;
|
|
129
129
|
};
|
|
130
|
+
function isChatCompletionUsage(usage) {
|
|
131
|
+
return usage != null && typeof usage === "object" && "prompt_tokens" in usage;
|
|
132
|
+
}
|
|
130
133
|
function processChatCompletion(outputs) {
|
|
131
|
-
const
|
|
132
|
-
const recognizedServiceTier = ["priority", "flex"].includes(
|
|
133
|
-
?
|
|
134
|
+
const openAICompletion = outputs;
|
|
135
|
+
const recognizedServiceTier = ["priority", "flex"].includes(openAICompletion.service_tier ?? "")
|
|
136
|
+
? openAICompletion.service_tier
|
|
134
137
|
: undefined;
|
|
135
138
|
const serviceTierPrefix = recognizedServiceTier
|
|
136
139
|
? `${recognizedServiceTier}_`
|
|
137
140
|
: "";
|
|
138
141
|
// copy the original object, minus usage
|
|
139
|
-
const result = { ...
|
|
140
|
-
const usage =
|
|
142
|
+
const result = { ...openAICompletion };
|
|
143
|
+
const usage = openAICompletion.usage;
|
|
141
144
|
if (usage) {
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
145
|
+
let inputTokens = 0;
|
|
146
|
+
let outputTokens = 0;
|
|
147
|
+
let totalTokens = 0;
|
|
148
|
+
let inputTokenDetails = {};
|
|
149
|
+
let outputTokenDetails = {};
|
|
150
|
+
if (isChatCompletionUsage(usage)) {
|
|
151
|
+
inputTokens = usage.prompt_tokens ?? 0;
|
|
152
|
+
outputTokens = usage.completion_tokens ?? 0;
|
|
153
|
+
totalTokens = usage.total_tokens ?? 0;
|
|
154
|
+
inputTokenDetails = {
|
|
155
|
+
...(usage.prompt_tokens_details?.audio_tokens !== null && {
|
|
156
|
+
audio: usage.prompt_tokens_details?.audio_tokens,
|
|
157
|
+
}),
|
|
158
|
+
...(usage.prompt_tokens_details?.cached_tokens !== null && {
|
|
159
|
+
[`${serviceTierPrefix}cache_read`]: usage.prompt_tokens_details?.cached_tokens,
|
|
160
|
+
}),
|
|
161
|
+
};
|
|
162
|
+
outputTokenDetails = {
|
|
163
|
+
...(usage.completion_tokens_details?.audio_tokens !== null && {
|
|
164
|
+
audio: usage.completion_tokens_details?.audio_tokens,
|
|
165
|
+
}),
|
|
166
|
+
...(usage.completion_tokens_details?.reasoning_tokens !== null && {
|
|
167
|
+
[`${serviceTierPrefix}reasoning`]: usage.completion_tokens_details?.reasoning_tokens,
|
|
168
|
+
}),
|
|
169
|
+
};
|
|
170
|
+
}
|
|
171
|
+
else {
|
|
172
|
+
inputTokens = usage.input_tokens ?? 0;
|
|
173
|
+
outputTokens = usage.output_tokens ?? 0;
|
|
174
|
+
totalTokens = usage.total_tokens ?? 0;
|
|
175
|
+
inputTokenDetails = {
|
|
176
|
+
...(usage.input_tokens_details?.cached_tokens !== null && {
|
|
177
|
+
[`${serviceTierPrefix}cache_read`]: usage.input_tokens_details?.cached_tokens,
|
|
178
|
+
}),
|
|
179
|
+
};
|
|
180
|
+
outputTokenDetails = {
|
|
181
|
+
...(usage.output_tokens_details?.reasoning_tokens !== null && {
|
|
182
|
+
[`${serviceTierPrefix}reasoning`]: usage.output_tokens_details?.reasoning_tokens,
|
|
183
|
+
}),
|
|
184
|
+
};
|
|
185
|
+
}
|
|
158
186
|
if (recognizedServiceTier) {
|
|
159
187
|
// Avoid counting cache read and reasoning tokens towards the
|
|
160
188
|
// service tier token count since service tier tokens are already
|
|
161
189
|
// priced differently
|
|
162
190
|
inputTokenDetails[recognizedServiceTier] =
|
|
163
|
-
|
|
191
|
+
inputTokens -
|
|
164
192
|
(inputTokenDetails[`${serviceTierPrefix}cache_read`] ?? 0);
|
|
165
193
|
outputTokenDetails[recognizedServiceTier] =
|
|
166
|
-
|
|
194
|
+
outputTokens -
|
|
167
195
|
(outputTokenDetails[`${serviceTierPrefix}reasoning`] ?? 0);
|
|
168
196
|
}
|
|
169
197
|
result.usage_metadata = {
|
|
170
|
-
input_tokens:
|
|
171
|
-
output_tokens:
|
|
172
|
-
total_tokens:
|
|
198
|
+
input_tokens: inputTokens ?? 0,
|
|
199
|
+
output_tokens: outputTokens ?? 0,
|
|
200
|
+
total_tokens: totalTokens ?? 0,
|
|
173
201
|
...(Object.keys(inputTokenDetails).length > 0 && {
|
|
174
202
|
input_token_details: inputTokenDetails,
|
|
175
203
|
}),
|
package/dist/wrappers/openai.js
CHANGED
|
@@ -124,49 +124,77 @@ const textAggregator = (allChunks
|
|
|
124
124
|
];
|
|
125
125
|
return aggregatedOutput;
|
|
126
126
|
};
|
|
127
|
+
function isChatCompletionUsage(usage) {
|
|
128
|
+
return usage != null && typeof usage === "object" && "prompt_tokens" in usage;
|
|
129
|
+
}
|
|
127
130
|
function processChatCompletion(outputs) {
|
|
128
|
-
const
|
|
129
|
-
const recognizedServiceTier = ["priority", "flex"].includes(
|
|
130
|
-
?
|
|
131
|
+
const openAICompletion = outputs;
|
|
132
|
+
const recognizedServiceTier = ["priority", "flex"].includes(openAICompletion.service_tier ?? "")
|
|
133
|
+
? openAICompletion.service_tier
|
|
131
134
|
: undefined;
|
|
132
135
|
const serviceTierPrefix = recognizedServiceTier
|
|
133
136
|
? `${recognizedServiceTier}_`
|
|
134
137
|
: "";
|
|
135
138
|
// copy the original object, minus usage
|
|
136
|
-
const result = { ...
|
|
137
|
-
const usage =
|
|
139
|
+
const result = { ...openAICompletion };
|
|
140
|
+
const usage = openAICompletion.usage;
|
|
138
141
|
if (usage) {
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
142
|
+
let inputTokens = 0;
|
|
143
|
+
let outputTokens = 0;
|
|
144
|
+
let totalTokens = 0;
|
|
145
|
+
let inputTokenDetails = {};
|
|
146
|
+
let outputTokenDetails = {};
|
|
147
|
+
if (isChatCompletionUsage(usage)) {
|
|
148
|
+
inputTokens = usage.prompt_tokens ?? 0;
|
|
149
|
+
outputTokens = usage.completion_tokens ?? 0;
|
|
150
|
+
totalTokens = usage.total_tokens ?? 0;
|
|
151
|
+
inputTokenDetails = {
|
|
152
|
+
...(usage.prompt_tokens_details?.audio_tokens !== null && {
|
|
153
|
+
audio: usage.prompt_tokens_details?.audio_tokens,
|
|
154
|
+
}),
|
|
155
|
+
...(usage.prompt_tokens_details?.cached_tokens !== null && {
|
|
156
|
+
[`${serviceTierPrefix}cache_read`]: usage.prompt_tokens_details?.cached_tokens,
|
|
157
|
+
}),
|
|
158
|
+
};
|
|
159
|
+
outputTokenDetails = {
|
|
160
|
+
...(usage.completion_tokens_details?.audio_tokens !== null && {
|
|
161
|
+
audio: usage.completion_tokens_details?.audio_tokens,
|
|
162
|
+
}),
|
|
163
|
+
...(usage.completion_tokens_details?.reasoning_tokens !== null && {
|
|
164
|
+
[`${serviceTierPrefix}reasoning`]: usage.completion_tokens_details?.reasoning_tokens,
|
|
165
|
+
}),
|
|
166
|
+
};
|
|
167
|
+
}
|
|
168
|
+
else {
|
|
169
|
+
inputTokens = usage.input_tokens ?? 0;
|
|
170
|
+
outputTokens = usage.output_tokens ?? 0;
|
|
171
|
+
totalTokens = usage.total_tokens ?? 0;
|
|
172
|
+
inputTokenDetails = {
|
|
173
|
+
...(usage.input_tokens_details?.cached_tokens !== null && {
|
|
174
|
+
[`${serviceTierPrefix}cache_read`]: usage.input_tokens_details?.cached_tokens,
|
|
175
|
+
}),
|
|
176
|
+
};
|
|
177
|
+
outputTokenDetails = {
|
|
178
|
+
...(usage.output_tokens_details?.reasoning_tokens !== null && {
|
|
179
|
+
[`${serviceTierPrefix}reasoning`]: usage.output_tokens_details?.reasoning_tokens,
|
|
180
|
+
}),
|
|
181
|
+
};
|
|
182
|
+
}
|
|
155
183
|
if (recognizedServiceTier) {
|
|
156
184
|
// Avoid counting cache read and reasoning tokens towards the
|
|
157
185
|
// service tier token count since service tier tokens are already
|
|
158
186
|
// priced differently
|
|
159
187
|
inputTokenDetails[recognizedServiceTier] =
|
|
160
|
-
|
|
188
|
+
inputTokens -
|
|
161
189
|
(inputTokenDetails[`${serviceTierPrefix}cache_read`] ?? 0);
|
|
162
190
|
outputTokenDetails[recognizedServiceTier] =
|
|
163
|
-
|
|
191
|
+
outputTokens -
|
|
164
192
|
(outputTokenDetails[`${serviceTierPrefix}reasoning`] ?? 0);
|
|
165
193
|
}
|
|
166
194
|
result.usage_metadata = {
|
|
167
|
-
input_tokens:
|
|
168
|
-
output_tokens:
|
|
169
|
-
total_tokens:
|
|
195
|
+
input_tokens: inputTokens ?? 0,
|
|
196
|
+
output_tokens: outputTokens ?? 0,
|
|
197
|
+
total_tokens: totalTokens ?? 0,
|
|
170
198
|
...(Object.keys(inputTokenDetails).length > 0 && {
|
|
171
199
|
input_token_details: inputTokenDetails,
|
|
172
200
|
}),
|