langsmith 0.3.73 → 0.3.75

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/client.cjs CHANGED
@@ -1558,6 +1558,9 @@ class Client {
1558
1558
  is_root: isRoot,
1559
1559
  order,
1560
1560
  };
1561
+ if (body.select.includes("child_run_ids")) {
1562
+ (0, warn_js_1.warnOnce)("Deprecated: 'child_run_ids' in the listRuns select parameter is deprecated and will be removed in a future version.");
1563
+ }
1561
1564
  let runsYielded = 0;
1562
1565
  for await (const runs of this._getCursorPaginatedList("/runs/query", body)) {
1563
1566
  if (limit) {
package/dist/client.d.ts CHANGED
@@ -131,6 +131,9 @@ interface ListRunsParams {
131
131
  treeFilter?: string;
132
132
  /**
133
133
  * The values to include in the response.
134
+ *
135
+ * Note: The 'child_run_ids' value is deprecated and will be removed in a future version.
136
+ * This field is no longer populated by the API.
134
137
  */
135
138
  select?: string[];
136
139
  }
package/dist/client.js CHANGED
@@ -1520,6 +1520,9 @@ export class Client {
1520
1520
  is_root: isRoot,
1521
1521
  order,
1522
1522
  };
1523
+ if (body.select.includes("child_run_ids")) {
1524
+ warnOnce("Deprecated: 'child_run_ids' in the listRuns select parameter is deprecated and will be removed in a future version.");
1525
+ }
1523
1526
  let runsYielded = 0;
1524
1527
  for await (const runs of this._getCursorPaginatedList("/runs/query", body)) {
1525
1528
  if (limit) {
package/dist/index.cjs CHANGED
@@ -10,4 +10,4 @@ Object.defineProperty(exports, "overrideFetchImplementation", { enumerable: true
10
10
  var project_js_1 = require("./utils/project.cjs");
11
11
  Object.defineProperty(exports, "getDefaultProjectName", { enumerable: true, get: function () { return project_js_1.getDefaultProjectName; } });
12
12
  // Update using yarn bump-version
13
- exports.__version__ = "0.3.73";
13
+ exports.__version__ = "0.3.75";
package/dist/index.d.ts CHANGED
@@ -3,4 +3,4 @@ export type { Dataset, Example, TracerSession, Run, Feedback, RetrieverOutput, }
3
3
  export { RunTree, type RunTreeConfig } from "./run_trees.js";
4
4
  export { overrideFetchImplementation } from "./singletons/fetch.js";
5
5
  export { getDefaultProjectName } from "./utils/project.js";
6
- export declare const __version__ = "0.3.73";
6
+ export declare const __version__ = "0.3.75";
package/dist/index.js CHANGED
@@ -3,4 +3,4 @@ export { RunTree } from "./run_trees.js";
3
3
  export { overrideFetchImplementation } from "./singletons/fetch.js";
4
4
  export { getDefaultProjectName } from "./utils/project.js";
5
5
  // Update using yarn bump-version
6
- export const __version__ = "0.3.73";
6
+ export const __version__ = "0.3.75";
@@ -91,7 +91,9 @@ const runInputsToMap = (rawInputs) => {
91
91
  }
92
92
  return inputs;
93
93
  };
94
- const handleRunInputs = (inputs, processInputs) => {
94
+ const handleRunInputs = (
95
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
96
+ inputs, processInputs) => {
95
97
  try {
96
98
  return processInputs(inputs);
97
99
  }
@@ -147,6 +149,7 @@ function isAsyncFn(fn) {
147
149
  // Note: This mutates the run tree
148
150
  async function handleRunOutputs(params) {
149
151
  const { runTree, rawOutputs, processOutputsFn, on_end, postRunPromise, excludeInputs, } = params;
152
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
150
153
  let outputs;
151
154
  if ((0, asserts_js_1.isKVMap)(rawOutputs)) {
152
155
  outputs = { ...rawOutputs };
@@ -2,6 +2,18 @@ import { RunTreeConfig } from "./run_trees.js";
2
2
  import { Attachments, InvocationParamsSchema, KVMap } from "./schemas.js";
3
3
  import type { TraceableFunction } from "./singletons/types.js";
4
4
  import { OTELTracer } from "./experimental/otel/types.js";
5
+ export type ProcessInputs<Args extends unknown[]> = Args extends [] ? Record<string, never> : Args extends [infer Input] ? Input extends KVMap ? Input extends Iterable<infer Item> | AsyncIterable<infer Item> ? {
6
+ input: Array<Item>;
7
+ } : Input : {
8
+ input: Input;
9
+ } : {
10
+ args: Args;
11
+ };
12
+ export type ProcessOutputs<ReturnValue> = ReturnValue extends KVMap ? ReturnValue extends Iterable<infer Item> | AsyncIterable<infer Item> ? {
13
+ outputs: Array<Item>;
14
+ } : ReturnValue : {
15
+ outputs: ReturnValue;
16
+ };
5
17
  export type TraceableConfig<Func extends (...args: any[]) => any> = Partial<Omit<RunTreeConfig, "inputs" | "outputs">> & {
6
18
  aggregator?: (args: any[]) => any;
7
19
  argsConfigPath?: [number] | [number, string];
@@ -27,19 +39,31 @@ export type TraceableConfig<Func extends (...args: any[]) => any> = Partial<Omit
27
39
  * This function should NOT mutate the inputs.
28
40
  * `processInputs` is not inherited by nested traceable functions.
29
41
  *
42
+ * The input to this function is determined as follows based on the
43
+ * arguments passed to the wrapped function:
44
+ * - If called with one argument that is an object, it will be the unchanged argument
45
+ * - If called with one argument that is not an object, it will be `{ input: arg }`
46
+ * - If called with multiple arguments, it will be `{ args: [...arguments] }`
47
+ * - If called with no arguments, it will be an empty object `{}`
48
+ *
30
49
  * @param inputs Key-value map of the function inputs.
31
50
  * @returns Transformed key-value map
32
51
  */
33
- processInputs?: (inputs: Readonly<KVMap>) => KVMap;
52
+ processInputs?: (inputs: Readonly<ProcessInputs<Parameters<Func>>>) => KVMap;
34
53
  /**
35
54
  * Apply transformations to the outputs before logging.
36
55
  * This function should NOT mutate the outputs.
37
56
  * `processOutputs` is not inherited by nested traceable functions.
38
57
  *
58
+ * The input to this function is determined as follows based on the
59
+ * return value of the wrapped function:
60
+ * - If the return value is an object, it will be the unchanged return value
61
+ * - If the return value is not an object, it will be wrapped as `{ outputs: returnValue }`
62
+ *
39
63
  * @param outputs Key-value map of the function outputs
40
64
  * @returns Transformed key-value map
41
65
  */
42
- processOutputs?: (outputs: Readonly<KVMap>) => KVMap | Promise<KVMap>;
66
+ processOutputs?: (outputs: Readonly<ProcessOutputs<Awaited<ReturnType<Func>>>>) => KVMap | Promise<KVMap>;
43
67
  };
44
68
  /**
45
69
  * Higher-order function that takes function as input and returns a
package/dist/traceable.js CHANGED
@@ -87,7 +87,9 @@ const runInputsToMap = (rawInputs) => {
87
87
  }
88
88
  return inputs;
89
89
  };
90
- const handleRunInputs = (inputs, processInputs) => {
90
+ const handleRunInputs = (
91
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
92
+ inputs, processInputs) => {
91
93
  try {
92
94
  return processInputs(inputs);
93
95
  }
@@ -143,6 +145,7 @@ function isAsyncFn(fn) {
143
145
  // Note: This mutates the run tree
144
146
  async function handleRunOutputs(params) {
145
147
  const { runTree, rawOutputs, processOutputsFn, on_end, postRunPromise, excludeInputs, } = params;
148
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
146
149
  let outputs;
147
150
  if (isKVMap(rawOutputs)) {
148
151
  outputs = { ...rawOutputs };
@@ -127,49 +127,77 @@ const textAggregator = (allChunks
127
127
  ];
128
128
  return aggregatedOutput;
129
129
  };
130
+ function isChatCompletionUsage(usage) {
131
+ return usage != null && typeof usage === "object" && "prompt_tokens" in usage;
132
+ }
130
133
  function processChatCompletion(outputs) {
131
- const chatCompletion = outputs;
132
- const recognizedServiceTier = ["priority", "flex"].includes(chatCompletion.service_tier ?? "")
133
- ? chatCompletion.service_tier
134
+ const openAICompletion = outputs;
135
+ const recognizedServiceTier = ["priority", "flex"].includes(openAICompletion.service_tier ?? "")
136
+ ? openAICompletion.service_tier
134
137
  : undefined;
135
138
  const serviceTierPrefix = recognizedServiceTier
136
139
  ? `${recognizedServiceTier}_`
137
140
  : "";
138
141
  // copy the original object, minus usage
139
- const result = { ...chatCompletion };
140
- const usage = chatCompletion.usage;
142
+ const result = { ...openAICompletion };
143
+ const usage = openAICompletion.usage;
141
144
  if (usage) {
142
- const inputTokenDetails = {
143
- ...(usage.prompt_tokens_details?.audio_tokens !== null && {
144
- audio: usage.prompt_tokens_details?.audio_tokens,
145
- }),
146
- ...(usage.prompt_tokens_details?.cached_tokens !== null && {
147
- [`${serviceTierPrefix}cache_read`]: usage.prompt_tokens_details?.cached_tokens,
148
- }),
149
- };
150
- const outputTokenDetails = {
151
- ...(usage.completion_tokens_details?.audio_tokens !== null && {
152
- audio: usage.completion_tokens_details?.audio_tokens,
153
- }),
154
- ...(usage.completion_tokens_details?.reasoning_tokens !== null && {
155
- [`${serviceTierPrefix}reasoning`]: usage.completion_tokens_details?.reasoning_tokens,
156
- }),
157
- };
145
+ let inputTokens = 0;
146
+ let outputTokens = 0;
147
+ let totalTokens = 0;
148
+ let inputTokenDetails = {};
149
+ let outputTokenDetails = {};
150
+ if (isChatCompletionUsage(usage)) {
151
+ inputTokens = usage.prompt_tokens ?? 0;
152
+ outputTokens = usage.completion_tokens ?? 0;
153
+ totalTokens = usage.total_tokens ?? 0;
154
+ inputTokenDetails = {
155
+ ...(usage.prompt_tokens_details?.audio_tokens !== null && {
156
+ audio: usage.prompt_tokens_details?.audio_tokens,
157
+ }),
158
+ ...(usage.prompt_tokens_details?.cached_tokens !== null && {
159
+ [`${serviceTierPrefix}cache_read`]: usage.prompt_tokens_details?.cached_tokens,
160
+ }),
161
+ };
162
+ outputTokenDetails = {
163
+ ...(usage.completion_tokens_details?.audio_tokens !== null && {
164
+ audio: usage.completion_tokens_details?.audio_tokens,
165
+ }),
166
+ ...(usage.completion_tokens_details?.reasoning_tokens !== null && {
167
+ [`${serviceTierPrefix}reasoning`]: usage.completion_tokens_details?.reasoning_tokens,
168
+ }),
169
+ };
170
+ }
171
+ else {
172
+ inputTokens = usage.input_tokens ?? 0;
173
+ outputTokens = usage.output_tokens ?? 0;
174
+ totalTokens = usage.total_tokens ?? 0;
175
+ inputTokenDetails = {
176
+ ...(usage.input_tokens_details?.cached_tokens !== null && {
177
+ [`${serviceTierPrefix}cache_read`]: usage.input_tokens_details?.cached_tokens,
178
+ }),
179
+ };
180
+ outputTokenDetails = {
181
+ ...(usage.output_tokens_details?.reasoning_tokens !== null && {
182
+ [`${serviceTierPrefix}reasoning`]: usage.output_tokens_details?.reasoning_tokens,
183
+ }),
184
+ };
185
+ }
158
186
  if (recognizedServiceTier) {
159
187
  // Avoid counting cache read and reasoning tokens towards the
160
188
  // service tier token count since service tier tokens are already
161
189
  // priced differently
162
190
  inputTokenDetails[recognizedServiceTier] =
163
- usage.prompt_tokens -
191
+ inputTokens -
164
192
  (inputTokenDetails[`${serviceTierPrefix}cache_read`] ?? 0);
165
193
  outputTokenDetails[recognizedServiceTier] =
166
- usage.completion_tokens -
194
+ outputTokens -
167
195
  (outputTokenDetails[`${serviceTierPrefix}reasoning`] ?? 0);
168
196
  }
169
197
  result.usage_metadata = {
170
- input_tokens: usage.prompt_tokens ?? 0,
171
- output_tokens: usage.completion_tokens ?? 0,
172
- total_tokens: usage.total_tokens ?? 0,
198
+ input_tokens: inputTokens ?? 0,
199
+ output_tokens: outputTokens ?? 0,
200
+ total_tokens: totalTokens ?? 0,
173
201
  ...(Object.keys(inputTokenDetails).length > 0 && {
174
202
  input_token_details: inputTokenDetails,
175
203
  }),
@@ -124,49 +124,77 @@ const textAggregator = (allChunks
124
124
  ];
125
125
  return aggregatedOutput;
126
126
  };
127
+ function isChatCompletionUsage(usage) {
128
+ return usage != null && typeof usage === "object" && "prompt_tokens" in usage;
129
+ }
127
130
  function processChatCompletion(outputs) {
128
- const chatCompletion = outputs;
129
- const recognizedServiceTier = ["priority", "flex"].includes(chatCompletion.service_tier ?? "")
130
- ? chatCompletion.service_tier
131
+ const openAICompletion = outputs;
132
+ const recognizedServiceTier = ["priority", "flex"].includes(openAICompletion.service_tier ?? "")
133
+ ? openAICompletion.service_tier
131
134
  : undefined;
132
135
  const serviceTierPrefix = recognizedServiceTier
133
136
  ? `${recognizedServiceTier}_`
134
137
  : "";
135
138
  // copy the original object, minus usage
136
- const result = { ...chatCompletion };
137
- const usage = chatCompletion.usage;
139
+ const result = { ...openAICompletion };
140
+ const usage = openAICompletion.usage;
138
141
  if (usage) {
139
- const inputTokenDetails = {
140
- ...(usage.prompt_tokens_details?.audio_tokens !== null && {
141
- audio: usage.prompt_tokens_details?.audio_tokens,
142
- }),
143
- ...(usage.prompt_tokens_details?.cached_tokens !== null && {
144
- [`${serviceTierPrefix}cache_read`]: usage.prompt_tokens_details?.cached_tokens,
145
- }),
146
- };
147
- const outputTokenDetails = {
148
- ...(usage.completion_tokens_details?.audio_tokens !== null && {
149
- audio: usage.completion_tokens_details?.audio_tokens,
150
- }),
151
- ...(usage.completion_tokens_details?.reasoning_tokens !== null && {
152
- [`${serviceTierPrefix}reasoning`]: usage.completion_tokens_details?.reasoning_tokens,
153
- }),
154
- };
142
+ let inputTokens = 0;
143
+ let outputTokens = 0;
144
+ let totalTokens = 0;
145
+ let inputTokenDetails = {};
146
+ let outputTokenDetails = {};
147
+ if (isChatCompletionUsage(usage)) {
148
+ inputTokens = usage.prompt_tokens ?? 0;
149
+ outputTokens = usage.completion_tokens ?? 0;
150
+ totalTokens = usage.total_tokens ?? 0;
151
+ inputTokenDetails = {
152
+ ...(usage.prompt_tokens_details?.audio_tokens !== null && {
153
+ audio: usage.prompt_tokens_details?.audio_tokens,
154
+ }),
155
+ ...(usage.prompt_tokens_details?.cached_tokens !== null && {
156
+ [`${serviceTierPrefix}cache_read`]: usage.prompt_tokens_details?.cached_tokens,
157
+ }),
158
+ };
159
+ outputTokenDetails = {
160
+ ...(usage.completion_tokens_details?.audio_tokens !== null && {
161
+ audio: usage.completion_tokens_details?.audio_tokens,
162
+ }),
163
+ ...(usage.completion_tokens_details?.reasoning_tokens !== null && {
164
+ [`${serviceTierPrefix}reasoning`]: usage.completion_tokens_details?.reasoning_tokens,
165
+ }),
166
+ };
167
+ }
168
+ else {
169
+ inputTokens = usage.input_tokens ?? 0;
170
+ outputTokens = usage.output_tokens ?? 0;
171
+ totalTokens = usage.total_tokens ?? 0;
172
+ inputTokenDetails = {
173
+ ...(usage.input_tokens_details?.cached_tokens !== null && {
174
+ [`${serviceTierPrefix}cache_read`]: usage.input_tokens_details?.cached_tokens,
175
+ }),
176
+ };
177
+ outputTokenDetails = {
178
+ ...(usage.output_tokens_details?.reasoning_tokens !== null && {
179
+ [`${serviceTierPrefix}reasoning`]: usage.output_tokens_details?.reasoning_tokens,
180
+ }),
181
+ };
182
+ }
155
183
  if (recognizedServiceTier) {
156
184
  // Avoid counting cache read and reasoning tokens towards the
157
185
  // service tier token count since service tier tokens are already
158
186
  // priced differently
159
187
  inputTokenDetails[recognizedServiceTier] =
160
- usage.prompt_tokens -
188
+ inputTokens -
161
189
  (inputTokenDetails[`${serviceTierPrefix}cache_read`] ?? 0);
162
190
  outputTokenDetails[recognizedServiceTier] =
163
- usage.completion_tokens -
191
+ outputTokens -
164
192
  (outputTokenDetails[`${serviceTierPrefix}reasoning`] ?? 0);
165
193
  }
166
194
  result.usage_metadata = {
167
- input_tokens: usage.prompt_tokens ?? 0,
168
- output_tokens: usage.completion_tokens ?? 0,
169
- total_tokens: usage.total_tokens ?? 0,
195
+ input_tokens: inputTokens ?? 0,
196
+ output_tokens: outputTokens ?? 0,
197
+ total_tokens: totalTokens ?? 0,
170
198
  ...(Object.keys(inputTokenDetails).length > 0 && {
171
199
  input_token_details: inputTokenDetails,
172
200
  }),
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "langsmith",
3
- "version": "0.3.73",
4
- "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.",
3
+ "version": "0.3.75",
4
+ "description": "Client library to connect to the LangSmith Observability and Evaluation Platform.",
5
5
  "packageManager": "yarn@1.22.19",
6
6
  "files": [
7
7
  "dist/",