langsmith 0.4.0 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -13,4 +13,4 @@ var uuid_js_1 = require("./uuid.cjs");
  Object.defineProperty(exports, "uuid7", { enumerable: true, get: function () { return uuid_js_1.uuid7; } });
  Object.defineProperty(exports, "uuid7FromTime", { enumerable: true, get: function () { return uuid_js_1.uuid7FromTime; } });
  // Update using yarn bump-version
- exports.__version__ = "0.4.0";
+ exports.__version__ = "0.4.1";
package/dist/index.d.ts CHANGED
@@ -4,4 +4,4 @@ export { RunTree, type RunTreeConfig } from "./run_trees.js";
  export { overrideFetchImplementation } from "./singletons/fetch.js";
  export { getDefaultProjectName } from "./utils/project.js";
  export { uuid7, uuid7FromTime } from "./uuid.js";
- export declare const __version__ = "0.4.0";
+ export declare const __version__ = "0.4.1";
package/dist/index.js CHANGED
@@ -4,4 +4,4 @@ export { overrideFetchImplementation } from "./singletons/fetch.js";
  export { getDefaultProjectName } from "./utils/project.js";
  export { uuid7, uuid7FromTime } from "./uuid.js";
  // Update using yarn bump-version
- export const __version__ = "0.4.0";
+ export const __version__ = "0.4.1";
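The only change to the entry points is the version bump; the constant is re-exported from the package root, so it can be used to confirm which build is resolved at runtime. A minimal TypeScript check (illustrative, not part of the diff):

import { __version__ } from "langsmith";

// Prints "0.4.1" once this release is installed.
console.log(`langsmith version: ${__version__}`);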
package/dist/run_trees.cjs CHANGED
@@ -676,6 +676,7 @@ class RunTree {
  await childRun.postRun(false);
  }
  }
+ this.child_runs = [];
  }
  catch (error) {
  console.error(`Error in postRun for run ${this.id}:`, error);
@@ -757,6 +758,7 @@ class RunTree {
  console.error(`Error in patchRun for run ${this.id}`, error);
  }
  }
+ this.child_runs = [];
  }
  toJSON() {
  return this._convertToCreate(this, undefined, false);
package/dist/run_trees.js CHANGED
@@ -670,6 +670,7 @@ export class RunTree {
  await childRun.postRun(false);
  }
  }
+ this.child_runs = [];
  }
  catch (error) {
  console.error(`Error in postRun for run ${this.id}:`, error);
@@ -751,6 +752,7 @@ export class RunTree {
  console.error(`Error in patchRun for run ${this.id}`, error);
  }
  }
+ this.child_runs = [];
  }
  toJSON() {
  return this._convertToCreate(this, undefined, false);
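In both the CJS and ESM builds of RunTree, postRun and patchRun now clear this.child_runs once the child runs have been submitted, so a later call on the same tree does not re-post children that were already flushed, and the finished children can be released. A minimal sketch of the pattern, using a hypothetical SketchRunTree rather than the actual langsmith class:

class SketchRunTree {
  child_runs: SketchRunTree[] = [];

  constructor(public id: string) {}

  async postRun(excludeChildRuns = true): Promise<void> {
    console.log(`POST run ${this.id}`); // stand-in for the real network call
    if (!excludeChildRuns) {
      for (const child of this.child_runs) {
        await child.postRun(false);
      }
      // The 0.4.1 change: drop references once the children are posted.
      this.child_runs = [];
    }
  }
}

const parent = new SketchRunTree("parent");
parent.child_runs.push(new SketchRunTree("child"));
await parent.postRun(false); // posts parent and child, then clears child_runs
await parent.postRun(false); // posts parent only; the child is not sent twice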
package/dist/traceable.cjs CHANGED
@@ -636,8 +636,13 @@ function traceable(wrappedFunc, config) {
  throw e;
  }
  finally {
- if (!finished)
+ if (!finished) {
+ // Call return() on the original iterator to trigger cleanup
+ if (iterator.return) {
+ await iterator.return(undefined);
+ }
  await currentRunTree?.end(undefined, "Cancelled");
+ }
  await handleRunOutputs({
  runTree: currentRunTree,
  rawOutputs: await handleChunks(chunks),
package/dist/traceable.js CHANGED
@@ -632,8 +632,13 @@ export function traceable(wrappedFunc, config) {
  throw e;
  }
  finally {
- if (!finished)
+ if (!finished) {
+ // Call return() on the original iterator to trigger cleanup
+ if (iterator.return) {
+ await iterator.return(undefined);
+ }
  await currentRunTree?.end(undefined, "Cancelled");
+ }
  await handleRunOutputs({
  runTree: currentRunTree,
  rawOutputs: await handleChunks(chunks),
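The traceable change affects cancelled streams: when the consumer of a traced async generator stops iterating before it is exhausted, the wrapper now calls return() on the underlying iterator before ending the run as "Cancelled", so the wrapped generator's own finally blocks (closing connections, releasing resources) still execute. A simplified TypeScript sketch of the mechanism, not the actual traceable implementation; wrapped and source are hypothetical names:

async function* wrapped<T>(inner: AsyncGenerator<T>): AsyncGenerator<T> {
  let finished = false;
  try {
    // A tracing wrapper drives the inner iterator manually so it can record
    // each chunk; an early consumer break is therefore not forwarded to
    // `inner` automatically.
    while (true) {
      const { value, done } = await inner.next();
      if (done) break;
      yield value;
    }
    finished = true;
  } finally {
    if (!finished) {
      // The 0.4.1 change: propagate the early termination so the inner
      // generator's `finally` blocks run before the run is marked Cancelled.
      if (inner.return) {
        await inner.return(undefined);
      }
      console.log('run ended as "Cancelled"');
    }
  }
}

async function* source() {
  try {
    yield 1;
    yield 2;
  } finally {
    console.log("source cleanup ran"); // now reached even on an early break
  }
}

for await (const n of wrapped(source())) {
  if (n === 1) break; // stop early; triggers the cleanup path above
}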
package/dist/wrappers/openai.cjs CHANGED
@@ -232,6 +232,33 @@ function processChatCompletion(outputs) {
  delete result.usage;
  return result;
  }
+ const getChatModelInvocationParamsFn = (provider, useResponsesApi) => {
+ return (payload) => {
+ if (typeof payload !== "object" || payload == null)
+ return undefined;
+ const params = payload;
+ const ls_stop = (typeof params.stop === "string" ? [params.stop] : params.stop) ??
+ undefined;
+ const ls_invocation_params = {};
+ for (const [key, value] of Object.entries(params)) {
+ if (TRACED_INVOCATION_KEYS.includes(key)) {
+ ls_invocation_params[key] = value;
+ }
+ }
+ if (useResponsesApi) {
+ ls_invocation_params.use_responses_api = true;
+ }
+ return {
+ ls_provider: provider,
+ ls_model_type: "chat",
+ ls_model_name: params.model,
+ ls_max_tokens: params.max_completion_tokens ?? params.max_tokens ?? undefined,
+ ls_temperature: params.temperature ?? undefined,
+ ls_stop,
+ ls_invocation_params,
+ };
+ };
+ };
  /**
  * Wraps an OpenAI client's completion methods, enabling automatic LangSmith
  * tracing. Method signatures are unchanged, with the exception that you can pass
@@ -279,29 +306,7 @@ const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: chatAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // we can safely do so, as the types are not exported in TSC
- const params = payload;
- const ls_stop = (typeof params.stop === "string" ? [params.stop] : params.stop) ??
- undefined;
- const ls_invocation_params = {};
- for (const [key, value] of Object.entries(params)) {
- if (TRACED_INVOCATION_KEYS.includes(key)) {
- ls_invocation_params[key] = value;
- }
- }
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model,
- ls_max_tokens: params.max_completion_tokens ?? params.max_tokens ?? undefined,
- ls_temperature: params.temperature ?? undefined,
- ls_stop,
- ls_invocation_params,
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, false),
  processOutputs: processChatCompletion,
  ...options,
  };
@@ -415,17 +420,7 @@ const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: responsesAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // Handle responses API parameters
- const params = payload;
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model || "unknown",
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, true),
  processOutputs: processChatCompletion,
  ...options,
  });
@@ -437,17 +432,7 @@ const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: responsesAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // Handle responses API parameters
- const params = payload;
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model || "unknown",
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, true),
  processOutputs: processChatCompletion,
  ...options,
  });
@@ -459,17 +444,7 @@ const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: responsesAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // Handle responses API parameters
- const params = payload;
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model || "unknown",
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, true),
  processOutputs: processChatCompletion,
  ...options,
  });
package/dist/wrappers/openai.js CHANGED
@@ -229,6 +229,33 @@ function processChatCompletion(outputs) {
  delete result.usage;
  return result;
  }
+ const getChatModelInvocationParamsFn = (provider, useResponsesApi) => {
+ return (payload) => {
+ if (typeof payload !== "object" || payload == null)
+ return undefined;
+ const params = payload;
+ const ls_stop = (typeof params.stop === "string" ? [params.stop] : params.stop) ??
+ undefined;
+ const ls_invocation_params = {};
+ for (const [key, value] of Object.entries(params)) {
+ if (TRACED_INVOCATION_KEYS.includes(key)) {
+ ls_invocation_params[key] = value;
+ }
+ }
+ if (useResponsesApi) {
+ ls_invocation_params.use_responses_api = true;
+ }
+ return {
+ ls_provider: provider,
+ ls_model_type: "chat",
+ ls_model_name: params.model,
+ ls_max_tokens: params.max_completion_tokens ?? params.max_tokens ?? undefined,
+ ls_temperature: params.temperature ?? undefined,
+ ls_stop,
+ ls_invocation_params,
+ };
+ };
+ };
  /**
  * Wraps an OpenAI client's completion methods, enabling automatic LangSmith
  * tracing. Method signatures are unchanged, with the exception that you can pass
@@ -276,29 +303,7 @@ export const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: chatAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // we can safely do so, as the types are not exported in TSC
- const params = payload;
- const ls_stop = (typeof params.stop === "string" ? [params.stop] : params.stop) ??
- undefined;
- const ls_invocation_params = {};
- for (const [key, value] of Object.entries(params)) {
- if (TRACED_INVOCATION_KEYS.includes(key)) {
- ls_invocation_params[key] = value;
- }
- }
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model,
- ls_max_tokens: params.max_completion_tokens ?? params.max_tokens ?? undefined,
- ls_temperature: params.temperature ?? undefined,
- ls_stop,
- ls_invocation_params,
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, false),
  processOutputs: processChatCompletion,
  ...options,
  };
@@ -412,17 +417,7 @@ export const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: responsesAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // Handle responses API parameters
- const params = payload;
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model || "unknown",
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, true),
  processOutputs: processChatCompletion,
  ...options,
  });
@@ -434,17 +429,7 @@ export const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: responsesAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // Handle responses API parameters
- const params = payload;
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model || "unknown",
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, true),
  processOutputs: processChatCompletion,
  ...options,
  });
@@ -456,17 +441,7 @@ export const wrapOpenAI = (openai, options) => {
  run_type: "llm",
  aggregator: responsesAggregator,
  argsConfigPath: [1, "langsmithExtra"],
- getInvocationParams: (payload) => {
- if (typeof payload !== "object" || payload == null)
- return undefined;
- // Handle responses API parameters
- const params = payload;
- return {
- ls_provider: provider,
- ls_model_type: "chat",
- ls_model_name: params.model || "unknown",
- };
- },
+ getInvocationParams: getChatModelInvocationParamsFn(provider, true),
  processOutputs: processChatCompletion,
  ...options,
  });
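The wrapOpenAI change is a refactor plus a metadata improvement: the chat-completions wrapper's getInvocationParams logic moves into the shared getChatModelInvocationParamsFn helper, and the three responses-API wrap points now use it as well, so traced responses calls record max tokens, temperature, stop sequences and the other traced invocation keys (with use_responses_api: true) instead of only the model name. An illustrative usage sketch; the model name and prompt are placeholders:

import OpenAI from "openai";
import { wrapOpenAI } from "langsmith/wrappers";

// Wrapping is unchanged; only the metadata attached to the traced run differs.
const client = wrapOpenAI(new OpenAI());

const response = await client.responses.create({
  model: "gpt-4o-mini",
  input: "Say hello in one short sentence.",
  temperature: 0,
});

// With 0.4.1, the LLM run for this call carries ls_temperature, ls_max_tokens,
// ls_stop and ls_invocation_params (plus use_responses_api: true), not just
// ls_model_name.
console.log(response.output_text);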
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langsmith",
- "version": "0.4.0",
+ "version": "0.4.1",
  "description": "Client library to connect to the LangSmith Observability and Evaluation Platform.",
  "packageManager": "yarn@1.22.19",
  "files": [