langsmith 0.3.87 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +0 -50
  2. package/dist/client.cjs +52 -31
  3. package/dist/client.d.ts +2 -9
  4. package/dist/client.js +53 -32
  5. package/dist/evaluation/_runner.cjs +34 -32
  6. package/dist/evaluation/_runner.js +35 -33
  7. package/dist/experimental/otel/setup.cjs +2 -0
  8. package/dist/experimental/otel/setup.d.ts +2 -0
  9. package/dist/experimental/otel/setup.js +2 -0
  10. package/dist/experimental/vercel/index.d.ts +11 -0
  11. package/dist/experimental/vercel/middleware.cjs +15 -4
  12. package/dist/experimental/vercel/middleware.d.ts +1 -0
  13. package/dist/experimental/vercel/middleware.js +15 -4
  14. package/dist/index.cjs +1 -1
  15. package/dist/index.d.ts +1 -1
  16. package/dist/index.js +1 -1
  17. package/dist/singletons/constants.cjs +2 -1
  18. package/dist/singletons/constants.d.ts +1 -0
  19. package/dist/singletons/constants.js +1 -0
  20. package/dist/traceable.cjs +107 -43
  21. package/dist/traceable.js +108 -44
  22. package/dist/utils/error.cjs +33 -1
  23. package/dist/utils/error.d.ts +13 -0
  24. package/dist/utils/error.js +30 -0
  25. package/dist/utils/jestlike/index.cjs +1 -1
  26. package/dist/utils/jestlike/index.js +1 -1
  27. package/dist/utils/jestlike/types.d.ts +0 -4
  28. package/package.json +1 -40
  29. package/dist/evaluation/langchain.cjs +0 -54
  30. package/dist/evaluation/langchain.d.ts +0 -21
  31. package/dist/evaluation/langchain.js +0 -51
  32. package/dist/utils/vercel.types.cjs +0 -2
  33. package/dist/utils/vercel.types.d.ts +0 -1
  34. package/dist/utils/vercel.types.js +0 -1
  35. package/dist/vercel.cjs +0 -866
  36. package/dist/vercel.d.ts +0 -87
  37. package/dist/vercel.js +0 -861
  38. package/dist/wrappers/vercel.cjs +0 -101
  39. package/dist/wrappers/vercel.d.ts +0 -31
  40. package/dist/wrappers/vercel.js +0 -97
  41. package/evaluation/langchain.cjs +0 -1
  42. package/evaluation/langchain.d.cts +0 -1
  43. package/evaluation/langchain.d.ts +0 -1
  44. package/evaluation/langchain.js +0 -1
  45. package/vercel.cjs +0 -1
  46. package/vercel.d.cts +0 -1
  47. package/vercel.d.ts +0 -1
  48. package/vercel.js +0 -1
  49. package/wrappers/vercel.cjs +0 -1
  50. package/wrappers/vercel.d.cts +0 -1
  51. package/wrappers/vercel.d.ts +0 -1
  52. package/wrappers/vercel.js +0 -1
package/README.md CHANGED
@@ -470,56 +470,6 @@ for (const run of runs) {
470
470
  }
471
471
  ```
472
472
 
473
- # Evaluating Runs
474
-
475
- Check out the [LangSmith Testing & Evaluation dos](https://docs.smith.langchain.com/docs/evaluation/) for up-to-date workflows.
476
-
477
- For generating automated feedback on individual runs, you can run evaluations directly using the LangSmith client.
478
-
479
- ```ts
480
- import { StringEvaluator } from "langsmith/evaluation";
481
-
482
- function jaccardChars(output: string, answer: string): number {
483
- const predictionChars = new Set(output.trim().toLowerCase());
484
- const answerChars = new Set(answer.trim().toLowerCase());
485
- const intersection = [...predictionChars].filter((x) => answerChars.has(x));
486
- const union = new Set([...predictionChars, ...answerChars]);
487
- return intersection.length / union.size;
488
- }
489
-
490
- async function grader(config: {
491
- input: string;
492
- prediction: string;
493
- answer?: string;
494
- }): Promise<{ score: number; value: string }> {
495
- let value: string;
496
- let score: number;
497
- if (config.answer === null || config.answer === undefined) {
498
- value = "AMBIGUOUS";
499
- score = 0.5;
500
- } else {
501
- score = jaccardChars(config.prediction, config.answer);
502
- value = score > 0.9 ? "CORRECT" : "INCORRECT";
503
- }
504
- return { score: score, value: value };
505
- }
506
-
507
- const evaluator = new StringEvaluator({
508
- evaluationName: "Jaccard",
509
- gradingFunction: grader,
510
- });
511
-
512
- const runs = await client.listRuns({
513
- projectName: "my_project",
514
- executionOrder: 1,
515
- error: false,
516
- });
517
-
518
- for (const run of runs) {
519
- client.evaluateRun(run, evaluator);
520
- }
521
- ```
522
-
523
473
  ## Additional Documentation
524
474
 
525
475
  To learn more about the LangSmith platform, check out the [docs](https://docs.smith.langchain.com/docs/).
package/dist/client.cjs CHANGED
@@ -416,6 +416,12 @@ class Client {
416
416
  writable: true,
417
417
  value: false
418
418
  });
419
+ Object.defineProperty(this, "_multipartDisabled", {
420
+ enumerable: true,
421
+ configurable: true,
422
+ writable: true,
423
+ value: false
424
+ });
419
425
  Object.defineProperty(this, "debug", {
420
426
  enumerable: true,
421
427
  configurable: true,
@@ -708,7 +714,7 @@ class Client {
708
714
  async _getBatchSizeLimitBytes() {
709
715
  const serverInfo = await this._ensureServerInfo();
710
716
  return (this.batchSizeBytesLimit ??
711
- serverInfo.batch_ingest_config?.size_limit_bytes ??
717
+ serverInfo?.batch_ingest_config?.size_limit_bytes ??
712
718
  exports.DEFAULT_UNCOMPRESSED_BATCH_SIZE_LIMIT_BYTES);
713
719
  }
714
720
  /**
@@ -717,7 +723,7 @@ class Client {
717
723
  async _getBatchSizeLimit() {
718
724
  const serverInfo = await this._ensureServerInfo();
719
725
  return (this.batchSizeLimit ??
720
- serverInfo.batch_ingest_config?.size_limit ??
726
+ serverInfo?.batch_ingest_config?.size_limit ??
721
727
  DEFAULT_BATCH_SIZE_LIMIT);
722
728
  }
723
729
  async _getDatasetExamplesMultiPartSupport() {
@@ -780,13 +786,32 @@ class Client {
780
786
  .map((item) => item.item),
781
787
  };
782
788
  const serverInfo = await this._ensureServerInfo();
783
- if (serverInfo?.batch_ingest_config?.use_multipart_endpoint) {
789
+ const useMultipart = !this._multipartDisabled &&
790
+ (serverInfo?.batch_ingest_config?.use_multipart_endpoint ?? true);
791
+ if (useMultipart) {
784
792
  const useGzip = serverInfo?.instance_flags?.gzip_body_enabled;
785
- await this.multipartIngestRuns(ingestParams, {
786
- ...options,
787
- useGzip,
788
- sizeBytes: batchSizeBytes,
789
- });
793
+ try {
794
+ await this.multipartIngestRuns(ingestParams, {
795
+ ...options,
796
+ useGzip,
797
+ sizeBytes: batchSizeBytes,
798
+ });
799
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
800
+ }
801
+ catch (e) {
802
+ if ((0, error_js_1.isLangSmithNotFoundError)(e)) {
803
+ // Fallback to batch ingest if multipart endpoint returns 404
804
+ // Disable multipart for future requests
805
+ this._multipartDisabled = true;
806
+ await this.batchIngestRuns(ingestParams, {
807
+ ...options,
808
+ sizeBytes: batchSizeBytes,
809
+ });
810
+ }
811
+ else {
812
+ throw e;
813
+ }
814
+ }
790
815
  }
791
816
  else {
792
817
  await this.batchIngestRuns(ingestParams, {
@@ -1315,6 +1340,10 @@ class Client {
1315
1340
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
1316
1341
  }
1317
1342
  catch (e) {
1343
+ // Re-throw 404 errors so caller can fall back to batch ingest
1344
+ if ((0, error_js_1.isLangSmithNotFoundError)(e)) {
1345
+ throw e;
1346
+ }
1318
1347
  console.warn(`${e.message.trim()}\n\nContext: ${context}`);
1319
1348
  }
1320
1349
  }
@@ -2895,29 +2924,6 @@ class Client {
2895
2924
  return res;
2896
2925
  });
2897
2926
  }
2898
- /**
2899
- * @deprecated This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead.
2900
- */
2901
- async evaluateRun(run, evaluator, { sourceInfo, loadChildRuns, referenceExample, } = { loadChildRuns: false }) {
2902
- (0, warn_js_1.warnOnce)("This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead.");
2903
- let run_;
2904
- if (typeof run === "string") {
2905
- run_ = await this.readRun(run, { loadChildRuns });
2906
- }
2907
- else if (typeof run === "object" && "id" in run) {
2908
- run_ = run;
2909
- }
2910
- else {
2911
- throw new Error(`Invalid run type: ${typeof run}`);
2912
- }
2913
- if (run_.reference_example_id !== null &&
2914
- run_.reference_example_id !== undefined) {
2915
- referenceExample = await this.readExample(run_.reference_example_id);
2916
- }
2917
- const feedbackResult = await evaluator.evaluateRun(run_, referenceExample);
2918
- const [_, feedbacks] = await this._logEvaluationFeedback(feedbackResult, run_, sourceInfo);
2919
- return feedbacks[0];
2920
- }
2921
2927
  async createFeedback(runId, key, { score, value, correction, comment, sourceInfo, feedbackSourceType = "api", sourceRunId, feedbackId, feedbackConfig, projectId, comparativeExperimentId, }) {
2922
2928
  if (!runId && !projectId) {
2923
2929
  throw new Error("One of runId or projectId must be provided");
@@ -3947,6 +3953,21 @@ class Client {
3947
3953
  console.warn("[WARNING]: When tracing in manual flush mode, you must call `await client.flush()` manually to submit trace batches.");
3948
3954
  return Promise.resolve();
3949
3955
  }
3956
+ /**
3957
+ * traceables use a backgrounded promise before updating runs to avoid blocking
3958
+ * and to allow waiting for child runs to end. Waiting a small amount of time
3959
+ * here ensures that they are able to enqueue their run operation before we await
3960
+ * queued run operations below:
3961
+ *
3962
+ * ```ts
3963
+ * const run = await traceable(async () => {
3964
+ * return "Hello, world!";
3965
+ * }, { client })();
3966
+ *
3967
+ * await client.awaitPendingTraceBatches();
3968
+ * ```
3969
+ */
3970
+ await new Promise((resolve) => setTimeout(resolve, 1));
3950
3971
  await Promise.all([
3951
3972
  ...this.autoBatchQueue.items.map(({ itemPromise }) => itemPromise),
3952
3973
  this.batchIngestCaller.queue.onIdle(),
package/dist/client.d.ts CHANGED
@@ -1,7 +1,7 @@
1
1
  import type { OTELContext } from "./experimental/otel/types.js";
2
2
  import { AsyncCallerParams } from "./utils/async_caller.js";
3
3
  import { ComparativeExperiment, DataType, Dataset, DatasetDiffInfo, DatasetShareSchema, Example, ExampleCreate, ExampleUpdate, ExampleUpdateWithoutId, Feedback, FeedbackConfig, FeedbackIngestToken, KVMap, LangChainBaseMessage, LangSmithSettings, LikePromptResponse, Prompt, PromptCommit, PromptSortField, Run, RunCreate, RunUpdate, ScoreType, ExampleSearch, TimeDelta, TracerSession, TracerSessionResult, ValueType, AnnotationQueue, RunWithAnnotationQueueInfo, Attachments, UploadExamplesResponse, UpdateExamplesResponse, DatasetVersion, AnnotationQueueWithDetails } from "./schemas.js";
4
- import { EvaluationResult, EvaluationResults, RunEvaluator } from "./evaluation/evaluator.js";
4
+ import { EvaluationResult, EvaluationResults } from "./evaluation/evaluator.js";
5
5
  export interface ClientConfig {
6
6
  apiUrl?: string;
7
7
  apiKey?: string;
@@ -353,6 +353,7 @@ export declare class Client implements LangSmithTracingClientInterface {
353
353
  private cachedLSEnvVarsForMetadata?;
354
354
  private get _fetch();
355
355
  private multipartStreamingDisabled;
356
+ private _multipartDisabled;
356
357
  debug: boolean;
357
358
  constructor(config?: ClientConfig);
358
359
  static getDefaultClientConfig(): {
@@ -796,14 +797,6 @@ export declare class Client implements LangSmithTracingClientInterface {
796
797
  exampleIds: string[];
797
798
  remove?: boolean;
798
799
  }): Promise<void>;
799
- /**
800
- * @deprecated This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead.
801
- */
802
- evaluateRun(run: Run | string, evaluator: RunEvaluator, { sourceInfo, loadChildRuns, referenceExample, }?: {
803
- sourceInfo?: KVMap;
804
- loadChildRuns: boolean;
805
- referenceExample?: Example;
806
- }): Promise<Feedback>;
807
800
  createFeedback(runId: string | null, key: string, { score, value, correction, comment, sourceInfo, feedbackSourceType, sourceRunId, feedbackId, feedbackConfig, projectId, comparativeExperimentId, }: {
808
801
  score?: ScoreType;
809
802
  value?: ValueType;
package/dist/client.js CHANGED
@@ -8,7 +8,7 @@ import { __version__ } from "./index.js";
8
8
  import { assertUuid } from "./utils/_uuid.js";
9
9
  import { warnOnce } from "./utils/warn.js";
10
10
  import { parsePromptIdentifier } from "./utils/prompts.js";
11
- import { raiseForStatus } from "./utils/error.js";
11
+ import { raiseForStatus, isLangSmithNotFoundError } from "./utils/error.js";
12
12
  import { _globalFetchImplementationIsNodeFetch, _getFetchImplementation, } from "./singletons/fetch.js";
13
13
  import { serialize as serializePayloadForTracing } from "./utils/fast-safe-stringify/index.js";
14
14
  export function mergeRuntimeEnvIntoRun(run, cachedEnvVars, omitTracedRuntimeInfo) {
@@ -378,6 +378,12 @@ export class Client {
378
378
  writable: true,
379
379
  value: false
380
380
  });
381
+ Object.defineProperty(this, "_multipartDisabled", {
382
+ enumerable: true,
383
+ configurable: true,
384
+ writable: true,
385
+ value: false
386
+ });
381
387
  Object.defineProperty(this, "debug", {
382
388
  enumerable: true,
383
389
  configurable: true,
@@ -670,7 +676,7 @@ export class Client {
670
676
  async _getBatchSizeLimitBytes() {
671
677
  const serverInfo = await this._ensureServerInfo();
672
678
  return (this.batchSizeBytesLimit ??
673
- serverInfo.batch_ingest_config?.size_limit_bytes ??
679
+ serverInfo?.batch_ingest_config?.size_limit_bytes ??
674
680
  DEFAULT_UNCOMPRESSED_BATCH_SIZE_LIMIT_BYTES);
675
681
  }
676
682
  /**
@@ -679,7 +685,7 @@ export class Client {
679
685
  async _getBatchSizeLimit() {
680
686
  const serverInfo = await this._ensureServerInfo();
681
687
  return (this.batchSizeLimit ??
682
- serverInfo.batch_ingest_config?.size_limit ??
688
+ serverInfo?.batch_ingest_config?.size_limit ??
683
689
  DEFAULT_BATCH_SIZE_LIMIT);
684
690
  }
685
691
  async _getDatasetExamplesMultiPartSupport() {
@@ -742,13 +748,32 @@ export class Client {
742
748
  .map((item) => item.item),
743
749
  };
744
750
  const serverInfo = await this._ensureServerInfo();
745
- if (serverInfo?.batch_ingest_config?.use_multipart_endpoint) {
751
+ const useMultipart = !this._multipartDisabled &&
752
+ (serverInfo?.batch_ingest_config?.use_multipart_endpoint ?? true);
753
+ if (useMultipart) {
746
754
  const useGzip = serverInfo?.instance_flags?.gzip_body_enabled;
747
- await this.multipartIngestRuns(ingestParams, {
748
- ...options,
749
- useGzip,
750
- sizeBytes: batchSizeBytes,
751
- });
755
+ try {
756
+ await this.multipartIngestRuns(ingestParams, {
757
+ ...options,
758
+ useGzip,
759
+ sizeBytes: batchSizeBytes,
760
+ });
761
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
762
+ }
763
+ catch (e) {
764
+ if (isLangSmithNotFoundError(e)) {
765
+ // Fallback to batch ingest if multipart endpoint returns 404
766
+ // Disable multipart for future requests
767
+ this._multipartDisabled = true;
768
+ await this.batchIngestRuns(ingestParams, {
769
+ ...options,
770
+ sizeBytes: batchSizeBytes,
771
+ });
772
+ }
773
+ else {
774
+ throw e;
775
+ }
776
+ }
752
777
  }
753
778
  else {
754
779
  await this.batchIngestRuns(ingestParams, {
@@ -1277,6 +1302,10 @@ export class Client {
1277
1302
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
1278
1303
  }
1279
1304
  catch (e) {
1305
+ // Re-throw 404 errors so caller can fall back to batch ingest
1306
+ if (isLangSmithNotFoundError(e)) {
1307
+ throw e;
1308
+ }
1280
1309
  console.warn(`${e.message.trim()}\n\nContext: ${context}`);
1281
1310
  }
1282
1311
  }
@@ -2857,29 +2886,6 @@ export class Client {
2857
2886
  return res;
2858
2887
  });
2859
2888
  }
2860
- /**
2861
- * @deprecated This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead.
2862
- */
2863
- async evaluateRun(run, evaluator, { sourceInfo, loadChildRuns, referenceExample, } = { loadChildRuns: false }) {
2864
- warnOnce("This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead.");
2865
- let run_;
2866
- if (typeof run === "string") {
2867
- run_ = await this.readRun(run, { loadChildRuns });
2868
- }
2869
- else if (typeof run === "object" && "id" in run) {
2870
- run_ = run;
2871
- }
2872
- else {
2873
- throw new Error(`Invalid run type: ${typeof run}`);
2874
- }
2875
- if (run_.reference_example_id !== null &&
2876
- run_.reference_example_id !== undefined) {
2877
- referenceExample = await this.readExample(run_.reference_example_id);
2878
- }
2879
- const feedbackResult = await evaluator.evaluateRun(run_, referenceExample);
2880
- const [_, feedbacks] = await this._logEvaluationFeedback(feedbackResult, run_, sourceInfo);
2881
- return feedbacks[0];
2882
- }
2883
2889
  async createFeedback(runId, key, { score, value, correction, comment, sourceInfo, feedbackSourceType = "api", sourceRunId, feedbackId, feedbackConfig, projectId, comparativeExperimentId, }) {
2884
2890
  if (!runId && !projectId) {
2885
2891
  throw new Error("One of runId or projectId must be provided");
@@ -3909,6 +3915,21 @@ export class Client {
3909
3915
  console.warn("[WARNING]: When tracing in manual flush mode, you must call `await client.flush()` manually to submit trace batches.");
3910
3916
  return Promise.resolve();
3911
3917
  }
3918
+ /**
3919
+ * traceables use a backgrounded promise before updating runs to avoid blocking
3920
+ * and to allow waiting for child runs to end. Waiting a small amount of time
3921
+ * here ensures that they are able to enqueue their run operation before we await
3922
+ * queued run operations below:
3923
+ *
3924
+ * ```ts
3925
+ * const run = await traceable(async () => {
3926
+ * return "Hello, world!";
3927
+ * }, { client })();
3928
+ *
3929
+ * await client.awaitPendingTraceBatches();
3930
+ * ```
3931
+ */
3932
+ await new Promise((resolve) => setTimeout(resolve, 1));
3912
3933
  await Promise.all([
3913
3934
  ...this.autoBatchQueue.items.map(({ itemPromise }) => itemPromise),
3914
3935
  this.batchIngestCaller.queue.onIdle(),
@@ -700,7 +700,7 @@ async function _forward(fn, example, experimentName, metadata, client, includeAt
700
700
  const _getRun = (r) => {
701
701
  run = r;
702
702
  };
703
- const options = {
703
+ const defaultOptions = {
704
704
  reference_example_id: example.id,
705
705
  on_end: _getRun,
706
706
  project_name: experimentName,
@@ -713,37 +713,39 @@ async function _forward(fn, example, experimentName, metadata, client, includeAt
713
713
  client,
714
714
  tracingEnabled: true,
715
715
  };
716
- const wrappedFn = "invoke" in fn
717
- ? (0, traceable_js_1.traceable)(async (inputs) => {
718
- let langChainCallbacks;
719
- try {
720
- // TODO: Deprecate this and rely on interop on 0.2 minor bump.
721
- const { getLangchainCallbacks } = await import("../langchain.js");
722
- langChainCallbacks = await getLangchainCallbacks();
723
- }
724
- catch {
725
- // no-op
726
- }
727
- // Issue with retrieving LangChain callbacks, rely on interop
728
- if (langChainCallbacks === undefined && !includeAttachments) {
729
- return await fn.invoke(inputs);
730
- }
731
- else if (langChainCallbacks === undefined && includeAttachments) {
732
- return await fn.invoke(inputs, {
733
- attachments: example.attachments,
734
- });
735
- }
736
- else if (!includeAttachments) {
737
- return await fn.invoke(inputs, { callbacks: langChainCallbacks });
738
- }
739
- else {
740
- return await fn.invoke(inputs, {
741
- attachments: example.attachments,
742
- callbacks: langChainCallbacks,
743
- });
744
- }
745
- }, options)
746
- : (0, traceable_js_1.traceable)(fn, options);
716
+ const wrappedFn = (0, traceable_js_1.isTraceableFunction)(fn)
717
+ ? fn
718
+ : "invoke" in fn
719
+ ? (0, traceable_js_1.traceable)(async (inputs) => {
720
+ let langChainCallbacks;
721
+ try {
722
+ // TODO: Deprecate this and rely on interop on 0.2 minor bump.
723
+ const { getLangchainCallbacks } = await import("../langchain.js");
724
+ langChainCallbacks = await getLangchainCallbacks();
725
+ }
726
+ catch {
727
+ // no-op
728
+ }
729
+ // Issue with retrieving LangChain callbacks, rely on interop
730
+ if (langChainCallbacks === undefined && !includeAttachments) {
731
+ return await fn.invoke(inputs);
732
+ }
733
+ else if (langChainCallbacks === undefined && includeAttachments) {
734
+ return await fn.invoke(inputs, {
735
+ attachments: example.attachments,
736
+ });
737
+ }
738
+ else if (!includeAttachments) {
739
+ return await fn.invoke(inputs, { callbacks: langChainCallbacks });
740
+ }
741
+ else {
742
+ return await fn.invoke(inputs, {
743
+ attachments: example.attachments,
744
+ callbacks: langChainCallbacks,
745
+ });
746
+ }
747
+ }, defaultOptions)
748
+ : (0, traceable_js_1.traceable)(fn, defaultOptions);
747
749
  try {
748
750
  if (includeAttachments && !("invoke" in fn)) {
749
751
  await wrappedFn(example.inputs, { attachments: example.attachments });
@@ -1,5 +1,5 @@
1
1
  import { Client } from "../index.js";
2
- import { traceable } from "../traceable.js";
2
+ import { isTraceableFunction, traceable } from "../traceable.js";
3
3
  import { getDefaultRevisionId, getGitInfo } from "../utils/_git.js";
4
4
  import { assertUuid } from "../utils/_uuid.js";
5
5
  import { AsyncCaller } from "../utils/async_caller.js";
@@ -695,7 +695,7 @@ async function _forward(fn, example, experimentName, metadata, client, includeAt
695
695
  const _getRun = (r) => {
696
696
  run = r;
697
697
  };
698
- const options = {
698
+ const defaultOptions = {
699
699
  reference_example_id: example.id,
700
700
  on_end: _getRun,
701
701
  project_name: experimentName,
@@ -708,37 +708,39 @@ async function _forward(fn, example, experimentName, metadata, client, includeAt
708
708
  client,
709
709
  tracingEnabled: true,
710
710
  };
711
- const wrappedFn = "invoke" in fn
712
- ? traceable(async (inputs) => {
713
- let langChainCallbacks;
714
- try {
715
- // TODO: Deprecate this and rely on interop on 0.2 minor bump.
716
- const { getLangchainCallbacks } = await import("../langchain.js");
717
- langChainCallbacks = await getLangchainCallbacks();
718
- }
719
- catch {
720
- // no-op
721
- }
722
- // Issue with retrieving LangChain callbacks, rely on interop
723
- if (langChainCallbacks === undefined && !includeAttachments) {
724
- return await fn.invoke(inputs);
725
- }
726
- else if (langChainCallbacks === undefined && includeAttachments) {
727
- return await fn.invoke(inputs, {
728
- attachments: example.attachments,
729
- });
730
- }
731
- else if (!includeAttachments) {
732
- return await fn.invoke(inputs, { callbacks: langChainCallbacks });
733
- }
734
- else {
735
- return await fn.invoke(inputs, {
736
- attachments: example.attachments,
737
- callbacks: langChainCallbacks,
738
- });
739
- }
740
- }, options)
741
- : traceable(fn, options);
711
+ const wrappedFn = isTraceableFunction(fn)
712
+ ? fn
713
+ : "invoke" in fn
714
+ ? traceable(async (inputs) => {
715
+ let langChainCallbacks;
716
+ try {
717
+ // TODO: Deprecate this and rely on interop on 0.2 minor bump.
718
+ const { getLangchainCallbacks } = await import("../langchain.js");
719
+ langChainCallbacks = await getLangchainCallbacks();
720
+ }
721
+ catch {
722
+ // no-op
723
+ }
724
+ // Issue with retrieving LangChain callbacks, rely on interop
725
+ if (langChainCallbacks === undefined && !includeAttachments) {
726
+ return await fn.invoke(inputs);
727
+ }
728
+ else if (langChainCallbacks === undefined && includeAttachments) {
729
+ return await fn.invoke(inputs, {
730
+ attachments: example.attachments,
731
+ });
732
+ }
733
+ else if (!includeAttachments) {
734
+ return await fn.invoke(inputs, { callbacks: langChainCallbacks });
735
+ }
736
+ else {
737
+ return await fn.invoke(inputs, {
738
+ attachments: example.attachments,
739
+ callbacks: langChainCallbacks,
740
+ });
741
+ }
742
+ }, defaultOptions)
743
+ : traceable(fn, defaultOptions);
742
744
  try {
743
745
  if (includeAttachments && !("invoke" in fn)) {
744
746
  await wrappedFn(example.inputs, { attachments: example.attachments });
@@ -11,6 +11,8 @@ const exporter_js_1 = require("./exporter.cjs");
11
11
  const processor_js_1 = require("./processor.cjs");
12
12
  const otel_js_1 = require("../../singletons/otel.cjs");
13
13
  /**
14
+ * @deprecated Use non-OTEL `wrapAISDK` from `langsmith/experimental/vercel` instead.
15
+ *
14
16
  * Initializes OpenTelemetry with LangSmith-specific configuration for tracing.
15
17
  *
16
18
  * Call this once at the start of your application to enable tracing integration. Sets global
@@ -27,6 +27,8 @@ export type InitializeOTELConfig = {
27
27
  exporterConfig?: LangSmithOTLPTraceExporterConfig;
28
28
  };
29
29
  /**
30
+ * @deprecated Use non-OTEL `wrapAISDK` from `langsmith/experimental/vercel` instead.
31
+ *
30
32
  * Initializes OpenTelemetry with LangSmith-specific configuration for tracing.
31
33
  *
32
34
  * Call this once at the start of your application to enable tracing integration. Sets global
@@ -8,6 +8,8 @@ import { LangSmithOTLPTraceExporter, } from "./exporter.js";
8
8
  import { LangSmithOTLPSpanProcessor } from "./processor.js";
9
9
  import { setDefaultOTLPTracerComponents, setOTELInstances, } from "../../singletons/otel.js";
10
10
  /**
11
+ * @deprecated Use non-OTEL `wrapAISDK` from `langsmith/experimental/vercel` instead.
12
+ *
11
13
  * Initializes OpenTelemetry with LangSmith-specific configuration for tracing.
12
14
  *
13
15
  * Call this once at the start of your application to enable tracing integration. Sets global
@@ -189,6 +189,17 @@ export type WrapAISDKConfig<T extends (...args: any[]) => any = (...args: any[])
189
189
  * @default false
190
190
  */
191
191
  traceResponseMetadata?: boolean;
192
+ /**
193
+ * Whether to include raw HTTP request and response details in traces from the
194
+ * underlying model calls (doGenerate/doStream).
195
+ *
196
+ * When enabled, traces will include the full HTTP request body, response body,
197
+ * headers, and other low-level details. This can be useful for debugging provider
198
+ * issues but creates very verbose traces.
199
+ *
200
+ * @default false
201
+ */
202
+ traceRawHttp?: boolean;
192
203
  };
193
204
  /**
194
205
  * Wraps LangSmith config in a way that matches AI SDK provider types.
@@ -18,8 +18,17 @@ const _formatTracedInputs = (params) => {
18
18
  }
19
19
  return rest;
20
20
  };
21
- const _formatTracedOutputs = (outputs) => {
22
- const formattedOutputs = { ...outputs };
21
+ const _formatTracedOutputs = (outputs, includeHttpDetails = false) => {
22
+ let formattedOutputs;
23
+ if (includeHttpDetails) {
24
+ // Include all fields including raw request/response/usage
25
+ formattedOutputs = { ...outputs };
26
+ }
27
+ else {
28
+ // Extract only the fields we want to trace, excluding raw request/response/usage
29
+ const { request: _, response: __, ...messageFields } = outputs;
30
+ formattedOutputs = { ...messageFields };
31
+ }
23
32
  if (formattedOutputs.role == null) {
24
33
  formattedOutputs.role = formattedOutputs.type ?? "assistant";
25
34
  }
@@ -93,8 +102,10 @@ function LangSmithMiddleware(config) {
93
102
  },
94
103
  processOutputs: (outputs) => {
95
104
  const typedOutputs = outputs;
96
- const outputFormatter = lsConfig?.processOutputs ?? _formatTracedOutputs;
97
- return outputFormatter(typedOutputs);
105
+ if (lsConfig?.processOutputs) {
106
+ return lsConfig.processOutputs(typedOutputs);
107
+ }
108
+ return _formatTracedOutputs(typedOutputs, lsConfig?.traceRawHttp);
98
109
  },
99
110
  });
100
111
  const res = await traceableFunc(params);
@@ -23,5 +23,6 @@ export declare function LangSmithMiddleware(config?: {
23
23
  lsConfig?: Partial<Omit<RunTreeConfig, "inputs" | "outputs" | "run_type">> & {
24
24
  processInputs?: (inputs: Record<string, unknown>) => Record<string, unknown>;
25
25
  processOutputs?: (outputs: Record<string, unknown>) => Record<string, unknown> | Promise<Record<string, unknown>>;
26
+ traceRawHttp?: boolean;
26
27
  };
27
28
  }): LanguageModelV2Middleware;
@@ -15,8 +15,17 @@ const _formatTracedInputs = (params) => {
15
15
  }
16
16
  return rest;
17
17
  };
18
- const _formatTracedOutputs = (outputs) => {
19
- const formattedOutputs = { ...outputs };
18
+ const _formatTracedOutputs = (outputs, includeHttpDetails = false) => {
19
+ let formattedOutputs;
20
+ if (includeHttpDetails) {
21
+ // Include all fields including raw request/response/usage
22
+ formattedOutputs = { ...outputs };
23
+ }
24
+ else {
25
+ // Extract only the fields we want to trace, excluding raw request/response/usage
26
+ const { request: _, response: __, ...messageFields } = outputs;
27
+ formattedOutputs = { ...messageFields };
28
+ }
20
29
  if (formattedOutputs.role == null) {
21
30
  formattedOutputs.role = formattedOutputs.type ?? "assistant";
22
31
  }
@@ -90,8 +99,10 @@ export function LangSmithMiddleware(config) {
90
99
  },
91
100
  processOutputs: (outputs) => {
92
101
  const typedOutputs = outputs;
93
- const outputFormatter = lsConfig?.processOutputs ?? _formatTracedOutputs;
94
- return outputFormatter(typedOutputs);
102
+ if (lsConfig?.processOutputs) {
103
+ return lsConfig.processOutputs(typedOutputs);
104
+ }
105
+ return _formatTracedOutputs(typedOutputs, lsConfig?.traceRawHttp);
95
106
  },
96
107
  });
97
108
  const res = await traceableFunc(params);
package/dist/index.cjs CHANGED
@@ -13,4 +13,4 @@ var uuid_js_1 = require("./uuid.cjs");
13
13
  Object.defineProperty(exports, "uuid7", { enumerable: true, get: function () { return uuid_js_1.uuid7; } });
14
14
  Object.defineProperty(exports, "uuid7FromTime", { enumerable: true, get: function () { return uuid_js_1.uuid7FromTime; } });
15
15
  // Update using yarn bump-version
16
- exports.__version__ = "0.3.87";
16
+ exports.__version__ = "0.4.0";