@langchain/core 0.2.2 → 0.2.4

This diff shows the published contents of two package versions as they appear in their public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -83,7 +83,7 @@ Streaming (and streaming of intermediate steps) is needed to show the user that
  Async interfaces are nice when moving into production.
  Rather than having to write multiple implementations for all of those, LCEL allows you to write a runnable once and invoke it in many different ways.

- For more check out the [LCEL docs](https://js.langchain.com/docs/expression_language/).
+ For more check out the [LCEL docs](https://js.langchain.com/v0.2/docs/concepts#langchain-expression-language).

  ![LangChain Stack](../docs/core_docs/static/img/langchain_stack_feb_2024.webp)

@@ -601,7 +601,8 @@ class CallbackManager extends BaseCallbackManager {
  }
  const verboseEnabled = (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_VERBOSE") === "true" ||
      options?.verbose;
- const tracingV2Enabled = (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_TRACING_V2") === "true";
+ const tracingV2Enabled = (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_TRACING_V2") === "true" ||
+     (0, env_js_1.getEnvironmentVariable)("LANGSMITH_TRACING") === "true";
  const tracingEnabled = tracingV2Enabled ||
      ((0, env_js_1.getEnvironmentVariable)("LANGCHAIN_TRACING") ?? false);
  if (verboseEnabled || tracingEnabled) {
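
In effect, `LANGSMITH_TRACING` now works as an alias for `LANGCHAIN_TRACING_V2`. A minimal sketch of the resulting behavior; the boolean mirrors the compiled check above:

    // Either variable enables LangSmith ("v2") tracing as of 0.2.4.
    process.env.LANGSMITH_TRACING = "true";

    const tracingV2Enabled =
      process.env.LANGCHAIN_TRACING_V2 === "true" ||
      process.env.LANGSMITH_TRACING === "true";
    console.log(tracingV2Enabled); // true
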
@@ -616,7 +617,12 @@ class CallbackManager extends BaseCallbackManager {
  if (tracingEnabled &&
      !callbackManager.handlers.some((handler) => handler.name === "langchain_tracer")) {
      if (tracingV2Enabled) {
-         callbackManager.addHandler(await (0, initialize_js_1.getTracingV2CallbackHandler)(), true);
+         const tracerV2 = await (0, initialize_js_1.getTracingV2CallbackHandler)();
+         callbackManager.addHandler(tracerV2, true);
+         // handoff between langchain and langsmith/traceable
+         // override the parent run ID
+         callbackManager._parentRunId =
+             tracerV2.getTraceableRunTree()?.id ?? callbackManager._parentRunId;
      }
  }
  }
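
The `_parentRunId` override above is what lets LangChain runs nest under an enclosing `langsmith` `traceable` call instead of starting a fresh trace. A minimal sketch of the interop, assuming tracing is enabled; the pipeline body is illustrative, while `traceable` and `FakeListChatModel` are real exports:

    import { traceable } from "langsmith/traceable";
    import { FakeListChatModel } from "@langchain/core/utils/testing";

    const model = new FakeListChatModel({ responses: ["ok"] });

    const pipeline = traceable(
      async (question: string) => {
        // With 0.2.4 the CallbackManager reads the surrounding traceable
        // run tree and parents this model run under it.
        return model.invoke(question);
      },
      { name: "my-pipeline" }
    );

    await pipeline("Does the handoff work?");
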
@@ -120,7 +120,7 @@ export declare class CallbackManager extends BaseCallbackManager implements Base
  metadata: Record<string, unknown>;
  inheritableMetadata: Record<string, unknown>;
  name: string;
- readonly _parentRunId?: string;
+ _parentRunId?: string;
  constructor(parentRunId?: string, options?: {
      handlers?: BaseCallbackHandler[];
      inheritableHandlers?: BaseCallbackHandler[];
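
The `readonly` modifier is dropped because the handoff shown earlier assigns `_parentRunId` after construction. A tiny sketch of what the old declaration would reject; the names here are stand-ins:

    declare const callbackManager: { _parentRunId?: string };
    declare const traceableRunId: string | undefined;

    // Legal once `readonly` is gone; previously a compile error:
    // "Cannot assign to '_parentRunId' because it is a read-only property."
    callbackManager._parentRunId = traceableRunId ?? callbackManager._parentRunId;
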
@@ -592,7 +592,8 @@ export class CallbackManager extends BaseCallbackManager {
  }
  const verboseEnabled = getEnvironmentVariable("LANGCHAIN_VERBOSE") === "true" ||
      options?.verbose;
- const tracingV2Enabled = getEnvironmentVariable("LANGCHAIN_TRACING_V2") === "true";
+ const tracingV2Enabled = getEnvironmentVariable("LANGCHAIN_TRACING_V2") === "true" ||
+     getEnvironmentVariable("LANGSMITH_TRACING") === "true";
  const tracingEnabled = tracingV2Enabled ||
      (getEnvironmentVariable("LANGCHAIN_TRACING") ?? false);
  if (verboseEnabled || tracingEnabled) {
@@ -607,7 +608,12 @@ export class CallbackManager extends BaseCallbackManager {
  if (tracingEnabled &&
      !callbackManager.handlers.some((handler) => handler.name === "langchain_tracer")) {
      if (tracingV2Enabled) {
-         callbackManager.addHandler(await getTracingV2CallbackHandler(), true);
+         const tracerV2 = await getTracingV2CallbackHandler();
+         callbackManager.addHandler(tracerV2, true);
+         // handoff between langchain and langsmith/traceable
+         // override the parent run ID
+         callbackManager._parentRunId =
+             tracerV2.getTraceableRunTree()?.id ?? callbackManager._parentRunId;
      }
  }
  }
@@ -5,6 +5,9 @@ const index_js_1 = require("../messages/index.cjs");
  const outputs_js_1 = require("../outputs.cjs");
  const base_js_1 = require("./base.cjs");
  const manager_js_1 = require("../callbacks/manager.cjs");
+ const event_stream_js_1 = require("../tracers/event_stream.cjs");
+ const log_stream_js_1 = require("../tracers/log_stream.cjs");
+ const stream_js_1 = require("../utils/stream.cjs");
  /**
   * Creates a transform stream for encoding chat message chunks.
   * @deprecated Use {@link BytesOutputParser} instead
@@ -128,39 +131,76 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
  batch_size: 1,
  };
  const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions.runName);
- // generate results
- const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
- // handle results
  const generations = [];
  const llmOutputs = [];
- await Promise.all(results.map(async (pResult, i) => {
-     if (pResult.status === "fulfilled") {
-         const result = pResult.value;
-         for (const generation of result.generations) {
-             generation.message.response_metadata = {
-                 ...generation.generationInfo,
-                 ...generation.message.response_metadata,
-             };
+ // Even if stream is not explicitly called, check if model is implicitly
+ // called from streamEvents() or streamLog() to get all streamed events.
+ // Bail out if _streamResponseChunks not overridden
+ const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+     return (0, event_stream_js_1.isStreamEventsHandler)(handler) || (0, log_stream_js_1.isLogStreamHandler)(handler);
+ });
+ if (hasStreamingHandler &&
+     baseMessages.length === 1 &&
+     this._streamResponseChunks !==
+         BaseChatModel.prototype._streamResponseChunks) {
+     try {
+         const stream = await this._streamResponseChunks(baseMessages[0], parsedOptions, runManagers?.[0]);
+         let aggregated;
+         for await (const chunk of stream) {
+             if (aggregated === undefined) {
+                 aggregated = chunk;
+             }
+             else {
+                 aggregated = (0, stream_js_1.concat)(aggregated, chunk);
+             }
          }
-         if (result.generations.length === 1) {
-             result.generations[0].message.response_metadata = {
-                 ...result.llmOutput,
-                 ...result.generations[0].message.response_metadata,
-             };
+         if (aggregated === undefined) {
+             throw new Error("Received empty response from chat model call.");
          }
-         generations[i] = result.generations;
-         llmOutputs[i] = result.llmOutput;
-         return runManagers?.[i]?.handleLLMEnd({
-             generations: [result.generations],
-             llmOutput: result.llmOutput,
+         generations.push([aggregated]);
+         await runManagers?.[0].handleLLMEnd({
+             generations,
+             llmOutput: {},
          });
      }
-     else {
-         // status === "rejected"
-         await runManagers?.[i]?.handleLLMError(pResult.reason);
-         return Promise.reject(pResult.reason);
+     catch (e) {
+         await runManagers?.[0].handleLLMError(e);
+         throw e;
      }
- }));
+ }
+ else {
+     // generate results
+     const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
+     // handle results
+     await Promise.all(results.map(async (pResult, i) => {
+         if (pResult.status === "fulfilled") {
+             const result = pResult.value;
+             for (const generation of result.generations) {
+                 generation.message.response_metadata = {
+                     ...generation.generationInfo,
+                     ...generation.message.response_metadata,
+                 };
+             }
+             if (result.generations.length === 1) {
+                 result.generations[0].message.response_metadata = {
+                     ...result.llmOutput,
+                     ...result.generations[0].message.response_metadata,
+                 };
+             }
+             generations[i] = result.generations;
+             llmOutputs[i] = result.llmOutput;
+             return runManagers?.[i]?.handleLLMEnd({
+                 generations: [result.generations],
+                 llmOutput: result.llmOutput,
+             });
+         }
+         else {
+             // status === "rejected"
+             await runManagers?.[i]?.handleLLMError(pResult.reason);
+             return Promise.reject(pResult.reason);
+         }
+     }));
+ }
  // create combined output
  const output = {
      generations,
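
The practical effect: when a chat model runs under `streamEvents()` or `streamLog()`, plain `invoke()`-style calls now stream internally and surface token-level events. A minimal sketch using the fake chat model that `@langchain/core` ships for testing; the event filter is illustrative:

    import { FakeListChatModel } from "@langchain/core/utils/testing";
    import { StringOutputParser } from "@langchain/core/output_parsers";

    const model = new FakeListChatModel({ responses: ["Hello world"] });
    const chain = model.pipe(new StringOutputParser());

    // Before 0.2.4 the model inside this chain was _generate()d in one shot;
    // now the stream-events handler is detected and chunks are emitted.
    for await (const event of chain.streamEvents("hi", { version: "v1" })) {
      if (event.event.endsWith("_stream")) {
        console.log(event.data.chunk);
      }
    }
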
@@ -2,6 +2,9 @@ import { AIMessage, HumanMessage, coerceMessageLikeToMessage, } from "../message
  import { RUN_KEY, } from "../outputs.js";
  import { BaseLanguageModel, } from "./base.js";
  import { CallbackManager, } from "../callbacks/manager.js";
+ import { isStreamEventsHandler } from "../tracers/event_stream.js";
+ import { isLogStreamHandler } from "../tracers/log_stream.js";
+ import { concat } from "../utils/stream.js";
  /**
   * Creates a transform stream for encoding chat message chunks.
   * @deprecated Use {@link BytesOutputParser} instead
@@ -124,39 +127,76 @@ export class BaseChatModel extends BaseLanguageModel {
  batch_size: 1,
  };
  const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions.runName);
- // generate results
- const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
- // handle results
  const generations = [];
  const llmOutputs = [];
- await Promise.all(results.map(async (pResult, i) => {
-     if (pResult.status === "fulfilled") {
-         const result = pResult.value;
-         for (const generation of result.generations) {
-             generation.message.response_metadata = {
-                 ...generation.generationInfo,
-                 ...generation.message.response_metadata,
-             };
+ // Even if stream is not explicitly called, check if model is implicitly
+ // called from streamEvents() or streamLog() to get all streamed events.
+ // Bail out if _streamResponseChunks not overridden
+ const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+     return isStreamEventsHandler(handler) || isLogStreamHandler(handler);
+ });
+ if (hasStreamingHandler &&
+     baseMessages.length === 1 &&
+     this._streamResponseChunks !==
+         BaseChatModel.prototype._streamResponseChunks) {
+     try {
+         const stream = await this._streamResponseChunks(baseMessages[0], parsedOptions, runManagers?.[0]);
+         let aggregated;
+         for await (const chunk of stream) {
+             if (aggregated === undefined) {
+                 aggregated = chunk;
+             }
+             else {
+                 aggregated = concat(aggregated, chunk);
+             }
          }
-         if (result.generations.length === 1) {
-             result.generations[0].message.response_metadata = {
-                 ...result.llmOutput,
-                 ...result.generations[0].message.response_metadata,
-             };
+         if (aggregated === undefined) {
+             throw new Error("Received empty response from chat model call.");
          }
-         generations[i] = result.generations;
-         llmOutputs[i] = result.llmOutput;
-         return runManagers?.[i]?.handleLLMEnd({
-             generations: [result.generations],
-             llmOutput: result.llmOutput,
+         generations.push([aggregated]);
+         await runManagers?.[0].handleLLMEnd({
+             generations,
+             llmOutput: {},
          });
      }
-     else {
-         // status === "rejected"
-         await runManagers?.[i]?.handleLLMError(pResult.reason);
-         return Promise.reject(pResult.reason);
+     catch (e) {
+         await runManagers?.[0].handleLLMError(e);
+         throw e;
      }
- }));
+ }
+ else {
+     // generate results
+     const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
+     // handle results
+     await Promise.all(results.map(async (pResult, i) => {
+         if (pResult.status === "fulfilled") {
+             const result = pResult.value;
+             for (const generation of result.generations) {
+                 generation.message.response_metadata = {
+                     ...generation.generationInfo,
+                     ...generation.message.response_metadata,
+                 };
+             }
+             if (result.generations.length === 1) {
+                 result.generations[0].message.response_metadata = {
+                     ...result.llmOutput,
+                     ...result.generations[0].message.response_metadata,
+                 };
+             }
+             generations[i] = result.generations;
+             llmOutputs[i] = result.llmOutput;
+             return runManagers?.[i]?.handleLLMEnd({
+                 generations: [result.generations],
+                 llmOutput: result.llmOutput,
+             });
+         }
+         else {
+             // status === "rejected"
+             await runManagers?.[i]?.handleLLMError(pResult.reason);
+             return Promise.reject(pResult.reason);
+         }
+     }));
+ }
  // create combined output
  const output = {
      generations,
@@ -5,6 +5,9 @@ const index_js_1 = require("../messages/index.cjs");
  const outputs_js_1 = require("../outputs.cjs");
  const manager_js_1 = require("../callbacks/manager.cjs");
  const base_js_1 = require("./base.cjs");
+ const event_stream_js_1 = require("../tracers/event_stream.cjs");
+ const log_stream_js_1 = require("../tracers/log_stream.cjs");
+ const stream_js_1 = require("../utils/stream.cjs");
  /**
   * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
   */
@@ -133,16 +136,49 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
  batch_size: prompts.length,
  };
  const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions?.runName);
+ // Even if stream is not explicitly called, check if model is implicitly
+ // called from streamEvents() or streamLog() to get all streamed events.
+ // Bail out if _streamResponseChunks not overridden
+ const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+     return (0, event_stream_js_1.isStreamEventsHandler)(handler) || (0, log_stream_js_1.isLogStreamHandler)(handler);
+ });
  let output;
- try {
-     output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+ if (hasStreamingHandler &&
+     prompts.length === 1 &&
+     this._streamResponseChunks !== BaseLLM.prototype._streamResponseChunks) {
+     try {
+         const stream = await this._streamResponseChunks(prompts[0], parsedOptions, runManagers?.[0]);
+         let aggregated;
+         for await (const chunk of stream) {
+             if (aggregated === undefined) {
+                 aggregated = chunk;
+             }
+             else {
+                 aggregated = (0, stream_js_1.concat)(aggregated, chunk);
+             }
+         }
+         if (aggregated === undefined) {
+             throw new Error("Received empty response from chat model call.");
+         }
+         output = { generations: [[aggregated]], llmOutput: {} };
+         await runManagers?.[0].handleLLMEnd(output);
+     }
+     catch (e) {
+         await runManagers?.[0].handleLLMError(e);
+         throw e;
+     }
  }
- catch (err) {
-     await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-     throw err;
+ else {
+     try {
+         output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+     }
+     catch (err) {
+         await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
+         throw err;
+     }
+     const flattenedOutputs = this._flattenLLMResult(output);
+     await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
  }
- const flattenedOutputs = this._flattenLLMResult(output);
- await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
  const runIds = runManagers?.map((manager) => manager.runId) || undefined;
  // This defines RUN_KEY as a non-enumerable property on the output object
  // so that it is not serialized when the output is stringified, and so that
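
`BaseLLM.generate` gains the same implicit-streaming path. A sketch of observing it through `streamLog()`, assuming `llm` is any completion model that overrides `_streamResponseChunks` (provider integrations typically do):

    import type { BaseLLM } from "@langchain/core/language_models/llms";

    declare const llm: BaseLLM; // stand-in for a streaming-capable model

    // Each RunLogPatch now carries streamed tokens even though generate(),
    // not stream(), runs under the hood.
    for await (const patch of llm.streamLog("Write a haiku about diffs")) {
      console.log(JSON.stringify(patch));
    }
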
@@ -2,6 +2,9 @@ import { AIMessage, getBufferString, } from "../messages/index.js";
  import { RUN_KEY, GenerationChunk, } from "../outputs.js";
  import { CallbackManager, } from "../callbacks/manager.js";
  import { BaseLanguageModel, } from "./base.js";
+ import { isStreamEventsHandler } from "../tracers/event_stream.js";
+ import { isLogStreamHandler } from "../tracers/log_stream.js";
+ import { concat } from "../utils/stream.js";
  /**
   * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
   */
@@ -130,16 +133,49 @@ export class BaseLLM extends BaseLanguageModel {
  batch_size: prompts.length,
  };
  const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions?.runName);
+ // Even if stream is not explicitly called, check if model is implicitly
+ // called from streamEvents() or streamLog() to get all streamed events.
+ // Bail out if _streamResponseChunks not overridden
+ const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+     return isStreamEventsHandler(handler) || isLogStreamHandler(handler);
+ });
  let output;
- try {
-     output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+ if (hasStreamingHandler &&
+     prompts.length === 1 &&
+     this._streamResponseChunks !== BaseLLM.prototype._streamResponseChunks) {
+     try {
+         const stream = await this._streamResponseChunks(prompts[0], parsedOptions, runManagers?.[0]);
+         let aggregated;
+         for await (const chunk of stream) {
+             if (aggregated === undefined) {
+                 aggregated = chunk;
+             }
+             else {
+                 aggregated = concat(aggregated, chunk);
+             }
+         }
+         if (aggregated === undefined) {
+             throw new Error("Received empty response from chat model call.");
+         }
+         output = { generations: [[aggregated]], llmOutput: {} };
+         await runManagers?.[0].handleLLMEnd(output);
+     }
+     catch (e) {
+         await runManagers?.[0].handleLLMError(e);
+         throw e;
+     }
  }
- catch (err) {
-     await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-     throw err;
+ else {
+     try {
+         output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+     }
+     catch (err) {
+         await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
+         throw err;
+     }
+     const flattenedOutputs = this._flattenLLMResult(output);
+     await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
  }
- const flattenedOutputs = this._flattenLLMResult(output);
- await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
  const runIds = runManagers?.map((manager) => manager.runId) || undefined;
  // This defines RUN_KEY as a non-enumerable property on the output object
  // so that it is not serialized when the output is stringified, and so that
@@ -3,10 +3,11 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.RunnablePick = exports.RunnableAssign = exports._coerceToRunnable = exports.RunnableWithFallbacks = exports.RunnableParallel = exports.RunnableLambda = exports.RunnableMap = exports.RunnableSequence = exports.RunnableRetry = exports.RunnableEach = exports.RunnableBinding = exports.Runnable = exports._coerceToDict = void 0;
+ exports.RunnablePick = exports.RunnableAssign = exports._coerceToRunnable = exports.RunnableWithFallbacks = exports.RunnableParallel = exports.RunnableLambda = exports.RunnableTraceable = exports.RunnableMap = exports.RunnableSequence = exports.RunnableRetry = exports.RunnableEach = exports.RunnableBinding = exports.Runnable = exports._coerceToDict = void 0;
  const zod_1 = require("zod");
  const p_retry_1 = __importDefault(require("p-retry"));
  const uuid_1 = require("uuid");
+ const traceable_1 = require("langsmith/singletons/traceable");
  const manager_js_1 = require("../callbacks/manager.cjs");
  const log_stream_js_1 = require("../tracers/log_stream.cjs");
  const event_stream_js_1 = require("../tracers/event_stream.cjs");
@@ -287,14 +288,12 @@ class Runnable extends serializable_js_1.Serializable {
  const pipe = await (0, stream_js_1.pipeGeneratorWithSetup)(transformer.bind(this), wrapInputForTracing(), async () => callbackManager_?.handleChainStart(this.toJSON(), { input: "" }, config.runId, config.runType, undefined, undefined, config.runName ?? this.getName()), config);
  delete config.runId;
  runManager = pipe.setup;
- const isStreamEventsHandler = (handler) => handler.name === "event_stream_tracer";
- const streamEventsHandler = runManager?.handlers.find(isStreamEventsHandler);
+ const streamEventsHandler = runManager?.handlers.find(event_stream_js_1.isStreamEventsHandler);
  let iterator = pipe.output;
  if (streamEventsHandler !== undefined && runManager !== undefined) {
      iterator = streamEventsHandler.tapOutputIterable(runManager.runId, iterator);
  }
- const isLogStreamHandler = (handler) => handler.name === "log_stream_tracer";
- const streamLogHandler = runManager?.handlers.find(isLogStreamHandler);
+ const streamLogHandler = runManager?.handlers.find(log_stream_js_1.isLogStreamHandler);
  if (streamLogHandler !== undefined && runManager !== undefined) {
      iterator = streamLogHandler.tapOutputIterable(runManager.runId, iterator);
  }
@@ -1387,6 +1386,69 @@ class RunnableMap extends Runnable {
  }
  }
  exports.RunnableMap = RunnableMap;
+ /**
+  * A runnable that wraps a traced LangSmith function.
+  */
+ class RunnableTraceable extends Runnable {
+     constructor(fields) {
+         super(fields);
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: false
+         });
+         Object.defineProperty(this, "lc_namespace", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: ["langchain_core", "runnables"]
+         });
+         Object.defineProperty(this, "func", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         if (!(0, traceable_1.isTraceableFunction)(fields.func)) {
+             throw new Error("RunnableTraceable requires a function that is wrapped in traceable higher-order function");
+         }
+         this.func = fields.func;
+     }
+     async invoke(input, options) {
+         const [config] = this._getOptionsList(options ?? {}, 1);
+         const callbacks = await (0, config_js_1.getCallbackManagerForConfig)(config);
+         return (await this.func((0, config_js_1.patchConfig)(config, { callbacks }), input));
+     }
+     async *_streamIterator(input, options) {
+         const result = await this.invoke(input, options);
+         if ((0, iter_js_1.isAsyncIterable)(result)) {
+             for await (const item of result) {
+                 yield item;
+             }
+             return;
+         }
+         if ((0, iter_js_1.isIterator)(result)) {
+             while (true) {
+                 const state = result.next();
+                 if (state.done)
+                     break;
+                 yield state.value;
+             }
+             return;
+         }
+         yield result;
+     }
+     static from(func) {
+         return new RunnableTraceable({ func });
+     }
+ }
+ exports.RunnableTraceable = RunnableTraceable;
+ function assertNonTraceableFunction(func) {
+     if ((0, traceable_1.isTraceableFunction)(func)) {
+         throw new Error("RunnableLambda requires a function that is not wrapped in traceable higher-order function. This shouldn't happen.");
+     }
+ }
  /**
   * A runnable that runs a callable.
   */
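
`RunnableTraceable` adapts a `traceable`-wrapped function to the Runnable interface, so it can be invoked, piped, batched, and streamed like any other runnable. A minimal sketch; the traced function is illustrative:

    import { traceable } from "langsmith/traceable";
    import { RunnableTraceable } from "@langchain/core/runnables";

    // traceable() wraps the function so each call records a LangSmith run.
    const tracedLength = traceable(async (text: string) => text.length, {
      name: "traced-length",
    });

    const runnable = RunnableTraceable.from(tracedLength);
    console.log(await runnable.invoke("hello")); // 5
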
@@ -1395,6 +1457,10 @@ class RunnableLambda extends Runnable {
  return "RunnableLambda";
  }
  constructor(fields) {
+     if ((0, traceable_1.isTraceableFunction)(fields.func)) {
+         // eslint-disable-next-line no-constructor-return
+         return RunnableTraceable.from(fields.func);
+     }
      super(fields);
      Object.defineProperty(this, "lc_namespace", {
          enumerable: true,
@@ -1408,6 +1474,7 @@ class RunnableLambda extends Runnable {
          writable: true,
          value: void 0
      });
+     assertNonTraceableFunction(fields.func);
      this.func = fields.func;
  }
  static from(func) {
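
Because of the early `return` in the constructor, `RunnableLambda` now acts as a factory: given a traced function it hands back a `RunnableTraceable`, while `assertNonTraceableFunction` keeps plain lambdas on the normal path. A minimal sketch:

    import { traceable } from "langsmith/traceable";
    import { RunnableLambda, RunnableTraceable } from "@langchain/core/runnables";

    const traced = traceable(async (n: number) => n * 2, { name: "double" });

    // The traceable wrapper is detected, so a RunnableTraceable comes back.
    const viaLambda = RunnableLambda.from(traced);
    console.log(viaLambda instanceof RunnableTraceable); // true

    // A plain function still produces a regular RunnableLambda.
    const plain = RunnableLambda.from(async (n: number) => n * 2);
    console.log(plain instanceof RunnableLambda); // true
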
@@ -1455,7 +1522,7 @@ class RunnableLambda extends Runnable {
  }
  output = finalOutput;
  }
- else if ((0, iter_js_1.isIterator)(output)) {
+ else if ((0, iter_js_1.isIterableIterator)(output)) {
      let finalOutput;
      for (const chunk of (0, iter_js_1.consumeIteratorInContext)(childConfig, output)) {
          if (finalOutput === undefined) {
@@ -1533,7 +1600,7 @@ class RunnableLambda extends Runnable {
      yield chunk;
  }
  }
- else if ((0, iter_js_1.isIterator)(output)) {
+ else if ((0, iter_js_1.isIterableIterator)(output)) {
      for (const chunk of (0, iter_js_1.consumeIteratorInContext)(config, output)) {
          yield chunk;
      }
@@ -1,3 +1,4 @@
+ import { type TraceableFunction } from "langsmith/singletons/traceable";
  import type { RunnableInterface, RunnableBatchOptions } from "./types.js";
  import { CallbackManagerForChainRun } from "../callbacks/manager.js";
  import { LogStreamCallbackHandler, LogStreamCallbackHandlerInput, RunLogPatch } from "../tracers/log_stream.js";
@@ -463,6 +464,21 @@ export declare class RunnableMap<RunInput = any, RunOutput extends Record<string
  transform(generator: AsyncGenerator<RunInput>, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
  stream(input: RunInput, options?: Partial<RunnableConfig>): Promise<IterableReadableStream<RunOutput>>;
  }
+ type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>;
+ /**
+  * A runnable that wraps a traced LangSmith function.
+  */
+ export declare class RunnableTraceable<RunInput, RunOutput> extends Runnable<RunInput, RunOutput> {
+     lc_serializable: boolean;
+     lc_namespace: string[];
+     protected func: AnyTraceableFunction;
+     constructor(fields: {
+         func: AnyTraceableFunction;
+     });
+     invoke(input: RunInput, options?: Partial<RunnableConfig>): Promise<RunOutput>;
+     _streamIterator(input: RunInput, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
+     static from(func: AnyTraceableFunction): RunnableTraceable<unknown, unknown>;
+ }
  /**
   * A runnable that runs a callable.
   */
@@ -471,9 +487,10 @@ export declare class RunnableLambda<RunInput, RunOutput> extends Runnable<RunInp
  lc_namespace: string[];
  protected func: RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput>>;
  constructor(fields: {
-     func: RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput>>;
+     func: RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput>> | TraceableFunction<RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput>>>;
  });
  static from<RunInput, RunOutput>(func: RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput>>): RunnableLambda<RunInput, RunOutput>;
+ static from<RunInput, RunOutput>(func: TraceableFunction<RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput>>>): RunnableLambda<RunInput, RunOutput>;
  _invoke(input: RunInput, config?: Partial<RunnableConfig>, runManager?: CallbackManagerForChainRun): Promise<RunOutput>;
  invoke(input: RunInput, options?: Partial<RunnableConfig>): Promise<RunOutput>;
  _transform(generator: AsyncGenerator<RunInput>, runManager?: CallbackManagerForChainRun, config?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;