@langchain/core 0.2.2 → 0.2.3

This diff compares the publicly released contents of the two package versions as published to their registry, and is provided for informational purposes only.
@@ -601,7 +601,8 @@ class CallbackManager extends BaseCallbackManager {
     }
     const verboseEnabled = (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_VERBOSE") === "true" ||
         options?.verbose;
-    const tracingV2Enabled = (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_TRACING_V2") === "true";
+    const tracingV2Enabled = (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_TRACING_V2") === "true" ||
+        (0, env_js_1.getEnvironmentVariable)("LANGSMITH_TRACING") === "true";
     const tracingEnabled = tracingV2Enabled ||
         ((0, env_js_1.getEnvironmentVariable)("LANGCHAIN_TRACING") ?? false);
     if (verboseEnabled || tracingEnabled) {
@@ -592,7 +592,8 @@ export class CallbackManager extends BaseCallbackManager {
     }
     const verboseEnabled = getEnvironmentVariable("LANGCHAIN_VERBOSE") === "true" ||
         options?.verbose;
-    const tracingV2Enabled = getEnvironmentVariable("LANGCHAIN_TRACING_V2") === "true";
+    const tracingV2Enabled = getEnvironmentVariable("LANGCHAIN_TRACING_V2") === "true" ||
+        getEnvironmentVariable("LANGSMITH_TRACING") === "true";
     const tracingEnabled = tracingV2Enabled ||
         (getEnvironmentVariable("LANGCHAIN_TRACING") ?? false);
     if (verboseEnabled || tracingEnabled) {
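Note: after this change, setting either LANGCHAIN_TRACING_V2=true or the new LANGSMITH_TRACING=true enables LangSmith tracing. A standalone TypeScript sketch of the updated decision, for illustration only (getEnv and isTracingEnabled are hypothetical names, not package exports):

    // Reconstruction of the check the two hunks above now encode.
    const getEnv = (name: string): string | undefined => process.env[name];

    function isTracingEnabled(): boolean {
      const tracingV2Enabled =
        getEnv("LANGCHAIN_TRACING_V2") === "true" ||
        getEnv("LANGSMITH_TRACING") === "true"; // new alias in 0.2.3
      // The legacy LANGCHAIN_TRACING flag still counts toward tracing.
      return tracingV2Enabled || getEnv("LANGCHAIN_TRACING") === "true";
    }

In practice, process.env.LANGSMITH_TRACING = "true" now behaves like the older LANGCHAIN_TRACING_V2 flag when callbacks are configured.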
@@ -5,6 +5,9 @@ const index_js_1 = require("../messages/index.cjs");
 const outputs_js_1 = require("../outputs.cjs");
 const base_js_1 = require("./base.cjs");
 const manager_js_1 = require("../callbacks/manager.cjs");
+const event_stream_js_1 = require("../tracers/event_stream.cjs");
+const log_stream_js_1 = require("../tracers/log_stream.cjs");
+const stream_js_1 = require("../utils/stream.cjs");
 /**
  * Creates a transform stream for encoding chat message chunks.
  * @deprecated Use {@link BytesOutputParser} instead
@@ -128,39 +131,76 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
         batch_size: 1,
     };
     const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions.runName);
-    // generate results
-    const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
-    // handle results
     const generations = [];
     const llmOutputs = [];
-    await Promise.all(results.map(async (pResult, i) => {
-        if (pResult.status === "fulfilled") {
-            const result = pResult.value;
-            for (const generation of result.generations) {
-                generation.message.response_metadata = {
-                    ...generation.generationInfo,
-                    ...generation.message.response_metadata,
-                };
+    // Even if stream is not explicitly called, check if model is implicitly
+    // called from streamEvents() or streamLog() to get all streamed events.
+    // Bail out if _streamResponseChunks not overridden
+    const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+        return (0, event_stream_js_1.isStreamEventsHandler)(handler) || (0, log_stream_js_1.isLogStreamHandler)(handler);
+    });
+    if (hasStreamingHandler &&
+        baseMessages.length === 1 &&
+        this._streamResponseChunks !==
+            BaseChatModel.prototype._streamResponseChunks) {
+        try {
+            const stream = await this._streamResponseChunks(baseMessages[0], parsedOptions, runManagers?.[0]);
+            let aggregated;
+            for await (const chunk of stream) {
+                if (aggregated === undefined) {
+                    aggregated = chunk;
+                }
+                else {
+                    aggregated = (0, stream_js_1.concat)(aggregated, chunk);
+                }
             }
-            if (result.generations.length === 1) {
-                result.generations[0].message.response_metadata = {
-                    ...result.llmOutput,
-                    ...result.generations[0].message.response_metadata,
-                };
+            if (aggregated === undefined) {
+                throw new Error("Received empty response from chat model call.");
             }
-            generations[i] = result.generations;
-            llmOutputs[i] = result.llmOutput;
-            return runManagers?.[i]?.handleLLMEnd({
-                generations: [result.generations],
-                llmOutput: result.llmOutput,
+            generations.push([aggregated]);
+            await runManagers?.[0].handleLLMEnd({
+                generations,
+                llmOutput: {},
             });
         }
-        else {
-            // status === "rejected"
-            await runManagers?.[i]?.handleLLMError(pResult.reason);
-            return Promise.reject(pResult.reason);
+        catch (e) {
+            await runManagers?.[0].handleLLMError(e);
+            throw e;
        }
-    }));
+    }
+    else {
+        // generate results
+        const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
+        // handle results
+        await Promise.all(results.map(async (pResult, i) => {
+            if (pResult.status === "fulfilled") {
+                const result = pResult.value;
+                for (const generation of result.generations) {
+                    generation.message.response_metadata = {
+                        ...generation.generationInfo,
+                        ...generation.message.response_metadata,
+                    };
+                }
+                if (result.generations.length === 1) {
+                    result.generations[0].message.response_metadata = {
+                        ...result.llmOutput,
+                        ...result.generations[0].message.response_metadata,
+                    };
+                }
+                generations[i] = result.generations;
+                llmOutputs[i] = result.llmOutput;
+                return runManagers?.[i]?.handleLLMEnd({
+                    generations: [result.generations],
+                    llmOutput: result.llmOutput,
+                });
+            }
+            else {
+                // status === "rejected"
+                await runManagers?.[i]?.handleLLMError(pResult.reason);
+                return Promise.reject(pResult.reason);
+            }
+        }));
+    }
     // create combined output
     const output = {
         generations,
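The new branch above runs only when the subclass actually overrides _streamResponseChunks; the comparison against BaseChatModel.prototype._streamResponseChunks is what detects the override. A minimal generic TypeScript illustration of that detection technique (Base and Sub are illustrative classes, unrelated to the package):

    // Override detection via prototype comparison, as in the hunk above.
    class Base {
      method(): string {
        return "base";
      }
    }
    class Sub extends Base {
      method(): string {
        return "sub"; // own implementation shadows the base one
      }
    }
    const sub = new Sub();
    // true: Sub defines its own method, so a check like this would take
    // the streaming branch; a plain Base instance would compare equal.
    console.log(sub.method !== Base.prototype.method);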
@@ -2,6 +2,9 @@ import { AIMessage, HumanMessage, coerceMessageLikeToMessage, } from "../message
 import { RUN_KEY, } from "../outputs.js";
 import { BaseLanguageModel, } from "./base.js";
 import { CallbackManager, } from "../callbacks/manager.js";
+import { isStreamEventsHandler } from "../tracers/event_stream.js";
+import { isLogStreamHandler } from "../tracers/log_stream.js";
+import { concat } from "../utils/stream.js";
 /**
  * Creates a transform stream for encoding chat message chunks.
  * @deprecated Use {@link BytesOutputParser} instead
@@ -124,39 +127,76 @@ export class BaseChatModel extends BaseLanguageModel {
         batch_size: 1,
     };
     const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions.runName);
-    // generate results
-    const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
-    // handle results
     const generations = [];
     const llmOutputs = [];
-    await Promise.all(results.map(async (pResult, i) => {
-        if (pResult.status === "fulfilled") {
-            const result = pResult.value;
-            for (const generation of result.generations) {
-                generation.message.response_metadata = {
-                    ...generation.generationInfo,
-                    ...generation.message.response_metadata,
-                };
+    // Even if stream is not explicitly called, check if model is implicitly
+    // called from streamEvents() or streamLog() to get all streamed events.
+    // Bail out if _streamResponseChunks not overridden
+    const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+        return isStreamEventsHandler(handler) || isLogStreamHandler(handler);
+    });
+    if (hasStreamingHandler &&
+        baseMessages.length === 1 &&
+        this._streamResponseChunks !==
+            BaseChatModel.prototype._streamResponseChunks) {
+        try {
+            const stream = await this._streamResponseChunks(baseMessages[0], parsedOptions, runManagers?.[0]);
+            let aggregated;
+            for await (const chunk of stream) {
+                if (aggregated === undefined) {
+                    aggregated = chunk;
+                }
+                else {
+                    aggregated = concat(aggregated, chunk);
+                }
             }
-            if (result.generations.length === 1) {
-                result.generations[0].message.response_metadata = {
-                    ...result.llmOutput,
-                    ...result.generations[0].message.response_metadata,
-                };
+            if (aggregated === undefined) {
+                throw new Error("Received empty response from chat model call.");
             }
-            generations[i] = result.generations;
-            llmOutputs[i] = result.llmOutput;
-            return runManagers?.[i]?.handleLLMEnd({
-                generations: [result.generations],
-                llmOutput: result.llmOutput,
+            generations.push([aggregated]);
+            await runManagers?.[0].handleLLMEnd({
+                generations,
+                llmOutput: {},
            });
        }
-        else {
-            // status === "rejected"
-            await runManagers?.[i]?.handleLLMError(pResult.reason);
-            return Promise.reject(pResult.reason);
+        catch (e) {
+            await runManagers?.[0].handleLLMError(e);
+            throw e;
        }
-    }));
+    }
+    else {
+        // generate results
+        const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
+        // handle results
+        await Promise.all(results.map(async (pResult, i) => {
+            if (pResult.status === "fulfilled") {
+                const result = pResult.value;
+                for (const generation of result.generations) {
+                    generation.message.response_metadata = {
+                        ...generation.generationInfo,
+                        ...generation.message.response_metadata,
+                    };
+                }
+                if (result.generations.length === 1) {
+                    result.generations[0].message.response_metadata = {
+                        ...result.llmOutput,
+                        ...result.generations[0].message.response_metadata,
+                    };
+                }
+                generations[i] = result.generations;
+                llmOutputs[i] = result.llmOutput;
+                return runManagers?.[i]?.handleLLMEnd({
+                    generations: [result.generations],
+                    llmOutput: result.llmOutput,
+                });
+            }
+            else {
+                // status === "rejected"
+                await runManagers?.[i]?.handleLLMError(pResult.reason);
+                return Promise.reject(pResult.reason);
+            }
+        }));
+    }
     // create combined output
     const output = {
         generations,
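The ESM build above receives the same change. The chunk-folding loop is the intended use of the concat utility the hunk imports; a minimal sketch of the aggregation in isolation (the aggregate helper is illustrative, not a package export):

    import { AIMessageChunk } from "@langchain/core/messages";
    import { concat } from "@langchain/core/utils/stream";

    // Fold streamed message chunks into a single aggregated chunk,
    // mirroring the new streaming branch in BaseChatModel.
    async function aggregate(
      stream: AsyncIterable<AIMessageChunk>
    ): Promise<AIMessageChunk> {
      let aggregated: AIMessageChunk | undefined;
      for await (const chunk of stream) {
        aggregated =
          aggregated === undefined ? chunk : concat(aggregated, chunk);
      }
      if (aggregated === undefined) {
        throw new Error("Received empty response from chat model call.");
      }
      return aggregated;
    }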
@@ -5,6 +5,9 @@ const index_js_1 = require("../messages/index.cjs");
 const outputs_js_1 = require("../outputs.cjs");
 const manager_js_1 = require("../callbacks/manager.cjs");
 const base_js_1 = require("./base.cjs");
+const event_stream_js_1 = require("../tracers/event_stream.cjs");
+const log_stream_js_1 = require("../tracers/log_stream.cjs");
+const stream_js_1 = require("../utils/stream.cjs");
 /**
  * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
@@ -133,16 +136,49 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
         batch_size: prompts.length,
     };
     const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions?.runName);
+    // Even if stream is not explicitly called, check if model is implicitly
+    // called from streamEvents() or streamLog() to get all streamed events.
+    // Bail out if _streamResponseChunks not overridden
+    const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+        return (0, event_stream_js_1.isStreamEventsHandler)(handler) || (0, log_stream_js_1.isLogStreamHandler)(handler);
+    });
     let output;
-    try {
-        output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+    if (hasStreamingHandler &&
+        prompts.length === 1 &&
+        this._streamResponseChunks !== BaseLLM.prototype._streamResponseChunks) {
+        try {
+            const stream = await this._streamResponseChunks(prompts[0], parsedOptions, runManagers?.[0]);
+            let aggregated;
+            for await (const chunk of stream) {
+                if (aggregated === undefined) {
+                    aggregated = chunk;
+                }
+                else {
+                    aggregated = (0, stream_js_1.concat)(aggregated, chunk);
+                }
+            }
+            if (aggregated === undefined) {
+                throw new Error("Received empty response from chat model call.");
+            }
+            output = { generations: [[aggregated]], llmOutput: {} };
+            await runManagers?.[0].handleLLMEnd(output);
+        }
+        catch (e) {
+            await runManagers?.[0].handleLLMError(e);
+            throw e;
+        }
     }
-    catch (err) {
-        await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-        throw err;
+    else {
+        try {
+            output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+        }
+        catch (err) {
+            await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
+            throw err;
+        }
+        const flattenedOutputs = this._flattenLLMResult(output);
+        await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
     }
-    const flattenedOutputs = this._flattenLLMResult(output);
-    await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
     const runIds = runManagers?.map((manager) => manager.runId) || undefined;
     // This defines RUN_KEY as a non-enumerable property on the output object
     // so that it is not serialized when the output is stringified, and so that
@@ -2,6 +2,9 @@ import { AIMessage, getBufferString, } from "../messages/index.js";
 import { RUN_KEY, GenerationChunk, } from "../outputs.js";
 import { CallbackManager, } from "../callbacks/manager.js";
 import { BaseLanguageModel, } from "./base.js";
+import { isStreamEventsHandler } from "../tracers/event_stream.js";
+import { isLogStreamHandler } from "../tracers/log_stream.js";
+import { concat } from "../utils/stream.js";
 /**
  * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
@@ -130,16 +133,49 @@ export class BaseLLM extends BaseLanguageModel {
         batch_size: prompts.length,
     };
     const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions?.runName);
+    // Even if stream is not explicitly called, check if model is implicitly
+    // called from streamEvents() or streamLog() to get all streamed events.
+    // Bail out if _streamResponseChunks not overridden
+    const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
+        return isStreamEventsHandler(handler) || isLogStreamHandler(handler);
+    });
     let output;
-    try {
-        output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+    if (hasStreamingHandler &&
+        prompts.length === 1 &&
+        this._streamResponseChunks !== BaseLLM.prototype._streamResponseChunks) {
+        try {
+            const stream = await this._streamResponseChunks(prompts[0], parsedOptions, runManagers?.[0]);
+            let aggregated;
+            for await (const chunk of stream) {
+                if (aggregated === undefined) {
+                    aggregated = chunk;
+                }
+                else {
+                    aggregated = concat(aggregated, chunk);
+                }
+            }
+            if (aggregated === undefined) {
+                throw new Error("Received empty response from chat model call.");
+            }
+            output = { generations: [[aggregated]], llmOutput: {} };
+            await runManagers?.[0].handleLLMEnd(output);
+        }
+        catch (e) {
+            await runManagers?.[0].handleLLMError(e);
+            throw e;
+        }
     }
-    catch (err) {
-        await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-        throw err;
+    else {
+        try {
+            output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
+        }
+        catch (err) {
+            await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
+            throw err;
+        }
+        const flattenedOutputs = this._flattenLLMResult(output);
+        await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
     }
-    const flattenedOutputs = this._flattenLLMResult(output);
-    await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
     const runIds = runManagers?.map((manager) => manager.runId) || undefined;
     // This defines RUN_KEY as a non-enumerable property on the output object
     // so that it is not serialized when the output is stringified, and so that
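For callers, the effect of the chat model and LLM hunks is that streamEvents() and streamLog() now surface per-chunk events even when the model is invoked non-streamingly inside a chain. A hedged usage sketch (model is a placeholder for any model instance whose class overrides _streamResponseChunks; event names vary by schema version, e.g. on_llm_stream in v1 versus on_chat_model_stream in v2):

    // `model` is assumed to be a chat model with streaming support.
    for await (const event of model.streamEvents("Hello", { version: "v1" })) {
      if (event.event === "on_llm_stream") {
        // Per-chunk events now appear even for invoke()-style calls.
        console.log(event.data.chunk);
      }
    }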
@@ -287,14 +287,12 @@ class Runnable extends serializable_js_1.Serializable {
     const pipe = await (0, stream_js_1.pipeGeneratorWithSetup)(transformer.bind(this), wrapInputForTracing(), async () => callbackManager_?.handleChainStart(this.toJSON(), { input: "" }, config.runId, config.runType, undefined, undefined, config.runName ?? this.getName()), config);
     delete config.runId;
     runManager = pipe.setup;
-    const isStreamEventsHandler = (handler) => handler.name === "event_stream_tracer";
-    const streamEventsHandler = runManager?.handlers.find(isStreamEventsHandler);
+    const streamEventsHandler = runManager?.handlers.find(event_stream_js_1.isStreamEventsHandler);
     let iterator = pipe.output;
     if (streamEventsHandler !== undefined && runManager !== undefined) {
         iterator = streamEventsHandler.tapOutputIterable(runManager.runId, iterator);
     }
-    const isLogStreamHandler = (handler) => handler.name === "log_stream_tracer";
-    const streamLogHandler = runManager?.handlers.find(isLogStreamHandler);
+    const streamLogHandler = runManager?.handlers.find(log_stream_js_1.isLogStreamHandler);
     if (streamLogHandler !== undefined && runManager !== undefined) {
         iterator = streamLogHandler.tapOutputIterable(runManager.runId, iterator);
     }
@@ -2,8 +2,8 @@ import { z } from "zod";
 import pRetry from "p-retry";
 import { v4 as uuidv4 } from "uuid";
 import { CallbackManager, } from "../callbacks/manager.js";
-import { LogStreamCallbackHandler, RunLog, RunLogPatch, } from "../tracers/log_stream.js";
-import { EventStreamCallbackHandler, } from "../tracers/event_stream.js";
+import { LogStreamCallbackHandler, RunLog, RunLogPatch, isLogStreamHandler, } from "../tracers/log_stream.js";
+import { EventStreamCallbackHandler, isStreamEventsHandler, } from "../tracers/event_stream.js";
 import { Serializable } from "../load/serializable.js";
 import { IterableReadableStream, concat, atee, pipeGeneratorWithSetup, AsyncGeneratorWithSetup, } from "../utils/stream.js";
 import { DEFAULT_RECURSION_LIMIT, ensureConfig, getCallbackManagerForConfig, mergeConfigs, patchConfig, } from "./config.js";
@@ -280,13 +280,11 @@ export class Runnable extends Serializable {
     const pipe = await pipeGeneratorWithSetup(transformer.bind(this), wrapInputForTracing(), async () => callbackManager_?.handleChainStart(this.toJSON(), { input: "" }, config.runId, config.runType, undefined, undefined, config.runName ?? this.getName()), config);
     delete config.runId;
     runManager = pipe.setup;
-    const isStreamEventsHandler = (handler) => handler.name === "event_stream_tracer";
     const streamEventsHandler = runManager?.handlers.find(isStreamEventsHandler);
     let iterator = pipe.output;
     if (streamEventsHandler !== undefined && runManager !== undefined) {
         iterator = streamEventsHandler.tapOutputIterable(runManager.runId, iterator);
     }
-    const isLogStreamHandler = (handler) => handler.name === "log_stream_tracer";
     const streamLogHandler = runManager?.handlers.find(isLogStreamHandler);
     if (streamLogHandler !== undefined && runManager !== undefined) {
         iterator = streamLogHandler.tapOutputIterable(runManager.runId, iterator);
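The two Runnable hunks above replace the inline name-based predicates with isStreamEventsHandler and isLogStreamHandler, now exported from the tracer modules and shared with the model classes earlier in this diff. A plausible TypeScript shape for such a guard, reconstructed from the name-matching logic of the removed lambdas (not the verbatim source):

    import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
    import { EventStreamCallbackHandler } from "@langchain/core/tracers/event_stream";

    // Narrowing type guard: the same string comparison the removed inline
    // lambda performed, but reusable and typed.
    function isStreamEventsHandler(
      handler: BaseCallbackHandler
    ): handler is EventStreamCallbackHandler {
      return handler.name === "event_stream_tracer";
    }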