@langchain/core 0.1.1 → 0.1.2-rc.0

This diff compares the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -146,6 +146,68 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
  });
  return output;
  }
+ async _generateCached({ messages, cache, llmStringKey, parsedOptions, handledOptions, }) {
+ const baseMessages = messages.map((messageList) => messageList.map(index_js_1.coerceMessageLikeToMessage));
+ // create callback manager and start run
+ const callbackManager_ = await manager_js_1.CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
+ const extra = {
+ options: parsedOptions,
+ invocation_params: this?.invocationParams(parsedOptions),
+ batch_size: 1,
+ cached: true,
+ };
+ const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, undefined, undefined, extra, undefined, undefined, handledOptions.runName);
+ // generate results
+ const missingPromptIndices = [];
+ const results = await Promise.allSettled(baseMessages.map(async (baseMessage, index) => {
+ // Join all content into one string for the prompt index
+ const prompt = BaseChatModel._convertInputToPromptValue(baseMessage).toString();
+ const result = await cache.lookup(prompt, llmStringKey);
+ if (result == null) {
+ missingPromptIndices.push(index);
+ }
+ return result;
+ }));
+ // Map run managers to the results before filtering out null results
+ // Null results are just absent from the cache.
+ const cachedResults = results
+ .map((result, index) => ({ result, runManager: runManagers?.[index] }))
+ .filter(({ result }) => (result.status === "fulfilled" && result.value != null) ||
+ result.status === "rejected");
+ // Handle results and call run managers
+ const generations = [];
+ await Promise.all(cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
+ if (promiseResult.status === "fulfilled") {
+ const result = promiseResult.value;
+ generations[i] = result;
+ if (result.length) {
+ await runManager?.handleLLMNewToken(result[0].text);
+ }
+ return runManager?.handleLLMEnd({
+ generations: [result],
+ });
+ }
+ else {
+ // status === "rejected"
+ await runManager?.handleLLMError(promiseResult.reason);
+ return Promise.reject(promiseResult.reason);
+ }
+ }));
+ const output = {
+ generations,
+ missingPromptIndices,
+ };
+ // This defines RUN_KEY as a non-enumerable property on the output object
+ // so that it is not serialized when the output is stringified, and so that
+ // it isnt included when listing the keys of the output object.
+ Object.defineProperty(output, outputs_js_1.RUN_KEY, {
+ value: runManagers
+ ? { runIds: runManagers?.map((manager) => manager.runId) }
+ : undefined,
+ configurable: true,
+ });
+ return output;
+ }
  /**
  * Generates chat based on the input messages.
  * @param messages An array of arrays of BaseMessage instances.
@@ -170,16 +232,13 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
  }
  const { cache } = this;
  const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
- const missingPromptIndices = [];
- const generations = await Promise.all(baseMessages.map(async (baseMessage, index) => {
- // Join all content into one string for the prompt index
- const prompt = BaseChatModel._convertInputToPromptValue(baseMessage).toString();
- const result = await cache.lookup(prompt, llmStringKey);
- if (!result) {
- missingPromptIndices.push(index);
- }
- return result;
- }));
+ const { generations, missingPromptIndices } = await this._generateCached({
+ messages: baseMessages,
+ cache,
+ llmStringKey,
+ parsedOptions: callOptions,
+ handledOptions: runnableConfig,
+ });
  let llmOutput = {};
  if (missingPromptIndices.length > 0) {
  const results = await this._generateUncached(missingPromptIndices.map((i) => baseMessages[i]), callOptions, runnableConfig);
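These two hunks (from what appears to be the compiled chat_models module) move the chat model's cache-lookup path into a dedicated `_generateCached` helper. Unlike the old inline lookup, the helper starts a run through `CallbackManager` with `cached: true` in its extras, so cache hits now reach tracers and callback handlers. A hedged usage sketch, assuming `FakeChatModel`, `InMemoryCache`, and `HumanMessage` are exported from this package's `utils/testing`, `caches`, and `messages` entrypoints:

```ts
import { FakeChatModel } from "@langchain/core/utils/testing";
import { InMemoryCache } from "@langchain/core/caches";
import { HumanMessage } from "@langchain/core/messages";

// Assumes FakeChatModel accepts the standard BaseChatModelParams (including `cache`).
const model = new FakeChatModel({ cache: new InMemoryCache() });
const messages = [new HumanMessage("Hello")];

await model.invoke(messages); // cache miss: generated and stored

await model.invoke(messages, {
  callbacks: [
    {
      // Fires even though this second call is answered from the cache,
      // because _generateCached still starts and ends a run.
      handleLLMEnd(output) {
        console.log(output.generations);
      },
    },
  ],
});
```

Prompts that miss the cache are collected in `missingPromptIndices` and regenerated by `_generateUncached`, as the second hunk shows.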
@@ -1,9 +1,10 @@
- import { BaseMessage, BaseMessageChunk, BaseMessageLike } from "../messages/index.js";
+ import { type BaseMessage, BaseMessageChunk, type BaseMessageLike } from "../messages/index.js";
  import { BasePromptValue } from "../prompt_values.js";
- import { LLMResult, ChatGenerationChunk, ChatResult } from "../outputs.js";
+ import { LLMResult, ChatGenerationChunk, type ChatResult, type Generation } from "../outputs.js";
  import { BaseLanguageModel, type BaseLanguageModelCallOptions, type BaseLanguageModelInput, type BaseLanguageModelParams } from "./base.js";
- import { CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
- import { RunnableConfig } from "../runnables/config.js";
+ import { type CallbackManagerForLLMRun, type Callbacks } from "../callbacks/manager.js";
+ import type { RunnableConfig } from "../runnables/config.js";
+ import type { BaseCache } from "../caches.js";
  /**
  * Represents a serialized chat model.
  */
@@ -32,6 +33,13 @@ export type BaseChatModelCallOptions = BaseLanguageModelCallOptions;
  * @returns A TransformStream instance that encodes chat message chunks.
  */
  export declare function createChatMessageChunkEncoderStream(): TransformStream<BaseMessageChunk, any>;
+ interface ChatModelGenerateCachedParameters<T extends BaseChatModel<CallOptions>, CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions> {
+ messages: BaseMessageLike[][];
+ cache: BaseCache<Generation[]>;
+ llmStringKey: string;
+ parsedOptions: T["ParsedCallOptions"];
+ handledOptions: RunnableConfig;
+ }
  /**
  * Base class for chat models. It extends the BaseLanguageModel class and
  * provides methods for generating chat based on input messages.
@@ -53,6 +61,9 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
  _streamIterator(input: BaseLanguageModelInput, options?: CallOptions): AsyncGenerator<BaseMessageChunk>;
  /** @ignore */
  _generateUncached(messages: BaseMessageLike[][], parsedOptions: this["ParsedCallOptions"], handledOptions: RunnableConfig): Promise<LLMResult>;
+ _generateCached({ messages, cache, llmStringKey, parsedOptions, handledOptions, }: ChatModelGenerateCachedParameters<typeof this>): Promise<LLMResult & {
+ missingPromptIndices: number[];
+ }>;
  /**
  * Generates chat based on the input messages.
  * @param messages An array of arrays of BaseMessage instances.
@@ -122,3 +133,4 @@ export declare abstract class SimpleChatModel<CallOptions extends BaseChatModelC
  abstract _call(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
  }
+ export {};
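The new `ChatModelGenerateCachedParameters<typeof this>` declaration leans on `typeof this` plus the indexed-access type `T["ParsedCallOptions"]`, so `_generateCached` automatically picks up whichever call-option type a concrete subclass defines. A minimal sketch of that typing pattern, using illustrative names rather than the package's own:

```ts
interface ExampleCallOptions {
  stop?: string[];
  timeout?: number;
}

class ExampleModel {
  // Mirrors how ParsedCallOptions is a type-only member on the model class.
  declare ParsedCallOptions: ExampleCallOptions;

  describe(options: this["ParsedCallOptions"]): string {
    return JSON.stringify(options);
  }
}

// The parameter interface stays in sync with whatever ParsedCallOptions the
// concrete model declares, which is what <typeof this> achieves above.
interface ExampleGenerateParameters<T extends ExampleModel> {
  parsedOptions: T["ParsedCallOptions"];
}

const params: ExampleGenerateParameters<ExampleModel> = {
  parsedOptions: { stop: ["\n"] },
};
console.log(new ExampleModel().describe(params.parsedOptions));
```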
@@ -142,6 +142,68 @@ export class BaseChatModel extends BaseLanguageModel {
  });
  return output;
  }
+ async _generateCached({ messages, cache, llmStringKey, parsedOptions, handledOptions, }) {
+ const baseMessages = messages.map((messageList) => messageList.map(coerceMessageLikeToMessage));
+ // create callback manager and start run
+ const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
+ const extra = {
+ options: parsedOptions,
+ invocation_params: this?.invocationParams(parsedOptions),
+ batch_size: 1,
+ cached: true,
+ };
+ const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, undefined, undefined, extra, undefined, undefined, handledOptions.runName);
+ // generate results
+ const missingPromptIndices = [];
+ const results = await Promise.allSettled(baseMessages.map(async (baseMessage, index) => {
+ // Join all content into one string for the prompt index
+ const prompt = BaseChatModel._convertInputToPromptValue(baseMessage).toString();
+ const result = await cache.lookup(prompt, llmStringKey);
+ if (result == null) {
+ missingPromptIndices.push(index);
+ }
+ return result;
+ }));
+ // Map run managers to the results before filtering out null results
+ // Null results are just absent from the cache.
+ const cachedResults = results
+ .map((result, index) => ({ result, runManager: runManagers?.[index] }))
+ .filter(({ result }) => (result.status === "fulfilled" && result.value != null) ||
+ result.status === "rejected");
+ // Handle results and call run managers
+ const generations = [];
+ await Promise.all(cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
+ if (promiseResult.status === "fulfilled") {
+ const result = promiseResult.value;
+ generations[i] = result;
+ if (result.length) {
+ await runManager?.handleLLMNewToken(result[0].text);
+ }
+ return runManager?.handleLLMEnd({
+ generations: [result],
+ });
+ }
+ else {
+ // status === "rejected"
+ await runManager?.handleLLMError(promiseResult.reason);
+ return Promise.reject(promiseResult.reason);
+ }
+ }));
+ const output = {
+ generations,
+ missingPromptIndices,
+ };
+ // This defines RUN_KEY as a non-enumerable property on the output object
+ // so that it is not serialized when the output is stringified, and so that
+ // it isnt included when listing the keys of the output object.
+ Object.defineProperty(output, RUN_KEY, {
+ value: runManagers
+ ? { runIds: runManagers?.map((manager) => manager.runId) }
+ : undefined,
+ configurable: true,
+ });
+ return output;
+ }
  /**
  * Generates chat based on the input messages.
  * @param messages An array of arrays of BaseMessage instances.
@@ -166,16 +228,13 @@ export class BaseChatModel extends BaseLanguageModel {
  }
  const { cache } = this;
  const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
- const missingPromptIndices = [];
- const generations = await Promise.all(baseMessages.map(async (baseMessage, index) => {
- // Join all content into one string for the prompt index
- const prompt = BaseChatModel._convertInputToPromptValue(baseMessage).toString();
- const result = await cache.lookup(prompt, llmStringKey);
- if (!result) {
- missingPromptIndices.push(index);
- }
- return result;
- }));
+ const { generations, missingPromptIndices } = await this._generateCached({
+ messages: baseMessages,
+ cache,
+ llmStringKey,
+ parsedOptions: callOptions,
+ handledOptions: runnableConfig,
+ });
  let llmOutput = {};
  if (missingPromptIndices.length > 0) {
  const results = await this._generateUncached(missingPromptIndices.map((i) => baseMessages[i]), callOptions, runnableConfig);
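The ESM build mirrors the CJS change above. The only contract `_generateCached` needs from its `cache` argument is `lookup(prompt, llmStringKey)` returning either stored generations or `null`. A minimal sketch of a compatible cache, assuming `BaseCache` from the `caches` entrypoint exposes abstract `lookup`/`update` methods as its import here suggests:

```ts
import { BaseCache } from "@langchain/core/caches";
import type { Generation } from "@langchain/core/outputs";

class SimpleMapCache extends BaseCache<Generation[]> {
  private store = new Map<string, Generation[]>();

  async lookup(prompt: string, llmKey: string): Promise<Generation[] | null> {
    // Returning null marks the prompt as a cache miss, so its index ends up
    // in missingPromptIndices and it is re-generated by _generateUncached.
    return this.store.get(`${prompt}:${llmKey}`) ?? null;
  }

  async update(prompt: string, llmKey: string, value: Generation[]): Promise<void> {
    this.store.set(`${prompt}:${llmKey}`, value);
  }
}
```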
@@ -153,6 +153,64 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
  });
  return output;
  }
+ async _generateCached({ prompts, cache, llmStringKey, parsedOptions, handledOptions, }) {
+ const callbackManager_ = await manager_js_1.CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
+ const extra = {
+ options: parsedOptions,
+ invocation_params: this?.invocationParams(parsedOptions),
+ batch_size: prompts.length,
+ cached: true,
+ };
+ const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, undefined, undefined, extra, undefined, undefined, handledOptions?.runName);
+ // generate results
+ const missingPromptIndices = [];
+ const results = await Promise.allSettled(prompts.map(async (prompt, index) => {
+ const result = await cache.lookup(prompt, llmStringKey);
+ if (result == null) {
+ missingPromptIndices.push(index);
+ }
+ return result;
+ }));
+ // Map run managers to the results before filtering out null results
+ // Null results are just absent from the cache.
+ const cachedResults = results
+ .map((result, index) => ({ result, runManager: runManagers?.[index] }))
+ .filter(({ result }) => (result.status === "fulfilled" && result.value != null) ||
+ result.status === "rejected");
+ // Handle results and call run managers
+ const generations = [];
+ await Promise.all(cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
+ if (promiseResult.status === "fulfilled") {
+ const result = promiseResult.value;
+ generations[i] = result;
+ if (result.length) {
+ await runManager?.handleLLMNewToken(result[0].text);
+ }
+ return runManager?.handleLLMEnd({
+ generations: [result],
+ });
+ }
+ else {
+ // status === "rejected"
+ await runManager?.handleLLMError(promiseResult.reason);
+ return Promise.reject(promiseResult.reason);
+ }
+ }));
+ const output = {
+ generations,
+ missingPromptIndices,
+ };
+ // This defines RUN_KEY as a non-enumerable property on the output object
+ // so that it is not serialized when the output is stringified, and so that
+ // it isnt included when listing the keys of the output object.
+ Object.defineProperty(output, outputs_js_1.RUN_KEY, {
+ value: runManagers
+ ? { runIds: runManagers?.map((manager) => manager.runId) }
+ : undefined,
+ configurable: true,
+ });
+ return output;
+ }
  /**
  * Run the LLM on the given prompts and input, handling caching.
  */
@@ -174,14 +232,13 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
  }
  const { cache } = this;
  const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
- const missingPromptIndices = [];
- const generations = await Promise.all(prompts.map(async (prompt, index) => {
- const result = await cache.lookup(prompt, llmStringKey);
- if (!result) {
- missingPromptIndices.push(index);
- }
- return result;
- }));
+ const { generations, missingPromptIndices } = await this._generateCached({
+ prompts,
+ cache,
+ llmStringKey,
+ parsedOptions: callOptions,
+ handledOptions: runnableConfig,
+ });
  let llmOutput = {};
  if (missingPromptIndices.length > 0) {
  const results = await this._generateUncached(missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig);
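The `BaseLLM` version of `_generateCached` uses the same `Promise.allSettled` pattern as the chat model: fulfilled-but-`null` lookups are treated as cache misses and dropped from `cachedResults`, while genuine hits and rejected lookups are kept so their run managers can be ended or failed. A standalone illustration of that filter:

```ts
type Hit = string[] | null;

const settled: PromiseSettledResult<Hit>[] = await Promise.allSettled([
  Promise.resolve(["cached generation"]),     // hit
  Promise.resolve(null),                      // miss: absent from the cache
  Promise.reject(new Error("lookup failed")), // error from the cache backend
]);

const kept = settled.filter(
  (result) =>
    (result.status === "fulfilled" && result.value != null) ||
    result.status === "rejected"
);

// kept.length === 2: the hit (reported via handleLLMEnd) and the rejection
// (reported via handleLLMError); the miss is left for _generateUncached.
console.log(kept.length);
```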
@@ -1,9 +1,10 @@
- import { BaseMessage } from "../messages/index.js";
- import { BasePromptValue } from "../prompt_values.js";
- import { LLMResult, GenerationChunk } from "../outputs.js";
- import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
+ import { type BaseMessage } from "../messages/index.js";
+ import type { BasePromptValue } from "../prompt_values.js";
+ import { type LLMResult, type Generation, GenerationChunk } from "../outputs.js";
+ import { type BaseCallbackConfig, type CallbackManagerForLLMRun, type Callbacks } from "../callbacks/manager.js";
  import { BaseLanguageModel, type BaseLanguageModelCallOptions, type BaseLanguageModelInput, type BaseLanguageModelParams } from "./base.js";
- import { RunnableConfig } from "../runnables/config.js";
+ import type { RunnableConfig } from "../runnables/config.js";
+ import type { BaseCache } from "../caches.js";
  export type SerializedLLM = {
  _model: string;
  _type: string;
@@ -16,6 +17,13 @@ export interface BaseLLMParams extends BaseLanguageModelParams {
  }
  export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
  }
+ interface LLMGenerateCachedParameters<T extends BaseLLM<CallOptions>, CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> {
+ prompts: string[];
+ cache: BaseCache<Generation[]>;
+ llmStringKey: string;
+ parsedOptions: T["ParsedCallOptions"];
+ handledOptions: RunnableConfig;
+ }
  /**
  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
  */
@@ -55,6 +63,9 @@ export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = B
  _flattenLLMResult(llmResult: LLMResult): LLMResult[];
  /** @ignore */
  _generateUncached(prompts: string[], parsedOptions: this["ParsedCallOptions"], handledOptions: BaseCallbackConfig): Promise<LLMResult>;
+ _generateCached({ prompts, cache, llmStringKey, parsedOptions, handledOptions, }: LLMGenerateCachedParameters<typeof this>): Promise<LLMResult & {
+ missingPromptIndices: number[];
+ }>;
  /**
  * Run the LLM on the given prompts and input, handling caching.
  */
@@ -110,3 +121,4 @@ export declare abstract class LLM<CallOptions extends BaseLLMCallOptions = BaseL
  abstract _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
  _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
  }
+ export {};
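Both `_generateCached` declarations return `LLMResult & { missingPromptIndices: number[] }`, which is what lets the caller split work between the cache and the model. A heavily simplified sketch of that flow; callers normally never touch these underscored helpers directly, and the backfill step below is paraphrased rather than copied from the package:

```ts
import type { BaseLLM } from "@langchain/core/language_models/llms";
import type { Generation, LLMResult } from "@langchain/core/outputs";

async function generateWithCache(
  llm: BaseLLM,
  prompts: string[],
  llmStringKey: string
): Promise<Generation[][]> {
  // Assumes a cache has been configured on the model.
  const { generations, missingPromptIndices } = await llm._generateCached({
    prompts,
    cache: llm.cache!,
    llmStringKey,
    parsedOptions: {},
    handledOptions: {},
  });

  if (missingPromptIndices.length > 0) {
    // Only the prompts that were absent from the cache reach the model.
    const fresh: LLMResult = await llm._generateUncached(
      missingPromptIndices.map((i) => prompts[i]),
      {},
      {}
    );
    // Slot each freshly generated result back where its prompt missed the cache.
    fresh.generations.forEach((generation, index) => {
      generations[missingPromptIndices[index]] = generation;
    });
  }
  return generations;
}
```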
@@ -1,5 +1,5 @@
- import { AIMessage, getBufferString } from "../messages/index.js";
- import { RUN_KEY, GenerationChunk } from "../outputs.js";
+ import { AIMessage, getBufferString, } from "../messages/index.js";
+ import { RUN_KEY, GenerationChunk, } from "../outputs.js";
  import { CallbackManager, } from "../callbacks/manager.js";
  import { BaseLanguageModel, } from "./base.js";
  /**
@@ -150,6 +150,64 @@ export class BaseLLM extends BaseLanguageModel {
  });
  return output;
  }
+ async _generateCached({ prompts, cache, llmStringKey, parsedOptions, handledOptions, }) {
+ const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
+ const extra = {
+ options: parsedOptions,
+ invocation_params: this?.invocationParams(parsedOptions),
+ batch_size: prompts.length,
+ cached: true,
+ };
+ const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, undefined, undefined, extra, undefined, undefined, handledOptions?.runName);
+ // generate results
+ const missingPromptIndices = [];
+ const results = await Promise.allSettled(prompts.map(async (prompt, index) => {
+ const result = await cache.lookup(prompt, llmStringKey);
+ if (result == null) {
+ missingPromptIndices.push(index);
+ }
+ return result;
+ }));
+ // Map run managers to the results before filtering out null results
+ // Null results are just absent from the cache.
+ const cachedResults = results
+ .map((result, index) => ({ result, runManager: runManagers?.[index] }))
+ .filter(({ result }) => (result.status === "fulfilled" && result.value != null) ||
+ result.status === "rejected");
+ // Handle results and call run managers
+ const generations = [];
+ await Promise.all(cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
+ if (promiseResult.status === "fulfilled") {
+ const result = promiseResult.value;
+ generations[i] = result;
+ if (result.length) {
+ await runManager?.handleLLMNewToken(result[0].text);
+ }
+ return runManager?.handleLLMEnd({
+ generations: [result],
+ });
+ }
+ else {
+ // status === "rejected"
+ await runManager?.handleLLMError(promiseResult.reason);
+ return Promise.reject(promiseResult.reason);
+ }
+ }));
+ const output = {
+ generations,
+ missingPromptIndices,
+ };
+ // This defines RUN_KEY as a non-enumerable property on the output object
+ // so that it is not serialized when the output is stringified, and so that
+ // it isnt included when listing the keys of the output object.
+ Object.defineProperty(output, RUN_KEY, {
+ value: runManagers
+ ? { runIds: runManagers?.map((manager) => manager.runId) }
+ : undefined,
+ configurable: true,
+ });
+ return output;
+ }
  /**
  * Run the LLM on the given prompts and input, handling caching.
  */
@@ -171,14 +229,13 @@ export class BaseLLM extends BaseLanguageModel {
  }
  const { cache } = this;
  const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
- const missingPromptIndices = [];
- const generations = await Promise.all(prompts.map(async (prompt, index) => {
- const result = await cache.lookup(prompt, llmStringKey);
- if (!result) {
- missingPromptIndices.push(index);
- }
- return result;
- }));
+ const { generations, missingPromptIndices } = await this._generateCached({
+ prompts,
+ cache,
+ llmStringKey,
+ parsedOptions: callOptions,
+ handledOptions: runnableConfig,
+ });
  let llmOutput = {};
  if (missingPromptIndices.length > 0) {
  const results = await this._generateUncached(missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig);
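As the comment in the hunk above notes, `RUN_KEY` is attached with `Object.defineProperty` so run metadata rides along on the result without showing up in serialization. A standalone demo of that property-descriptor trick (the local `RUN_KEY` constant is a stand-in for the one imported from `../outputs.js`):

```ts
const RUN_KEY = "__run"; // stand-in for the exported constant

const output: Record<string, unknown> = { generations: [] };

// `enumerable` defaults to false, so the property is invisible to
// JSON.stringify and Object.keys; `configurable: true` still allows it
// to be redefined later.
Object.defineProperty(output, RUN_KEY, {
  value: { runIds: ["run-id-1"] },
  configurable: true,
});

console.log(Object.keys(output));    // ["generations"]
console.log(JSON.stringify(output)); // {"generations":[]}
console.log(output[RUN_KEY]);        // { runIds: ["run-id-1"] }
```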
@@ -1,9 +1,28 @@
  import { CallbackManager, CallbackManagerForChainRun, BaseCallbackConfig } from "../callbacks/manager.js";
  import { LogStreamCallbackHandlerInput, RunLogPatch } from "../tracers/log_stream.js";
  import { Serializable } from "../load/serializable.js";
- import { IterableReadableStream } from "../utils/stream.js";
+ import { IterableReadableStream, type IterableReadableStreamInterface } from "../utils/stream.js";
  import { RunnableConfig } from "./config.js";
  import { Run } from "../tracers/base.js";
+ /**
+ * Base interface implemented by all runnables.
+ * Used for cross-compatibility between different versions of LangChain core.
+ *
+ * Should not change on patch releases.
+ */
+ export interface RunnableInterface<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig> {
+ invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;
+ batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
+ returnExceptions?: false;
+ }): Promise<RunOutput[]>;
+ batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
+ returnExceptions: true;
+ }): Promise<(RunOutput | Error)[]>;
+ batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
+ batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
+ stream(input: RunInput, options?: Partial<CallOptions>): Promise<IterableReadableStreamInterface<RunOutput>>;
+ transform(generator: AsyncGenerator<RunInput>, options: Partial<CallOptions>): AsyncGenerator<RunOutput>;
+ }
  export type RunnableFunc<RunInput, RunOutput> = (input: RunInput, options?: {
  config?: RunnableConfig;
  } | Record<string, any> | (Record<string, any> & {
@@ -12,7 +31,7 @@ export type RunnableFunc<RunInput, RunOutput> = (input: RunInput, options?: {
  export type RunnableMapLike<RunInput, RunOutput> = {
  [K in keyof RunOutput]: RunnableLike<RunInput, RunOutput[K]>;
  };
- export type RunnableLike<RunInput = any, RunOutput = any> = Runnable<RunInput, RunOutput> | RunnableFunc<RunInput, RunOutput> | RunnableMapLike<RunInput, RunOutput>;
+ export type RunnableLike<RunInput = any, RunOutput = any> = RunnableInterface<RunInput, RunOutput> | RunnableFunc<RunInput, RunOutput> | RunnableMapLike<RunInput, RunOutput>;
  export type RunnableBatchOptions = {
  maxConcurrency?: number;
  returnExceptions?: boolean;
@@ -22,7 +41,7 @@ export type RunnableRetryFailedAttemptHandler = (error: any) => any;
  * A Runnable is a generic unit of work that can be invoked, batched, streamed, and/or
  * transformed.
  */
- export declare abstract class Runnable<RunInput = any, RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig> extends Serializable {
+ export declare abstract class Runnable<RunInput = any, RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig> extends Serializable implements RunnableInterface<RunInput, RunOutput, CallOptions> {
  protected lc_runnable: boolean;
  abstract invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;
  /**
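`RunnableInterface` is a structural type, and both `RunnableLike` and the `Runnable` class now reference it, so objects produced by a different installed copy or version of `@langchain/core` (which would fail an `instanceof Runnable` check) still qualify. A hedged sketch, assuming the interface is re-exported from the package's `runnables` entrypoint:

```ts
import type { RunnableInterface } from "@langchain/core/runnables";

// Accepts anything with the runnable shape, regardless of which copy of the
// package its class came from.
async function invokeTwice<I, O>(
  runnable: RunnableInterface<I, O>,
  input: I
): Promise<[O, O]> {
  return [await runnable.invoke(input), await runnable.invoke(input)];
}
```

Because the check is structural rather than nominal, inputs coerced via `RunnableLike` (for example when composing sequences) no longer have to originate from the exact same installation of the package.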
@@ -2,7 +2,7 @@ import pRetry from "p-retry";
  import { CallbackManager, } from "../callbacks/manager.js";
  import { LogStreamCallbackHandler, RunLogPatch, } from "../tracers/log_stream.js";
  import { Serializable } from "../load/serializable.js";
- import { IterableReadableStream } from "../utils/stream.js";
+ import { IterableReadableStream, } from "../utils/stream.js";
  import { getCallbackMangerForConfig, mergeConfigs, } from "./config.js";
  import { AsyncCaller } from "../utils/async_caller.js";
  import { RootListenersTracer } from "../tracers/root_listener.js";
@@ -46,6 +46,16 @@ class IterableReadableStream extends ReadableStream {
  }
  return { done: true, value: undefined }; // This cast fixes TS typing, and convention is to ignore final chunk value anyway
  }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ async throw(e) {
+ this.ensureReader();
+ if (this.locked) {
+ const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet
+ this.reader.releaseLock(); // release lock first
+ await cancelPromise; // now await it
+ }
+ throw e;
+ }
  [Symbol.asyncIterator]() {
  return this;
  }
@@ -1,4 +1,6 @@
- export declare class IterableReadableStream<T> extends ReadableStream<T> {
+ export interface IterableReadableStreamInterface<T> extends ReadableStream<T>, AsyncGenerator<T> {
+ }
+ export declare class IterableReadableStream<T> extends ReadableStream<T> implements IterableReadableStreamInterface<T> {
  reader: ReadableStreamDefaultReader<T>;
  ensureReader(): void;
  next(): Promise<{
@@ -9,6 +11,7 @@ export declare class IterableReadableStream<T> extends ReadableStream<T> {
  done: boolean;
  value: T;
  }>;
+ throw(e: any): Promise<IteratorResult<T>>;
  [Symbol.asyncIterator](): this;
  static fromReadableStream<T>(stream: ReadableStream<T>): IterableReadableStream<T>;
  static fromAsyncGenerator<T>(generator: AsyncGenerator<T>): IterableReadableStream<T>;
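`IterableReadableStreamInterface<T>` plays the same cross-compatibility role for streams: anything that is both a `ReadableStream<T>` and an async generator satisfies it, which is why `RunnableInterface.stream` above is typed against the interface rather than the concrete class. A small consumer sketch, assuming the type is exported from the `utils/stream` entrypoint:

```ts
import type { IterableReadableStreamInterface } from "@langchain/core/utils/stream";

// Works with any conforming stream, not just IterableReadableStream instances
// from this particular copy of the package.
async function collect<T>(stream: IterableReadableStreamInterface<T>): Promise<T[]> {
  const chunks: T[] = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  return chunks;
}
```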
@@ -43,6 +43,16 @@ export class IterableReadableStream extends ReadableStream {
  }
  return { done: true, value: undefined }; // This cast fixes TS typing, and convention is to ignore final chunk value anyway
  }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ async throw(e) {
+ this.ensureReader();
+ if (this.locked) {
+ const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet
+ this.reader.releaseLock(); // release lock first
+ await cancelPromise; // now await it
+ }
+ throw e;
+ }
  [Symbol.asyncIterator]() {
  return this;
  }
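The new `throw(e)` method completes the `AsyncGenerator` contract: injecting an error into the iterator now cancels the underlying reader and releases its lock before rethrowing, instead of leaving the wrapped stream locked. A hedged sketch of the behavior, assuming `IterableReadableStream` is importable from the `utils/stream` entrypoint:

```ts
import { IterableReadableStream } from "@langchain/core/utils/stream";

const stream = IterableReadableStream.fromReadableStream(
  new ReadableStream<string>({
    start(controller) {
      controller.enqueue("chunk-1");
      controller.enqueue("chunk-2");
    },
  })
);

await stream.next(); // { done: false, value: "chunk-1" } — acquires the reader

try {
  // Cancels the reader, releases its lock, then rethrows the injected error.
  await stream.throw(new Error("abort"));
} catch {
  // The source stream is no longer locked at this point.
}
```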
@@ -61,7 +61,7 @@ class FakeRunnable extends base_js_2.Runnable {
  exports.FakeRunnable = FakeRunnable;
  class FakeLLM extends llms_js_1.LLM {
  constructor(fields) {
- super({});
+ super(fields);
  Object.defineProperty(this, "response", {
  enumerable: true,
  configurable: true,
@@ -80,11 +80,13 @@ class FakeLLM extends llms_js_1.LLM {
  _llmType() {
  return "fake";
  }
- async _call(prompt) {
+ async _call(prompt, _options, runManager) {
  if (this.thrownErrorString) {
  throw new Error(this.thrownErrorString);
  }
- return this.response ?? prompt;
+ const response = this.response ?? prompt;
+ await runManager?.handleLLMNewToken(response);
+ return response;
  }
  }
  exports.FakeLLM = FakeLLM;
@@ -110,7 +112,7 @@ class FakeChatModel extends chat_models_js_1.BaseChatModel {
  _llmType() {
  return "fake";
  }
- async _generate(messages, options) {
+ async _generate(messages, options, runManager) {
  if (options?.stop?.length) {
  return {
  generations: [
@@ -122,6 +124,7 @@ class FakeChatModel extends chat_models_js_1.BaseChatModel {
  };
  }
  const text = messages.map((m) => m.content).join("\n");
+ await runManager?.handleLLMNewToken(text);
  return {
  generations: [
  {
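The `FakeLLM` and `FakeChatModel` changes matter mostly for tests: the `FakeLLM` constructor now forwards its fields to the base class (so `BaseLLMParams` options such as `cache` or `callbacks` take effect), and both fakes report their output through `handleLLMNewToken`. A hedged sketch of a test relying on both, assuming `FakeLLM` is exported from the `utils/testing` entrypoint:

```ts
import { FakeLLM } from "@langchain/core/utils/testing";

const tokens: string[] = [];
const llm = new FakeLLM({ response: "hello" });

const result = await llm.invoke("ignored prompt", {
  callbacks: [
    {
      // Now fires because _call forwards its response to the run manager.
      handleLLMNewToken(token: string) {
        tokens.push(token);
      },
    },
  ],
});

console.log(result); // "hello"
console.log(tokens); // ["hello"]
```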
@@ -2,7 +2,7 @@ import { BaseCallbackConfig, CallbackManagerForLLMRun } from "../../callbacks/ma
  import { BaseChatMessageHistory, BaseListChatMessageHistory } from "../../chat_history.js";
  import { Document } from "../../documents/document.js";
  import { BaseChatModel, BaseChatModelParams } from "../../language_models/chat_models.js";
- import { LLM } from "../../language_models/llms.js";
+ import { BaseLLMParams, LLM } from "../../language_models/llms.js";
  import { BaseMessage, AIMessage } from "../../messages/index.js";
  import { BaseOutputParser } from "../../output_parsers/base.js";
  import { GenerationChunk, type ChatResult, ChatGenerationChunk } from "../../outputs.js";
@@ -31,9 +31,9 @@ export declare class FakeLLM extends LLM {
  constructor(fields: {
  response?: string;
  thrownErrorString?: string;
- });
+ } & BaseLLMParams);
  _llmType(): string;
- _call(prompt: string): Promise<string>;
+ _call(prompt: string, _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
  }
  export declare class FakeStreamingLLM extends LLM {
  _llmType(): string;
@@ -43,7 +43,7 @@ export declare class FakeStreamingLLM extends LLM {
  export declare class FakeChatModel extends BaseChatModel {
  _combineLLMOutput(): never[];
  _llmType(): string;
- _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"]): Promise<ChatResult>;
+ _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
  }
  export declare class FakeRetriever extends BaseRetriever {
  lc_namespace: string[];
@@ -56,7 +56,7 @@ export class FakeRunnable extends Runnable {
  }
  export class FakeLLM extends LLM {
  constructor(fields) {
- super({});
+ super(fields);
  Object.defineProperty(this, "response", {
  enumerable: true,
  configurable: true,
@@ -75,11 +75,13 @@ export class FakeLLM extends LLM {
  _llmType() {
  return "fake";
  }
- async _call(prompt) {
+ async _call(prompt, _options, runManager) {
  if (this.thrownErrorString) {
  throw new Error(this.thrownErrorString);
  }
- return this.response ?? prompt;
+ const response = this.response ?? prompt;
+ await runManager?.handleLLMNewToken(response);
+ return response;
  }
  }
  export class FakeStreamingLLM extends LLM {
@@ -103,7 +105,7 @@ export class FakeChatModel extends BaseChatModel {
  _llmType() {
  return "fake";
  }
- async _generate(messages, options) {
+ async _generate(messages, options, runManager) {
  if (options?.stop?.length) {
  return {
  generations: [
@@ -115,6 +117,7 @@ export class FakeChatModel extends BaseChatModel {
  };
  }
  const text = messages.map((m) => m.content).join("\n");
+ await runManager?.handleLLMNewToken(text);
  return {
  generations: [
  {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@langchain/core",
- "version": "0.1.1",
+ "version": "0.1.2-rc.0",
  "description": "Core LangChain.js abstractions and schemas",
  "type": "module",
  "engines": {