@langchain/core 0.1.8 → 0.1.9-rc.0

This diff shows the changes between publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
@@ -105,7 +105,13 @@ export interface BaseLanguageModelInterface<RunOutput = any, CallOptions extends
  CallOptions: CallOptions;
  get callKeys(): string[];
  generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
+ /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ */
  predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
+ /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ */
  predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
  _modelType(): string;
  _llmType(): string;
@@ -135,7 +141,13 @@ export declare abstract class BaseLanguageModel<RunOutput = any, CallOptions ext
  cache?: BaseCache;
  constructor({ callbacks, callbackManager, ...params }: BaseLanguageModelParams);
  abstract generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
+ /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ */
  abstract predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
+ /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ */
  abstract predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
  abstract _modelType(): string;
  abstract _llmType(): string;
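
Migration sketch (not part of the diff itself): the deprecated predict()/predictMessages() methods are both covered by .invoke(). The ChatAnthropic class below is illustrative; any BaseLanguageModel subclass behaves the same way.

```typescript
// Hedged sketch: assumes @langchain/anthropic is installed and an API key is set.
import { ChatAnthropic } from "@langchain/anthropic";
import { HumanMessage } from "@langchain/core/messages";

const model = new ChatAnthropic({});

// Deprecated as of this release, slated for removal in 0.2.0:
const text = await model.predict("What is 2 + 2?");
const message = await model.predictMessages([new HumanMessage("What is 2 + 2?")]);

// Preferred: .invoke() accepts plain strings, message arrays, and prompt values.
const reply = await model.invoke("What is 2 + 2?");
```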
@@ -286,6 +286,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
  return this.generate(promptMessages, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Makes a single call to the chat model.
  * @param messages An array of BaseMessage instances.
  * @param options The call options or an array of stop sequences.
@@ -298,6 +300,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
  return generations[0][0].message;
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Makes a single call to the chat model with a prompt value.
  * @param promptValue The value of the prompt.
  * @param options The call options or an array of stop sequences.
@@ -309,6 +313,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
  return this.call(promptMessages, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Predicts the next message based on the input messages.
  * @param messages An array of BaseMessage instances.
  * @param options The call options or an array of stop sequences.
@@ -319,6 +325,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
  return this.call(messages, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Predicts the next message based on a text input.
  * @param text The text input.
  * @param options The call options or an array of stop sequences.
@@ -93,6 +93,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
  generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
  abstract _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Makes a single call to the chat model.
  * @param messages An array of BaseMessage instances.
  * @param options The call options or an array of stop sequences.
@@ -101,6 +103,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
  */
  call(messages: BaseMessageLike[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Makes a single call to the chat model with a prompt value.
  * @param promptValue The value of the prompt.
  * @param options The call options or an array of stop sequences.
@@ -109,6 +113,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
  */
  callPrompt(promptValue: BasePromptValueInterface, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Predicts the next message based on the input messages.
  * @param messages An array of BaseMessage instances.
  * @param options The call options or an array of stop sequences.
@@ -117,6 +123,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
  */
  predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Predicts the next message based on a text input.
  * @param text The text input.
  * @param options The call options or an array of stop sequences.
@@ -282,6 +282,8 @@ export class BaseChatModel extends BaseLanguageModel {
  return this.generate(promptMessages, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Makes a single call to the chat model.
  * @param messages An array of BaseMessage instances.
  * @param options The call options or an array of stop sequences.
@@ -294,6 +296,8 @@ export class BaseChatModel extends BaseLanguageModel {
  return generations[0][0].message;
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Makes a single call to the chat model with a prompt value.
  * @param promptValue The value of the prompt.
  * @param options The call options or an array of stop sequences.
@@ -305,6 +309,8 @@ export class BaseChatModel extends BaseLanguageModel {
  return this.call(promptMessages, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Predicts the next message based on the input messages.
  * @param messages An array of BaseMessage instances.
  * @param options The call options or an array of stop sequences.
@@ -315,6 +321,8 @@ export class BaseChatModel extends BaseLanguageModel {
  return this.call(messages, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * Predicts the next message based on a text input.
  * @param text The text input.
  * @param options The call options or an array of stop sequences.
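
The chat model's call() and callPrompt() helpers follow the same pattern. A minimal sketch, assuming a concrete model and prompt (neither appears in this diff):

```typescript
// callPrompt(promptValue) and call(messages) both collapse into .invoke(),
// which accepts prompt values and message arrays alike.
import { ChatAnthropic } from "@langchain/anthropic";
import { ChatPromptTemplate } from "@langchain/core/prompts";

const chatModel = new ChatAnthropic({});
const promptValue = await ChatPromptTemplate.fromMessages([
  ["human", "Summarize: {input}"],
]).invoke({ input: "LCEL chains compose runnables." });

const viaCallPrompt = await chatModel.callPrompt(promptValue); // deprecated
const viaInvoke = await chatModel.invoke(promptValue);         // preferred
```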
@@ -6,7 +6,7 @@ const outputs_js_1 = require("../outputs.cjs");
  const manager_js_1 = require("../callbacks/manager.cjs");
  const base_js_1 = require("./base.cjs");
  /**
- * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
+ * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
  class BaseLLM extends base_js_1.BaseLanguageModel {
  constructor({ concurrency, ...rest }) {
@@ -252,6 +252,7 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
  return { generations, llmOutput };
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
  * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
  */
  async call(prompt, options, callbacks) {
@@ -259,6 +260,8 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
  return generations[0][0].text;
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * This method is similar to `call`, but it's used for making predictions
  * based on the input text.
  * @param text Input text for the prediction.
@@ -270,6 +273,8 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
  return this.call(text, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * This method takes a list of messages, options, and callbacks, and
  * returns a predicted message.
  * @param messages A list of messages for the prediction.
@@ -25,7 +25,7 @@ interface LLMGenerateCachedParameters<T extends BaseLLM<CallOptions>, CallOption
  handledOptions: RunnableConfig;
  }
  /**
- * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
+ * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
  export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
  ParsedCallOptions: Omit<CallOptions, keyof RunnableConfig & "timeout">;
@@ -71,10 +71,13 @@ export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = B
  */
  generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
  * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
  */
  call(prompt: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * This method is similar to `call`, but it's used for making predictions
  * based on the input text.
  * @param text Input text for the prediction.
@@ -84,6 +87,8 @@ export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = B
  */
  predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * This method takes a list of messages, options, and callbacks, and
  * returns a predicted message.
  * @param messages A list of messages for the prediction.
@@ -3,7 +3,7 @@ import { RUN_KEY, GenerationChunk, } from "../outputs.js";
  import { CallbackManager, } from "../callbacks/manager.js";
  import { BaseLanguageModel, } from "./base.js";
  /**
- * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
+ * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
  export class BaseLLM extends BaseLanguageModel {
  constructor({ concurrency, ...rest }) {
@@ -249,6 +249,7 @@ export class BaseLLM extends BaseLanguageModel {
  return { generations, llmOutput };
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
  * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
  */
  async call(prompt, options, callbacks) {
@@ -256,6 +257,8 @@ export class BaseLLM extends BaseLanguageModel {
  return generations[0][0].text;
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * This method is similar to `call`, but it's used for making predictions
  * based on the input text.
  * @param text Input text for the prediction.
@@ -267,6 +270,8 @@ export class BaseLLM extends BaseLanguageModel {
  return this.call(text, options, callbacks);
  }
  /**
+ * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+ *
  * This method takes a list of messages, options, and callbacks, and
  * returns a predicted message.
  * @param messages A list of messages for the prediction.
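
For plain text-completion models the substitution is identical; a minimal sketch (the OpenAI class is an assumption, the diff only touches BaseLLM):

```typescript
// Both calls return a string for BaseLLM subclasses.
import { OpenAI } from "@langchain/openai";

const llm = new OpenAI({});
const before = await llm.call("Write a haiku about version bumps");  // deprecated
const after = await llm.invoke("Write a haiku about version bumps"); // preferred
```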
@@ -4,6 +4,70 @@ exports.RunnableWithMessageHistory = void 0;
  const index_js_1 = require("../messages/index.cjs");
  const base_js_1 = require("./base.cjs");
  const passthrough_js_1 = require("./passthrough.cjs");
+ /**
+ * Wraps a LCEL chain and manages history. It appends input messages
+ * and chain outputs as history, and adds the current history messages to
+ * the chain input.
+ * @example
+ * ```typescript
+ * // yarn add @langchain/anthropic @langchain/community @upstash/redis
+ *
+ * import {
+ *   ChatPromptTemplate,
+ *   MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
+ * // For demos, you can also use an in-memory store:
+ * // import { ChatMessageHistory } from "langchain/stores/message/in_memory";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["system", "You're an assistant who's good at {ability}"],
+ *   new MessagesPlaceholder("history"),
+ *   ["human", "{question}"],
+ * ]);
+ *
+ * const chain = prompt.pipe(new ChatAnthropic({}));
+ *
+ * const chainWithHistory = new RunnableWithMessageHistory({
+ *   runnable: chain,
+ *   getMessageHistory: (sessionId) =>
+ *     new UpstashRedisChatMessageHistory({
+ *       sessionId,
+ *       config: {
+ *         url: process.env.UPSTASH_REDIS_REST_URL!,
+ *         token: process.env.UPSTASH_REDIS_REST_TOKEN!,
+ *       },
+ *     }),
+ *   inputMessagesKey: "question",
+ *   historyMessagesKey: "history",
+ * });
+ *
+ * const result = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What does cosine mean?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ *
+ * const result2 = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What's its inverse?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ * ```
+ */
  class RunnableWithMessageHistory extends base_js_1.RunnableBinding {
  constructor(fields) {
  let historyChain = new base_js_1.RunnableLambda({
@@ -12,6 +12,70 @@ export interface RunnableWithMessageHistoryInputs<RunInput, RunOutput> extends O
  historyMessagesKey?: string;
  config?: RunnableConfig;
  }
+ /**
+ * Wraps a LCEL chain and manages history. It appends input messages
+ * and chain outputs as history, and adds the current history messages to
+ * the chain input.
+ * @example
+ * ```typescript
+ * // yarn add @langchain/anthropic @langchain/community @upstash/redis
+ *
+ * import {
+ *   ChatPromptTemplate,
+ *   MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
+ * // For demos, you can also use an in-memory store:
+ * // import { ChatMessageHistory } from "langchain/stores/message/in_memory";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["system", "You're an assistant who's good at {ability}"],
+ *   new MessagesPlaceholder("history"),
+ *   ["human", "{question}"],
+ * ]);
+ *
+ * const chain = prompt.pipe(new ChatAnthropic({}));
+ *
+ * const chainWithHistory = new RunnableWithMessageHistory({
+ *   runnable: chain,
+ *   getMessageHistory: (sessionId) =>
+ *     new UpstashRedisChatMessageHistory({
+ *       sessionId,
+ *       config: {
+ *         url: process.env.UPSTASH_REDIS_REST_URL!,
+ *         token: process.env.UPSTASH_REDIS_REST_TOKEN!,
+ *       },
+ *     }),
+ *   inputMessagesKey: "question",
+ *   historyMessagesKey: "history",
+ * });
+ *
+ * const result = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What does cosine mean?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ *
+ * const result2 = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What's its inverse?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ * ```
+ */
  export declare class RunnableWithMessageHistory<RunInput, RunOutput> extends RunnableBinding<RunInput, RunOutput> {
  runnable: Runnable<RunInput, RunOutput>;
  inputMessagesKey?: string;
@@ -1,6 +1,70 @@
  import { AIMessage, HumanMessage, isBaseMessage, } from "../messages/index.js";
  import { RunnableBinding, RunnableLambda, } from "./base.js";
  import { RunnablePassthrough } from "./passthrough.js";
+ /**
+ * Wraps a LCEL chain and manages history. It appends input messages
+ * and chain outputs as history, and adds the current history messages to
+ * the chain input.
+ * @example
+ * ```typescript
+ * // yarn add @langchain/anthropic @langchain/community @upstash/redis
+ *
+ * import {
+ *   ChatPromptTemplate,
+ *   MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
+ * // For demos, you can also use an in-memory store:
+ * // import { ChatMessageHistory } from "langchain/stores/message/in_memory";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["system", "You're an assistant who's good at {ability}"],
+ *   new MessagesPlaceholder("history"),
+ *   ["human", "{question}"],
+ * ]);
+ *
+ * const chain = prompt.pipe(new ChatAnthropic({}));
+ *
+ * const chainWithHistory = new RunnableWithMessageHistory({
+ *   runnable: chain,
+ *   getMessageHistory: (sessionId) =>
+ *     new UpstashRedisChatMessageHistory({
+ *       sessionId,
+ *       config: {
+ *         url: process.env.UPSTASH_REDIS_REST_URL!,
+ *         token: process.env.UPSTASH_REDIS_REST_TOKEN!,
+ *       },
+ *     }),
+ *   inputMessagesKey: "question",
+ *   historyMessagesKey: "history",
+ * });
+ *
+ * const result = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What does cosine mean?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ *
+ * const result2 = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What's its inverse?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ * ```
+ */
  export class RunnableWithMessageHistory extends RunnableBinding {
  constructor(fields) {
  let historyChain = new RunnableLambda({
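
As the commented-out import in the new doc comment hints, the Upstash-backed store can be swapped for an in-memory history in demos. A minimal sketch under that assumption (the import paths for ChatMessageHistory and RunnableWithMessageHistory are taken on faith from the comment and the package's runnables entrypoint):

```typescript
import { ChatMessageHistory } from "langchain/stores/message/in_memory";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import { ChatAnthropic } from "@langchain/anthropic";

const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You're an assistant who's good at {ability}"],
  new MessagesPlaceholder("history"),
  ["human", "{question}"],
]);

// Keep one history object per session id so turns accumulate across calls.
const histories: Record<string, ChatMessageHistory> = {};

const chainWithHistory = new RunnableWithMessageHistory({
  runnable: prompt.pipe(new ChatAnthropic({})),
  getMessageHistory: (sessionId) =>
    (histories[sessionId] ??= new ChatMessageHistory()),
  inputMessagesKey: "question",
  historyMessagesKey: "history",
});
```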
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@langchain/core",
- "version": "0.1.8",
+ "version": "0.1.9-rc.0",
  "description": "Core LangChain.js abstractions and schemas",
  "type": "module",
  "engines": {