langchain 0.0.153 → 0.0.154

This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
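The thread running through this release: response caching moves up from BaseLLM into the shared language-model base class, so chat models gain it too. BaseChatModel.generate() is split into a cache-aware public method and an internal _generateUncached(); cache keys now come from a new _getSerializedCacheKeyParametersForCall() helper instead of the now-deprecated serialize(); and the message-mapping helper that rehydration needs (mapStoredMessageToChatMessage) is hoisted from stores/message/utils into schema, alongside a new StoredGeneration interface. The cache-option handling removed from the BaseLLM constructor below presumably lands in BaseLanguageModel, which is outside this diff. A minimal sketch of what the change enables, with ChatOpenAI standing in for any chat model:

    // Chat models can now take the `cache` constructor option that BaseLLM
    // previously handled itself; `cache: true` selects the global in-memory
    // cache. Assumes the OpenAI integration is installed and configured.
    import { ChatOpenAI } from "langchain/chat_models/openai";
    import { HumanMessage } from "langchain/schema";

    async function main() {
      const model = new ChatOpenAI({ cache: true });
      // The first call hits the API; the identical second call should be
      // served from the cache, since both yield the same prompt string and
      // serialized parameter key.
      const first = await model.generate([[new HumanMessage("Tell me a joke")]]);
      const second = await model.generate([[new HumanMessage("Tell me a joke")]]);
      console.log(first.generations[0][0].text === second.generations[0][0].text);
    }

    main().catch(console.error);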

package/dist/chat_models/base.cjs CHANGED
@@ -94,33 +94,18 @@ class BaseChatModel extends index_js_2.BaseLanguageModel {
             })));
         }
     }
-    /**
-     * Generates chat based on the input messages.
-     * @param messages An array of arrays of BaseMessage instances.
-     * @param options The call options or an array of stop sequences.
-     * @param callbacks The callbacks for the language model.
-     * @returns A Promise that resolves to an LLMResult.
-     */
-    async generate(messages, options, callbacks) {
-        // parse call options
-        let parsedOptions;
-        if (Array.isArray(options)) {
-            parsedOptions = { stop: options };
-        }
-        else {
-            parsedOptions = options;
-        }
+    /** @ignore */
+    async _generateUncached(messages, parsedOptions, handledOptions) {
         const baseMessages = messages.map((messageList) => messageList.map(index_js_1.coerceMessageLikeToMessage));
-        const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
         // create callback manager and start run
-        const callbackManager_ = await manager_js_1.CallbackManager.configure(runnableConfig.callbacks ?? callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
+        const callbackManager_ = await manager_js_1.CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
         const extra = {
-            options: callOptions,
+            options: parsedOptions,
             invocation_params: this?.invocationParams(parsedOptions),
         };
         const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, undefined, undefined, extra);
         // generate results
-        const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...callOptions, promptIndex: i }, runManagers?.[i])));
+        const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
         // handle results
         const generations = [];
         const llmOutputs = [];
@@ -155,6 +140,54 @@ class BaseChatModel extends index_js_2.BaseLanguageModel {
         });
         return output;
     }
+    /**
+     * Generates chat based on the input messages.
+     * @param messages An array of arrays of BaseMessage instances.
+     * @param options The call options or an array of stop sequences.
+     * @param callbacks The callbacks for the language model.
+     * @returns A Promise that resolves to an LLMResult.
+     */
+    async generate(messages, options, callbacks) {
+        // parse call options
+        let parsedOptions;
+        if (Array.isArray(options)) {
+            parsedOptions = { stop: options };
+        }
+        else {
+            parsedOptions = options;
+        }
+        const baseMessages = messages.map((messageList) => messageList.map(index_js_1.coerceMessageLikeToMessage));
+        const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
+        runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
+        if (!this.cache) {
+            return this._generateUncached(baseMessages, callOptions, runnableConfig);
+        }
+        const { cache } = this;
+        const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
+        const missingPromptIndices = [];
+        const generations = await Promise.all(baseMessages.map(async (baseMessage, index) => {
+            // Join all content into one string for the prompt index
+            const prompt = BaseChatModel._convertInputToPromptValue(baseMessage).toString();
+            const result = await cache.lookup(prompt, llmStringKey);
+            if (!result) {
+                missingPromptIndices.push(index);
+            }
+            return result;
+        }));
+        let llmOutput = {};
+        if (missingPromptIndices.length > 0) {
+            const results = await this._generateUncached(missingPromptIndices.map((i) => baseMessages[i]), callOptions, runnableConfig);
+            await Promise.all(results.generations.map(async (generation, index) => {
+                const promptIndex = missingPromptIndices[index];
+                generations[promptIndex] = generation;
+                // Join all content into one string for the prompt index
+                const prompt = BaseChatModel._convertInputToPromptValue(baseMessages[promptIndex]).toString();
+                return cache.update(prompt, llmStringKey, generation);
+            }));
+            llmOutput = results.llmOutput ?? {};
+        }
+        return { generations, llmOutput };
+    }
     /**
      * Get the parameters used to invoke the model
      */
@@ -165,6 +198,17 @@ class BaseChatModel extends index_js_2.BaseLanguageModel {
     _modelType() {
         return "base_chat_model";
     }
+    /**
+     * @deprecated
+     * Return a json-like object representing this LLM.
+     */
+    serialize() {
+        return {
+            ...this.invocationParams(),
+            _type: this._llmType(),
+            _model: this._modelType(),
+        };
+    }
     /**
      * Generates a prompt based on the input prompt values.
      * @param promptValues An array of BasePromptValue instances.
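The new generate() above follows a lookup-then-fill pattern: every prompt is checked against the cache, the indices of the misses are recorded, only the misses go through _generateUncached(), and the fresh generations are written back to the cache and spliced into their original positions. The same pattern in isolation, against an illustrative cache interface rather than the library's actual BaseCache:

    // Generic lookup-then-fill caching, mirroring the logic of the new
    // generate(); `Cache` here is a stand-in interface, not langchain's.
    interface Cache<T> {
      lookup(key: string): Promise<T | null>;
      update(key: string, value: T): Promise<void>;
    }

    async function generateWithCache<T>(
      prompts: string[],
      cache: Cache<T>,
      generate: (prompts: string[]) => Promise<T[]>
    ): Promise<T[]> {
      const results: (T | null)[] = await Promise.all(
        prompts.map((p) => cache.lookup(p))
      );
      // Record which prompts missed the cache, preserving their positions.
      const missing = results
        .map((r, i) => (r === null ? i : -1))
        .filter((i) => i >= 0);
      if (missing.length > 0) {
        const fresh = await generate(missing.map((i) => prompts[i]));
        await Promise.all(
          fresh.map(async (value, j) => {
            results[missing[j]] = value; // splice back into original order
            await cache.update(prompts[missing[j]], value);
          })
        );
      }
      return results as T[];
    }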

package/dist/chat_models/base.d.ts CHANGED
@@ -49,6 +49,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
     invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise<BaseMessageChunk>;
     _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
     _streamIterator(input: BaseLanguageModelInput, options?: CallOptions): AsyncGenerator<BaseMessageChunk>;
+    /** @ignore */
+    _generateUncached(messages: BaseMessageLike[][], parsedOptions: this["ParsedCallOptions"], handledOptions: RunnableConfig): Promise<LLMResult>;
     /**
      * Generates chat based on the input messages.
      * @param messages An array of arrays of BaseMessage instances.
@@ -63,6 +65,11 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
     invocationParams(_options?: this["ParsedCallOptions"]): any;
     _modelType(): string;
     abstract _llmType(): string;
+    /**
+     * @deprecated
+     * Return a json-like object representing this LLM.
+     */
+    serialize(): SerializedLLM;
     /**
      * Generates a prompt based on the input prompt values.
      * @param promptValues An array of BasePromptValue instances.
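The declarations make the new division of labor explicit: generate() still accepts user-facing CallOptions, while _generateUncached() receives already-parsed call options plus a RunnableConfig. The split is performed by _separateRunnableConfigFromCallOptions(), whose implementation is not in this diff; judging by the destructuring in generate(), it behaves roughly like this sketch (an assumption, not library code):

    // RunnableConfig keys (callbacks, tags, metadata) are peeled off for the
    // callback machinery; everything else stays a model call option.
    type RunnableConfig = {
      callbacks?: unknown;
      tags?: string[];
      metadata?: Record<string, unknown>;
    };

    function separateRunnableConfigFromCallOptions<O extends RunnableConfig>(
      options: O
    ): [RunnableConfig, Omit<O, keyof RunnableConfig>] {
      const { callbacks, tags, metadata, ...callOptions } = options;
      return [{ callbacks, tags, metadata }, callOptions];
    }

    // Example: stop sequences stay with the model; tags go to the callbacks.
    const [config, callOptions] = separateRunnableConfigFromCallOptions({
      stop: ["\n"],
      tags: ["my-run"],
    });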

package/dist/chat_models/base.js CHANGED
@@ -90,33 +90,18 @@ export class BaseChatModel extends BaseLanguageModel {
             })));
         }
     }
-    /**
-     * Generates chat based on the input messages.
-     * @param messages An array of arrays of BaseMessage instances.
-     * @param options The call options or an array of stop sequences.
-     * @param callbacks The callbacks for the language model.
-     * @returns A Promise that resolves to an LLMResult.
-     */
-    async generate(messages, options, callbacks) {
-        // parse call options
-        let parsedOptions;
-        if (Array.isArray(options)) {
-            parsedOptions = { stop: options };
-        }
-        else {
-            parsedOptions = options;
-        }
+    /** @ignore */
+    async _generateUncached(messages, parsedOptions, handledOptions) {
         const baseMessages = messages.map((messageList) => messageList.map(coerceMessageLikeToMessage));
-        const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
         // create callback manager and start run
-        const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks ?? callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
+        const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
         const extra = {
-            options: callOptions,
+            options: parsedOptions,
             invocation_params: this?.invocationParams(parsedOptions),
         };
         const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, undefined, undefined, extra);
         // generate results
-        const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...callOptions, promptIndex: i }, runManagers?.[i])));
+        const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
         // handle results
         const generations = [];
         const llmOutputs = [];
@@ -151,6 +136,54 @@ export class BaseChatModel extends BaseLanguageModel {
         });
         return output;
     }
+    /**
+     * Generates chat based on the input messages.
+     * @param messages An array of arrays of BaseMessage instances.
+     * @param options The call options or an array of stop sequences.
+     * @param callbacks The callbacks for the language model.
+     * @returns A Promise that resolves to an LLMResult.
+     */
+    async generate(messages, options, callbacks) {
+        // parse call options
+        let parsedOptions;
+        if (Array.isArray(options)) {
+            parsedOptions = { stop: options };
+        }
+        else {
+            parsedOptions = options;
+        }
+        const baseMessages = messages.map((messageList) => messageList.map(coerceMessageLikeToMessage));
+        const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
+        runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
+        if (!this.cache) {
+            return this._generateUncached(baseMessages, callOptions, runnableConfig);
+        }
+        const { cache } = this;
+        const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
+        const missingPromptIndices = [];
+        const generations = await Promise.all(baseMessages.map(async (baseMessage, index) => {
+            // Join all content into one string for the prompt index
+            const prompt = BaseChatModel._convertInputToPromptValue(baseMessage).toString();
+            const result = await cache.lookup(prompt, llmStringKey);
+            if (!result) {
+                missingPromptIndices.push(index);
+            }
+            return result;
+        }));
+        let llmOutput = {};
+        if (missingPromptIndices.length > 0) {
+            const results = await this._generateUncached(missingPromptIndices.map((i) => baseMessages[i]), callOptions, runnableConfig);
+            await Promise.all(results.generations.map(async (generation, index) => {
+                const promptIndex = missingPromptIndices[index];
+                generations[promptIndex] = generation;
+                // Join all content into one string for the prompt index
+                const prompt = BaseChatModel._convertInputToPromptValue(baseMessages[promptIndex]).toString();
+                return cache.update(prompt, llmStringKey, generation);
+            }));
+            llmOutput = results.llmOutput ?? {};
+        }
+        return { generations, llmOutput };
+    }
     /**
      * Get the parameters used to invoke the model
      */
@@ -161,6 +194,17 @@ export class BaseChatModel extends BaseLanguageModel {
     _modelType() {
         return "base_chat_model";
     }
+    /**
+     * @deprecated
+     * Return a json-like object representing this LLM.
+     */
+    serialize() {
+        return {
+            ...this.invocationParams(),
+            _type: this._llmType(),
+            _model: this._modelType(),
+        };
+    }
     /**
      * Generates a prompt based on the input prompt values.
      * @param promptValues An array of BasePromptValue instances.

package/dist/llms/base.cjs CHANGED
@@ -1,16 +1,15 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.LLM = exports.BaseLLM = void 0;
-const index_js_1 = require("../cache/index.cjs");
-const index_js_2 = require("../schema/index.cjs");
-const index_js_3 = require("../base_language/index.cjs");
+const index_js_1 = require("../schema/index.cjs");
+const index_js_2 = require("../base_language/index.cjs");
 const manager_js_1 = require("../callbacks/manager.cjs");
 const base_js_1 = require("../memory/base.cjs");
 /**
  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
  */
-class BaseLLM extends index_js_3.BaseLanguageModel {
-    constructor({ cache, concurrency, ...rest }) {
+class BaseLLM extends index_js_2.BaseLanguageModel {
+    constructor({ concurrency, ...rest }) {
         super(concurrency ? { maxConcurrency: concurrency, ...rest } : rest);
         Object.defineProperty(this, "lc_namespace", {
             enumerable: true,
@@ -18,21 +17,6 @@ class BaseLLM extends index_js_3.BaseLanguageModel {
             writable: true,
             value: ["langchain", "llms", this._llmType()]
         });
-        Object.defineProperty(this, "cache", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        if (typeof cache === "object") {
-            this.cache = cache;
-        }
-        else if (cache) {
-            this.cache = index_js_1.InMemoryCache.global();
-        }
-        else {
-            this.cache = undefined;
-        }
     }
     /**
      * This method takes an input and options, and returns a string. It
@@ -72,7 +56,7 @@ class BaseLLM extends index_js_3.BaseLanguageModel {
             invocation_params: this?.invocationParams(callOptions),
         };
         const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], undefined, undefined, extra);
-        let generation = new index_js_2.GenerationChunk({
+        let generation = new index_js_1.GenerationChunk({
            text: "",
         });
        try {
@@ -160,7 +144,7 @@ class BaseLLM extends index_js_3.BaseLanguageModel {
         // This defines RUN_KEY as a non-enumerable property on the output object
         // so that it is not serialized when the output is stringified, and so that
         // it isnt included when listing the keys of the output object.
-        Object.defineProperty(output, index_js_2.RUN_KEY, {
+        Object.defineProperty(output, index_js_1.RUN_KEY, {
             value: runIds ? { runIds } : undefined,
             configurable: true,
         });
@@ -186,9 +170,7 @@ class BaseLLM extends index_js_3.BaseLanguageModel {
             return this._generateUncached(prompts, callOptions, runnableConfig);
         }
         const { cache } = this;
-        const params = this.serialize();
-        params.stop = callOptions.stop ?? params.stop;
-        const llmStringKey = `${Object.entries(params).sort()}`;
+        const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
         const missingPromptIndices = [];
         const generations = await Promise.all(prompts.map(async (prompt, index) => {
             const result = await cache.lookup(prompt, llmStringKey);
@@ -238,7 +220,7 @@ class BaseLLM extends index_js_3.BaseLanguageModel {
     async predictMessages(messages, options, callbacks) {
         const text = (0, base_js_1.getBufferString)(messages);
         const prediction = await this.call(text, options, callbacks);
-        return new index_js_2.AIMessage(prediction);
+        return new index_js_1.AIMessage(prediction);
     }
     /**
      * Get the identifying parameters of the LLM.
@@ -248,6 +230,7 @@ class BaseLLM extends index_js_3.BaseLanguageModel {
         return {};
     }
     /**
+     * @deprecated
      * Return a json-like object representing this LLM.
      */
     serialize() {
@@ -261,6 +244,7 @@ class BaseLLM extends index_js_3.BaseLanguageModel {
         return "base_llm";
     }
     /**
+     * @deprecated
      * Load an LLM from a json-like object describing it.
      */
     static async deserialize(data) {
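Both hierarchies now derive the cache key from _getSerializedCacheKeyParametersForCall() on the shared base class; its implementation is not part of this diff. For context, here is a distilled version of the construction it replaces. Coercing the sorted entries through a template literal joins them with commas and renders nested values as "[object Object]", so structurally different parameters could in principle collide, which is one plausible reason for the dedicated helper:

    // Stand-in values; the real code spread this.serialize() into `params`.
    const params: Record<string, unknown> = {
      temperature: 0.7,
      _type: "openai",
      _model: "base_llm",
    };
    const stopFromCallOptions: string[] | undefined = ["\n"];
    params.stop = stopFromCallOptions ?? params.stop;
    const llmStringKey = `${Object.entries(params).sort()}`;
    // -> "_model,base_llm,_type,openai,stop,\n,temperature,0.7"
    //    (where \n is a literal newline inside the key)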

package/dist/llms/base.d.ts CHANGED
@@ -1,4 +1,4 @@
-import { BaseCache, BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
+import { BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
 import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
 import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
 import { RunnableConfig } from "../schema/runnable/config.js";
@@ -11,7 +11,6 @@ export interface BaseLLMParams extends BaseLanguageModelParams {
      * @deprecated Use `maxConcurrency` instead
      */
     concurrency?: number;
-    cache?: BaseCache | boolean;
 }
 export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
 }
@@ -21,8 +20,7 @@ export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
 export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
     ParsedCallOptions: Omit<CallOptions, keyof RunnableConfig & "timeout">;
     lc_namespace: string[];
-    cache?: BaseCache;
-    constructor({ cache, concurrency, ...rest }: BaseLLMParams);
+    constructor({ concurrency, ...rest }: BaseLLMParams);
     /**
      * This method takes an input and options, and returns a string. It
      * converts the input to a prompt value and generates a result based on
@@ -90,11 +88,13 @@ export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = B
      */
     abstract _llmType(): string;
     /**
+     * @deprecated
      * Return a json-like object representing this LLM.
      */
     serialize(): SerializedLLM;
     _modelType(): string;
     /**
+     * @deprecated
      * Load an LLM from a json-like object describing it.
      */
     static deserialize(data: SerializedLLM): Promise<BaseLLM>;
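serialize() and deserialize() are deprecated across both hierarchies in this release; BaseChatModel gains a serialize() only to deprecate it in the same breath. No replacement is named in the diff, but the code shown already passes this.toJSON() (from the load/serializable system) to the callback manager, which is plausibly the intended successor; treat the second line below as a pointer rather than an official migration recipe:

    import { OpenAI } from "langchain/llms/openai";

    const llm = new OpenAI({ temperature: 0 });
    const legacy = llm.serialize();                // deprecated as of 0.0.154
    const snapshot = JSON.stringify(llm.toJSON()); // serializable-system form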
package/dist/llms/base.js CHANGED
@@ -1,4 +1,3 @@
-import { InMemoryCache } from "../cache/index.js";
 import { AIMessage, GenerationChunk, RUN_KEY, } from "../schema/index.js";
 import { BaseLanguageModel, } from "../base_language/index.js";
 import { CallbackManager, } from "../callbacks/manager.js";
@@ -7,7 +6,7 @@ import { getBufferString } from "../memory/base.js";
  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
  */
 export class BaseLLM extends BaseLanguageModel {
-    constructor({ cache, concurrency, ...rest }) {
+    constructor({ concurrency, ...rest }) {
         super(concurrency ? { maxConcurrency: concurrency, ...rest } : rest);
         Object.defineProperty(this, "lc_namespace", {
             enumerable: true,
@@ -15,21 +14,6 @@ export class BaseLLM extends BaseLanguageModel {
             writable: true,
             value: ["langchain", "llms", this._llmType()]
         });
-        Object.defineProperty(this, "cache", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        if (typeof cache === "object") {
-            this.cache = cache;
-        }
-        else if (cache) {
-            this.cache = InMemoryCache.global();
-        }
-        else {
-            this.cache = undefined;
-        }
     }
     /**
      * This method takes an input and options, and returns a string. It
@@ -183,9 +167,7 @@ export class BaseLLM extends BaseLanguageModel {
             return this._generateUncached(prompts, callOptions, runnableConfig);
         }
         const { cache } = this;
-        const params = this.serialize();
-        params.stop = callOptions.stop ?? params.stop;
-        const llmStringKey = `${Object.entries(params).sort()}`;
+        const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
         const missingPromptIndices = [];
         const generations = await Promise.all(prompts.map(async (prompt, index) => {
             const result = await cache.lookup(prompt, llmStringKey);
@@ -245,6 +227,7 @@ export class BaseLLM extends BaseLanguageModel {
         return {};
     }
     /**
+     * @deprecated
      * Return a json-like object representing this LLM.
      */
     serialize() {
@@ -258,6 +241,7 @@ export class BaseLLM extends BaseLanguageModel {
         return "base_llm";
     }
     /**
+     * @deprecated
      * Load an LLM from a json-like object describing it.
      */
     static async deserialize(data) {

package/dist/schema/index.cjs CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.coerceMessageLikeToMessage = exports.isBaseMessage = exports.ChatMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
+exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.mapStoredMessageToChatMessage = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.coerceMessageLikeToMessage = exports.isBaseMessage = exports.ChatMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
 const serializable_js_1 = require("../load/serializable.cjs");
 exports.RUN_KEY = "__run";
 /**
@@ -405,6 +405,55 @@ class ChatGenerationChunk extends GenerationChunk {
     }
 }
 exports.ChatGenerationChunk = ChatGenerationChunk;
+/**
+ * Maps messages from an older format (V1) to the current `StoredMessage`
+ * format. If the message is already in the `StoredMessage` format, it is
+ * returned as is. Otherwise, it transforms the V1 message into a
+ * `StoredMessage`. This function is important for maintaining
+ * compatibility with older message formats.
+ */
+function mapV1MessageToStoredMessage(message) {
+    // TODO: Remove this mapper when we deprecate the old message format.
+    if (message.data !== undefined) {
+        return message;
+    }
+    else {
+        const v1Message = message;
+        return {
+            type: v1Message.type,
+            data: {
+                content: v1Message.text,
+                role: v1Message.role,
+                name: undefined,
+            },
+        };
+    }
+}
+function mapStoredMessageToChatMessage(message) {
+    const storedMessage = mapV1MessageToStoredMessage(message);
+    switch (storedMessage.type) {
+        case "human":
+            return new HumanMessage(storedMessage.data);
+        case "ai":
+            return new AIMessage(storedMessage.data);
+        case "system":
+            return new SystemMessage(storedMessage.data);
+        case "function":
+            if (storedMessage.data.name === undefined) {
+                throw new Error("Name must be defined for function messages");
+            }
+            return new FunctionMessage(storedMessage.data);
+        case "chat": {
+            if (storedMessage.data.role === undefined) {
+                throw new Error("Role must be defined for chat messages");
+            }
+            return new ChatMessage(storedMessage.data);
+        }
+        default:
+            throw new Error(`Got unexpected type: ${storedMessage.type}`);
+    }
+}
+exports.mapStoredMessageToChatMessage = mapStoredMessageToChatMessage;
 /**
  * Base PromptValue class. All prompt values should extend this class.
  */

package/dist/schema/index.d.ts CHANGED
@@ -59,6 +59,10 @@ export interface StoredMessage {
     type: string;
     data: StoredMessageData;
 }
+export interface StoredGeneration {
+    text: string;
+    message?: StoredMessage;
+}
 export type MessageType = "human" | "ai" | "generic" | "system" | "function";
 export interface BaseMessageFields {
     content: string;
@@ -239,6 +243,7 @@ export declare class ChatGenerationChunk extends GenerationChunk implements Chat
     constructor(fields: ChatGenerationChunkFields);
     concat(chunk: ChatGenerationChunk): ChatGenerationChunk;
 }
+export declare function mapStoredMessageToChatMessage(message: StoredMessage): HumanMessage | AIMessage | SystemMessage | FunctionMessage | ChatMessage;
 export interface ChatResult {
     generations: ChatGeneration[];
     llmOutput?: Record<string, any>;
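schema also gains StoredGeneration, a JSON-friendly shape pairing generated text with an optional StoredMessage; it reads like groundwork for persisting chat generations (for instance, in the new chat-model cache, though the diff does not show it being consumed yet). An illustrative value:

    import type { StoredMessage } from "langchain/schema";

    // Hypothetical cached chat generation in the new shape; the layout of
    // `data` follows the mapper shown in this diff.
    const stored: { text: string; message?: StoredMessage } = {
      text: "Hello there!",
      message: {
        type: "ai",
        data: { content: "Hello there!", role: undefined, name: undefined },
      },
    };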

package/dist/schema/index.js CHANGED
@@ -386,6 +386,54 @@ export class ChatGenerationChunk extends GenerationChunk {
         });
     }
 }
+/**
+ * Maps messages from an older format (V1) to the current `StoredMessage`
+ * format. If the message is already in the `StoredMessage` format, it is
+ * returned as is. Otherwise, it transforms the V1 message into a
+ * `StoredMessage`. This function is important for maintaining
+ * compatibility with older message formats.
+ */
+function mapV1MessageToStoredMessage(message) {
+    // TODO: Remove this mapper when we deprecate the old message format.
+    if (message.data !== undefined) {
+        return message;
+    }
+    else {
+        const v1Message = message;
+        return {
+            type: v1Message.type,
+            data: {
+                content: v1Message.text,
+                role: v1Message.role,
+                name: undefined,
+            },
+        };
+    }
+}
+export function mapStoredMessageToChatMessage(message) {
+    const storedMessage = mapV1MessageToStoredMessage(message);
+    switch (storedMessage.type) {
+        case "human":
+            return new HumanMessage(storedMessage.data);
+        case "ai":
+            return new AIMessage(storedMessage.data);
+        case "system":
+            return new SystemMessage(storedMessage.data);
+        case "function":
+            if (storedMessage.data.name === undefined) {
+                throw new Error("Name must be defined for function messages");
+            }
+            return new FunctionMessage(storedMessage.data);
+        case "chat": {
+            if (storedMessage.data.role === undefined) {
+                throw new Error("Role must be defined for chat messages");
+            }
+            return new ChatMessage(storedMessage.data);
+        }
+        default:
+            throw new Error(`Got unexpected type: ${storedMessage.type}`);
+    }
+}
 /**
  * Base PromptValue class. All prompt values should extend this class.
  */
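With the mapper now exported from the schema entrypoint, a stored message, including one still in the legacy V1 shape, can be rehydrated directly:

    import { mapStoredMessageToChatMessage, HumanMessage } from "langchain/schema";

    const revived = mapStoredMessageToChatMessage({
      type: "human",
      data: { content: "hello", role: undefined, name: undefined },
    });
    console.log(revived instanceof HumanMessage); // true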

package/dist/stores/message/utils.cjs CHANGED
@@ -1,32 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapChatMessagesToStoredMessages = exports.mapStoredMessagesToChatMessages = exports.mapV1MessageToStoredMessage = void 0;
+exports.mapChatMessagesToStoredMessages = exports.mapStoredMessagesToChatMessages = void 0;
 const index_js_1 = require("../../schema/index.cjs");
-/**
- * Maps messages from an older format (V1) to the current `StoredMessage`
- * format. If the message is already in the `StoredMessage` format, it is
- * returned as is. Otherwise, it transforms the V1 message into a
- * `StoredMessage`. This function is important for maintaining
- * compatibility with older message formats.
- */
-function mapV1MessageToStoredMessage(message) {
-    // TODO: Remove this mapper when we deprecate the old message format.
-    if (message.data !== undefined) {
-        return message;
-    }
-    else {
-        const v1Message = message;
-        return {
-            type: v1Message.type,
-            data: {
-                content: v1Message.text,
-                role: v1Message.role,
-                name: undefined,
-            },
-        };
-    }
-}
-exports.mapV1MessageToStoredMessage = mapV1MessageToStoredMessage;
 /**
  * Transforms an array of `StoredMessage` instances into an array of
  * `BaseMessage` instances. It uses the `mapV1MessageToStoredMessage`
@@ -36,30 +11,7 @@ exports.mapV1MessageToStoredMessage = mapV1MessageToStoredMessage;
  * messages for use in a chat context.
  */
 function mapStoredMessagesToChatMessages(messages) {
-    return messages.map((message) => {
-        const storedMessage = mapV1MessageToStoredMessage(message);
-        switch (storedMessage.type) {
-            case "human":
-                return new index_js_1.HumanMessage(storedMessage.data);
-            case "ai":
-                return new index_js_1.AIMessage(storedMessage.data);
-            case "system":
-                return new index_js_1.SystemMessage(storedMessage.data);
-            case "function":
-                if (storedMessage.data.name === undefined) {
-                    throw new Error("Name must be defined for function messages");
-                }
-                return new index_js_1.FunctionMessage(storedMessage.data);
-            case "chat": {
-                if (storedMessage.data.role === undefined) {
-                    throw new Error("Role must be defined for chat messages");
-                }
-                return new index_js_1.ChatMessage(storedMessage.data);
-            }
-            default:
-                throw new Error(`Got unexpected type: ${storedMessage.type}`);
-        }
-    });
+    return messages.map(index_js_1.mapStoredMessageToChatMessage);
 }
 exports.mapStoredMessagesToChatMessages = mapStoredMessagesToChatMessages;
 /**

package/dist/stores/message/utils.d.ts CHANGED
@@ -1,17 +1,4 @@
 import { BaseMessage, StoredMessage } from "../../schema/index.js";
-interface StoredMessageV1 {
-    type: string;
-    role: string | undefined;
-    text: string;
-}
-/**
- * Maps messages from an older format (V1) to the current `StoredMessage`
- * format. If the message is already in the `StoredMessage` format, it is
- * returned as is. Otherwise, it transforms the V1 message into a
- * `StoredMessage`. This function is important for maintaining
- * compatibility with older message formats.
- */
-export declare function mapV1MessageToStoredMessage(message: StoredMessage | StoredMessageV1): StoredMessage;
 /**
  * Transforms an array of `StoredMessage` instances into an array of
  * `BaseMessage` instances. It uses the `mapV1MessageToStoredMessage`
@@ -28,4 +15,3 @@ export declare function mapStoredMessagesToChatMessages(messages: StoredMessage[
  * is used to prepare chat messages for storage.
  */
 export declare function mapChatMessagesToStoredMessages(messages: BaseMessage[]): StoredMessage[];
-export {};
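Net effect for consumers of langchain/stores/message/utils: mapV1MessageToStoredMessage is no longer exported (V1 handling still happens, but privately inside schema), while mapStoredMessagesToChatMessages keeps its signature and is now a thin wrapper over the schema-level mapper:

    import type { StoredMessage } from "langchain/schema";
    import { mapStoredMessageToChatMessage } from "langchain/schema";
    import { mapStoredMessagesToChatMessages } from "langchain/stores/message/utils";

    const storedMessages: StoredMessage[] = [
      { type: "ai", data: { content: "hi", role: undefined, name: undefined } },
    ];
    const all = mapStoredMessagesToChatMessages(storedMessages); // unchanged API
    const one = mapStoredMessageToChatMessage(storedMessages[0]); // new export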