langchain 0.0.84 → 0.0.86

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/cache/momento.cjs +1 -0
  2. package/cache/momento.d.ts +1 -0
  3. package/cache/momento.js +1 -0
  4. package/dist/agents/initialize.cjs +2 -1
  5. package/dist/agents/initialize.d.ts +0 -1
  6. package/dist/agents/initialize.js +2 -1
  7. package/dist/agents/structured_chat/index.cjs +10 -3
  8. package/dist/agents/structured_chat/index.d.ts +5 -1
  9. package/dist/agents/structured_chat/index.js +10 -3
  10. package/dist/cache/momento.cjs +114 -0
  11. package/dist/cache/momento.d.ts +72 -0
  12. package/dist/cache/momento.js +110 -0
  13. package/dist/chains/conversational_retrieval_chain.cjs +32 -14
  14. package/dist/chains/conversational_retrieval_chain.d.ts +11 -2
  15. package/dist/chains/conversational_retrieval_chain.js +33 -15
  16. package/dist/memory/zep.cjs +130 -0
  17. package/dist/memory/zep.d.ts +23 -0
  18. package/dist/memory/zep.js +126 -0
  19. package/dist/retrievers/zep.cjs +56 -0
  20. package/dist/retrievers/zep.d.ts +25 -0
  21. package/dist/retrievers/zep.js +52 -0
  22. package/dist/stores/message/momento.cjs +120 -0
  23. package/dist/stores/message/momento.d.ts +60 -0
  24. package/dist/stores/message/momento.js +116 -0
  25. package/dist/tools/requests.cjs +1 -1
  26. package/dist/tools/requests.js +1 -1
  27. package/dist/util/momento.cjs +26 -0
  28. package/dist/util/momento.d.ts +9 -0
  29. package/dist/util/momento.js +22 -0
  30. package/dist/vectorstores/milvus.cjs +1 -0
  31. package/dist/vectorstores/milvus.d.ts +1 -0
  32. package/dist/vectorstores/milvus.js +1 -0
  33. package/memory/zep.cjs +1 -0
  34. package/memory/zep.d.ts +1 -0
  35. package/memory/zep.js +1 -0
  36. package/package.json +43 -1
  37. package/retrievers/zep.cjs +1 -0
  38. package/retrievers/zep.d.ts +1 -0
  39. package/retrievers/zep.js +1 -0
  40. package/stores/message/momento.cjs +1 -0
  41. package/stores/message/momento.d.ts +1 -0
  42. package/stores/message/momento.js +1 -0
@@ -0,0 +1 @@
1
+ module.exports = require('../dist/cache/momento.cjs');
@@ -0,0 +1 @@
1
+ export * from '../dist/cache/momento.js'
@@ -0,0 +1 @@
1
+ export * from '../dist/cache/momento.js'
@@ -84,10 +84,11 @@ async function initializeAgentExecutorWithOptions(tools, llm, options = {
84
84
  return executor;
85
85
  }
86
86
  case "structured-chat-zero-shot-react-description": {
87
- const { agentArgs, ...rest } = options;
87
+ const { agentArgs, memory, ...rest } = options;
88
88
  const executor = executor_js_1.AgentExecutor.fromAgentAndTools({
89
89
  agent: index_js_3.StructuredChatAgent.fromLLMAndTools(llm, tools, agentArgs),
90
90
  tools,
91
+ memory,
91
92
  ...rest,
92
93
  });
93
94
  return executor;
@@ -32,7 +32,6 @@ export type InitializeAgentExecutorOptions = ({
32
32
  export type InitializeAgentExecutorOptionsStructured = {
33
33
  agentType: "structured-chat-zero-shot-react-description";
34
34
  agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];
35
- memory?: never;
36
35
  } & Omit<AgentExecutorInput, "agent" | "tools">;
37
36
  /**
38
37
  * Initialize an agent executor with options
@@ -80,10 +80,11 @@ export async function initializeAgentExecutorWithOptions(tools, llm, options = {
80
80
  return executor;
81
81
  }
82
82
  case "structured-chat-zero-shot-react-description": {
83
- const { agentArgs, ...rest } = options;
83
+ const { agentArgs, memory, ...rest } = options;
84
84
  const executor = AgentExecutor.fromAgentAndTools({
85
85
  agent: StructuredChatAgent.fromLLMAndTools(llm, tools, agentArgs),
86
86
  tools,
87
+ memory,
87
88
  ...rest,
88
89
  });
89
90
  return executor;
@@ -66,20 +66,27 @@ class StructuredChatAgent extends agent_js_1.Agent {
66
66
  * @param args - Arguments to create the prompt with.
67
67
  * @param args.suffix - String to put after the list of tools.
68
68
  * @param args.prefix - String to put before the list of tools.
69
+ * @param args.inputVariables List of input variables the final prompt will expect.
70
+ * @param args.memoryPrompts List of historical prompts from memory.
69
71
  */
70
72
  static createPrompt(tools, args) {
71
- const { prefix = prompt_js_2.PREFIX, suffix = prompt_js_2.SUFFIX } = args ?? {};
73
+ const { prefix = prompt_js_2.PREFIX, suffix = prompt_js_2.SUFFIX, inputVariables = ["input", "agent_scratchpad"], memoryPrompts = [], } = args ?? {};
72
74
  const template = [prefix, prompt_js_2.FORMAT_INSTRUCTIONS, suffix].join("\n\n");
75
+ const humanMessageTemplate = "{input}\n\n{agent_scratchpad}";
73
76
  const messages = [
74
77
  new chat_js_1.SystemMessagePromptTemplate(new prompt_js_1.PromptTemplate({
75
78
  template,
76
- inputVariables: [],
79
+ inputVariables,
77
80
  partialVariables: {
78
81
  tool_schemas: StructuredChatAgent.createToolSchemasString(tools),
79
82
  tool_names: tools.map((tool) => tool.name).join(", "),
80
83
  },
81
84
  })),
82
- chat_js_1.HumanMessagePromptTemplate.fromTemplate("{input}\n\n{agent_scratchpad}"),
85
+ ...memoryPrompts,
86
+ new chat_js_1.HumanMessagePromptTemplate(new prompt_js_1.PromptTemplate({
87
+ template: humanMessageTemplate,
88
+ inputVariables,
89
+ })),
83
90
  ];
84
91
  return chat_js_1.ChatPromptTemplate.fromPromptMessages(messages);
85
92
  }
@@ -1,5 +1,5 @@
1
1
  import { BaseLanguageModel } from "../../base_language/index.js";
2
- import { ChatPromptTemplate } from "../../prompts/chat.js";
2
+ import { BaseMessagePromptTemplate, ChatPromptTemplate } from "../../prompts/chat.js";
3
3
  import { AgentStep } from "../../schema/index.js";
4
4
  import { StructuredTool } from "../../tools/base.js";
5
5
  import { Optional } from "../../types/type-utils.js";
@@ -13,6 +13,8 @@ export interface StructuredChatCreatePromptArgs {
13
13
  prefix?: string;
14
14
  /** List of input variables the final prompt will expect. */
15
15
  inputVariables?: string[];
16
+ /** List of historical prompts from memory. */
17
+ memoryPrompts?: BaseMessagePromptTemplate[];
16
18
  }
17
19
  export type StructuredChatAgentInput = Optional<AgentInput, "outputParser">;
18
20
  /**
@@ -38,6 +40,8 @@ export declare class StructuredChatAgent extends Agent {
38
40
  * @param args - Arguments to create the prompt with.
39
41
  * @param args.suffix - String to put after the list of tools.
40
42
  * @param args.prefix - String to put before the list of tools.
43
+ * @param args.inputVariables List of input variables the final prompt will expect.
44
+ * @param args.memoryPrompts List of historical prompts from memory.
41
45
  */
42
46
  static createPrompt(tools: StructuredTool[], args?: StructuredChatCreatePromptArgs): ChatPromptTemplate;
43
47
  static fromLLMAndTools(llm: BaseLanguageModel, tools: StructuredTool[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;
@@ -63,20 +63,27 @@ export class StructuredChatAgent extends Agent {
63
63
  * @param args - Arguments to create the prompt with.
64
64
  * @param args.suffix - String to put after the list of tools.
65
65
  * @param args.prefix - String to put before the list of tools.
66
+ * @param args.inputVariables List of input variables the final prompt will expect.
67
+ * @param args.memoryPrompts List of historical prompts from memory.
66
68
  */
67
69
  static createPrompt(tools, args) {
68
- const { prefix = PREFIX, suffix = SUFFIX } = args ?? {};
70
+ const { prefix = PREFIX, suffix = SUFFIX, inputVariables = ["input", "agent_scratchpad"], memoryPrompts = [], } = args ?? {};
69
71
  const template = [prefix, FORMAT_INSTRUCTIONS, suffix].join("\n\n");
72
+ const humanMessageTemplate = "{input}\n\n{agent_scratchpad}";
70
73
  const messages = [
71
74
  new SystemMessagePromptTemplate(new PromptTemplate({
72
75
  template,
73
- inputVariables: [],
76
+ inputVariables,
74
77
  partialVariables: {
75
78
  tool_schemas: StructuredChatAgent.createToolSchemasString(tools),
76
79
  tool_names: tools.map((tool) => tool.name).join(", "),
77
80
  },
78
81
  })),
79
- HumanMessagePromptTemplate.fromTemplate("{input}\n\n{agent_scratchpad}"),
82
+ ...memoryPrompts,
83
+ new HumanMessagePromptTemplate(new PromptTemplate({
84
+ template: humanMessageTemplate,
85
+ inputVariables,
86
+ })),
80
87
  ];
81
88
  return ChatPromptTemplate.fromPromptMessages(messages);
82
89
  }
@@ -0,0 +1,114 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.MomentoCache = void 0;
4
+ /* eslint-disable no-instanceof/no-instanceof */
5
+ const sdk_1 = require("@gomomento/sdk");
6
+ const index_js_1 = require("../schema/index.cjs");
7
+ const base_js_1 = require("./base.cjs");
8
+ const momento_js_1 = require("../util/momento.cjs");
9
+ /**
10
+ * A cache that uses Momento as the backing store.
11
+ * See https://gomomento.com.
12
+ */
13
+ class MomentoCache extends index_js_1.BaseCache {
14
+ constructor(props) {
15
+ super();
16
+ Object.defineProperty(this, "client", {
17
+ enumerable: true,
18
+ configurable: true,
19
+ writable: true,
20
+ value: void 0
21
+ });
22
+ Object.defineProperty(this, "cacheName", {
23
+ enumerable: true,
24
+ configurable: true,
25
+ writable: true,
26
+ value: void 0
27
+ });
28
+ Object.defineProperty(this, "ttlSeconds", {
29
+ enumerable: true,
30
+ configurable: true,
31
+ writable: true,
32
+ value: void 0
33
+ });
34
+ this.client = props.client;
35
+ this.cacheName = props.cacheName;
36
+ this.validateTtlSeconds(props.ttlSeconds);
37
+ this.ttlSeconds = props.ttlSeconds;
38
+ }
39
+ /**
40
+ * Create a new standard cache backed by Momento.
41
+ *
42
+ * @param {MomentoCacheProps} props The settings to instantiate the cache.
43
+ * @param {ICacheClient} props.client The Momento cache client.
44
+ * @param {string} props.cacheName The name of the cache to use to store the data.
45
+ * @param {number} props.ttlSeconds The time to live for the cache items. If not specified,
46
+ * the cache client default is used.
47
+ * @param {boolean} props.ensureCacheExists If true, ensure that the cache exists before returning.
48
+ * If false, the cache is not checked for existence. Defaults to true.
49
+ * @throws {@link InvalidArgumentError} if {@link props.ttlSeconds} is not strictly positive.
50
+ * @returns The Momento-backed cache.
51
+ */
52
+ static async fromProps(props) {
53
+ const instance = new MomentoCache(props);
54
+ if (props.ensureCacheExists || props.ensureCacheExists === undefined) {
55
+ await (0, momento_js_1.ensureCacheExists)(props.client, props.cacheName);
56
+ }
57
+ return instance;
58
+ }
59
+ /**
60
+ * Validate the user-specified TTL, if provided, is strictly positive.
61
+ * @param ttlSeconds The TTL to validate.
62
+ */
63
+ validateTtlSeconds(ttlSeconds) {
64
+ if (ttlSeconds !== undefined && ttlSeconds <= 0) {
65
+ throw new sdk_1.InvalidArgumentError("ttlSeconds must be positive.");
66
+ }
67
+ }
68
+ /**
69
+ * Lookup LLM generations in cache by prompt and associated LLM key.
70
+ * @param prompt The prompt to lookup.
71
+ * @param llmKey The LLM key to lookup.
72
+ * @returns The generations associated with the prompt and LLM key, or null if not found.
73
+ */
74
+ async lookup(prompt, llmKey) {
75
+ const key = (0, base_js_1.getCacheKey)(prompt, llmKey);
76
+ const getResponse = await this.client.get(this.cacheName, key);
77
+ if (getResponse instanceof sdk_1.CacheGet.Hit) {
78
+ const value = getResponse.valueString();
79
+ return JSON.parse(value);
80
+ }
81
+ else if (getResponse instanceof sdk_1.CacheGet.Miss) {
82
+ return null;
83
+ }
84
+ else if (getResponse instanceof sdk_1.CacheGet.Error) {
85
+ throw getResponse.innerException();
86
+ }
87
+ else {
88
+ throw new Error(`Unknown response type: ${getResponse.toString()}`);
89
+ }
90
+ }
91
+ /**
92
+ * Update the cache with the given generations.
93
+ *
94
+ * Note this overwrites any existing generations for the given prompt and LLM key.
95
+ *
96
+ * @param prompt The prompt to update.
97
+ * @param llmKey The LLM key to update.
98
+ * @param value The generations to store.
99
+ */
100
+ async update(prompt, llmKey, value) {
101
+ const key = (0, base_js_1.getCacheKey)(prompt, llmKey);
102
+ const setResponse = await this.client.set(this.cacheName, key, JSON.stringify(value), { ttl: this.ttlSeconds });
103
+ if (setResponse instanceof sdk_1.CacheSet.Success) {
104
+ // pass
105
+ }
106
+ else if (setResponse instanceof sdk_1.CacheSet.Error) {
107
+ throw setResponse.innerException();
108
+ }
109
+ else {
110
+ throw new Error(`Unknown response type: ${setResponse.toString()}`);
111
+ }
112
+ }
113
+ }
114
+ exports.MomentoCache = MomentoCache;
@@ -0,0 +1,72 @@
1
+ import { ICacheClient } from "@gomomento/sdk";
2
+ import { BaseCache, Generation } from "../schema/index.js";
3
+ /**
4
+ * The settings to instantiate the Momento standard cache.
5
+ */
6
+ export interface MomentoCacheProps {
7
+ /**
8
+ * The Momento cache client.
9
+ */
10
+ client: ICacheClient;
11
+ /**
12
+ * The name of the cache to use to store the data.
13
+ */
14
+ cacheName: string;
15
+ /**
16
+ * The time to live for the cache items. If not specified,
17
+ * the cache client default is used.
18
+ */
19
+ ttlSeconds?: number;
20
+ /**
21
+ * If true, ensure that the cache exists before returning.
22
+ * If false, the cache is not checked for existence.
23
+ * Defaults to true.
24
+ */
25
+ ensureCacheExists?: true;
26
+ }
27
+ /**
28
+ * A cache that uses Momento as the backing store.
29
+ * See https://gomomento.com.
30
+ */
31
+ export declare class MomentoCache extends BaseCache {
32
+ private client;
33
+ private readonly cacheName;
34
+ private readonly ttlSeconds?;
35
+ private constructor();
36
+ /**
37
+ * Create a new standard cache backed by Momento.
38
+ *
39
+ * @param {MomentoCacheProps} props The settings to instantiate the cache.
40
+ * @param {ICacheClient} props.client The Momento cache client.
41
+ * @param {string} props.cacheName The name of the cache to use to store the data.
42
+ * @param {number} props.ttlSeconds The time to live for the cache items. If not specified,
43
+ * the cache client default is used.
44
+ * @param {boolean} props.ensureCacheExists If true, ensure that the cache exists before returning.
45
+ * If false, the cache is not checked for existence. Defaults to true.
46
+ * @throws {@link InvalidArgumentError} if {@link props.ttlSeconds} is not strictly positive.
47
+ * @returns The Momento-backed cache.
48
+ */
49
+ static fromProps(props: MomentoCacheProps): Promise<MomentoCache>;
50
+ /**
51
+ * Validate the user-specified TTL, if provided, is strictly positive.
52
+ * @param ttlSeconds The TTL to validate.
53
+ */
54
+ private validateTtlSeconds;
55
+ /**
56
+ * Lookup LLM generations in cache by prompt and associated LLM key.
57
+ * @param prompt The prompt to lookup.
58
+ * @param llmKey The LLM key to lookup.
59
+ * @returns The generations associated with the prompt and LLM key, or null if not found.
60
+ */
61
+ lookup(prompt: string, llmKey: string): Promise<Generation[] | null>;
62
+ /**
63
+ * Update the cache with the given generations.
64
+ *
65
+ * Note this overwrites any existing generations for the given prompt and LLM key.
66
+ *
67
+ * @param prompt The prompt to update.
68
+ * @param llmKey The LLM key to update.
69
+ * @param value The generations to store.
70
+ */
71
+ update(prompt: string, llmKey: string, value: Generation[]): Promise<void>;
72
+ }
@@ -0,0 +1,110 @@
1
+ /* eslint-disable no-instanceof/no-instanceof */
2
+ import { CacheGet, CacheSet, InvalidArgumentError, } from "@gomomento/sdk";
3
+ import { BaseCache } from "../schema/index.js";
4
+ import { getCacheKey } from "./base.js";
5
+ import { ensureCacheExists } from "../util/momento.js";
6
+ /**
7
+ * A cache that uses Momento as the backing store.
8
+ * See https://gomomento.com.
9
+ */
10
+ export class MomentoCache extends BaseCache {
11
+ constructor(props) {
12
+ super();
13
+ Object.defineProperty(this, "client", {
14
+ enumerable: true,
15
+ configurable: true,
16
+ writable: true,
17
+ value: void 0
18
+ });
19
+ Object.defineProperty(this, "cacheName", {
20
+ enumerable: true,
21
+ configurable: true,
22
+ writable: true,
23
+ value: void 0
24
+ });
25
+ Object.defineProperty(this, "ttlSeconds", {
26
+ enumerable: true,
27
+ configurable: true,
28
+ writable: true,
29
+ value: void 0
30
+ });
31
+ this.client = props.client;
32
+ this.cacheName = props.cacheName;
33
+ this.validateTtlSeconds(props.ttlSeconds);
34
+ this.ttlSeconds = props.ttlSeconds;
35
+ }
36
+ /**
37
+ * Create a new standard cache backed by Momento.
38
+ *
39
+ * @param {MomentoCacheProps} props The settings to instantiate the cache.
40
+ * @param {ICacheClient} props.client The Momento cache client.
41
+ * @param {string} props.cacheName The name of the cache to use to store the data.
42
+ * @param {number} props.ttlSeconds The time to live for the cache items. If not specified,
43
+ * the cache client default is used.
44
+ * @param {boolean} props.ensureCacheExists If true, ensure that the cache exists before returning.
45
+ * If false, the cache is not checked for existence. Defaults to true.
46
+ * @throws {@link InvalidArgumentError} if {@link props.ttlSeconds} is not strictly positive.
47
+ * @returns The Momento-backed cache.
48
+ */
49
+ static async fromProps(props) {
50
+ const instance = new MomentoCache(props);
51
+ if (props.ensureCacheExists || props.ensureCacheExists === undefined) {
52
+ await ensureCacheExists(props.client, props.cacheName);
53
+ }
54
+ return instance;
55
+ }
56
+ /**
57
+ * Validate the user-specified TTL, if provided, is strictly positive.
58
+ * @param ttlSeconds The TTL to validate.
59
+ */
60
+ validateTtlSeconds(ttlSeconds) {
61
+ if (ttlSeconds !== undefined && ttlSeconds <= 0) {
62
+ throw new InvalidArgumentError("ttlSeconds must be positive.");
63
+ }
64
+ }
65
+ /**
66
+ * Lookup LLM generations in cache by prompt and associated LLM key.
67
+ * @param prompt The prompt to lookup.
68
+ * @param llmKey The LLM key to lookup.
69
+ * @returns The generations associated with the prompt and LLM key, or null if not found.
70
+ */
71
+ async lookup(prompt, llmKey) {
72
+ const key = getCacheKey(prompt, llmKey);
73
+ const getResponse = await this.client.get(this.cacheName, key);
74
+ if (getResponse instanceof CacheGet.Hit) {
75
+ const value = getResponse.valueString();
76
+ return JSON.parse(value);
77
+ }
78
+ else if (getResponse instanceof CacheGet.Miss) {
79
+ return null;
80
+ }
81
+ else if (getResponse instanceof CacheGet.Error) {
82
+ throw getResponse.innerException();
83
+ }
84
+ else {
85
+ throw new Error(`Unknown response type: ${getResponse.toString()}`);
86
+ }
87
+ }
88
+ /**
89
+ * Update the cache with the given generations.
90
+ *
91
+ * Note this overwrites any existing generations for the given prompt and LLM key.
92
+ *
93
+ * @param prompt The prompt to update.
94
+ * @param llmKey The LLM key to update.
95
+ * @param value The generations to store.
96
+ */
97
+ async update(prompt, llmKey, value) {
98
+ const key = getCacheKey(prompt, llmKey);
99
+ const setResponse = await this.client.set(this.cacheName, key, JSON.stringify(value), { ttl: this.ttlSeconds });
100
+ if (setResponse instanceof CacheSet.Success) {
101
+ // pass
102
+ }
103
+ else if (setResponse instanceof CacheSet.Error) {
104
+ throw setResponse.innerException();
105
+ }
106
+ else {
107
+ throw new Error(`Unknown response type: ${setResponse.toString()}`);
108
+ }
109
+ }
110
+ }
@@ -11,12 +11,6 @@ Chat History:
11
11
  {chat_history}
12
12
  Follow Up Input: {question}
13
13
  Standalone question:`;
14
- const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
15
-
16
- {context}
17
-
18
- Question: {question}
19
- Helpful Answer:`;
20
14
  class ConversationalRetrievalQAChain extends base_js_1.BaseChain {
21
15
  get inputKeys() {
22
16
  return [this.inputKey, this.chatHistoryKey];
@@ -69,16 +63,34 @@ class ConversationalRetrievalQAChain extends base_js_1.BaseChain {
69
63
  this.returnSourceDocuments =
70
64
  fields.returnSourceDocuments ?? this.returnSourceDocuments;
71
65
  }
66
+ static getChatHistoryString(chatHistory) {
67
+ if (Array.isArray(chatHistory)) {
68
+ return chatHistory
69
+ .map((chatMessage) => {
70
+ if (chatMessage._getType() === "human") {
71
+ return `Human: ${chatMessage.text}`;
72
+ }
73
+ else if (chatMessage._getType() === "ai") {
74
+ return `Assistant: ${chatMessage.text}`;
75
+ }
76
+ else {
77
+ return `${chatMessage.text}`;
78
+ }
79
+ })
80
+ .join("\n");
81
+ }
82
+ return chatHistory;
83
+ }
72
84
  /** @ignore */
73
85
  async _call(values, runManager) {
74
86
  if (!(this.inputKey in values)) {
75
87
  throw new Error(`Question key ${this.inputKey} not found.`);
76
88
  }
77
89
  if (!(this.chatHistoryKey in values)) {
78
- throw new Error(`chat history key ${this.inputKey} not found.`);
90
+ throw new Error(`Chat history key ${this.chatHistoryKey} not found.`);
79
91
  }
80
92
  const question = values[this.inputKey];
81
- const chatHistory = values[this.chatHistoryKey];
93
+ const chatHistory = ConversationalRetrievalQAChain.getChatHistoryString(values[this.chatHistoryKey]);
82
94
  let newQuestion = question;
83
95
  if (chatHistory.length > 0) {
84
96
  const result = await this.questionGeneratorChain.call({
@@ -118,13 +130,19 @@ class ConversationalRetrievalQAChain extends base_js_1.BaseChain {
118
130
  throw new Error("Not implemented.");
119
131
  }
120
132
  static fromLLM(llm, retriever, options = {}) {
121
- const { questionGeneratorTemplate, qaTemplate, verbose, ...rest } = options;
122
- const question_generator_prompt = prompt_js_1.PromptTemplate.fromTemplate(questionGeneratorTemplate || question_generator_template);
123
- const qa_prompt = prompt_js_1.PromptTemplate.fromTemplate(qaTemplate || qa_template);
124
- const qaChain = (0, load_js_1.loadQAStuffChain)(llm, { prompt: qa_prompt, verbose });
133
+ const { questionGeneratorTemplate, qaTemplate, qaChainOptions = {
134
+ type: "stuff",
135
+ prompt: qaTemplate
136
+ ? prompt_js_1.PromptTemplate.fromTemplate(qaTemplate)
137
+ : undefined,
138
+ }, questionGeneratorChainOptions, verbose, ...rest } = options;
139
+ const qaChain = (0, load_js_1.loadQAChain)(llm, qaChainOptions);
140
+ const questionGeneratorChainPrompt = prompt_js_1.PromptTemplate.fromTemplate(questionGeneratorChainOptions?.template ??
141
+ questionGeneratorTemplate ??
142
+ question_generator_template);
125
143
  const questionGeneratorChain = new llm_chain_js_1.LLMChain({
126
- prompt: question_generator_prompt,
127
- llm,
144
+ prompt: questionGeneratorChainPrompt,
145
+ llm: questionGeneratorChainOptions?.llm ?? llm,
128
146
  verbose,
129
147
  });
130
148
  const instance = new this({
@@ -1,11 +1,12 @@
1
1
  import { BaseLanguageModel } from "../base_language/index.js";
2
2
  import { SerializedChatVectorDBQAChain } from "./serde.js";
3
- import { ChainValues, BaseRetriever } from "../schema/index.js";
3
+ import { ChainValues, BaseRetriever, BaseChatMessage } from "../schema/index.js";
4
4
  import { BaseChain, ChainInputs } from "./base.js";
5
5
  import { LLMChain } from "./llm_chain.js";
6
+ import { QAChainParams } from "./question_answering/load.js";
6
7
  import { CallbackManagerForChainRun } from "../callbacks/manager.js";
7
8
  export type LoadValues = Record<string, any>;
8
- export interface ConversationalRetrievalQAChainInput extends Omit<ChainInputs, "memory"> {
9
+ export interface ConversationalRetrievalQAChainInput extends ChainInputs {
9
10
  retriever: BaseRetriever;
10
11
  combineDocumentsChain: BaseChain;
11
12
  questionGeneratorChain: LLMChain;
@@ -22,6 +23,7 @@ export declare class ConversationalRetrievalQAChain extends BaseChain implements
22
23
  questionGeneratorChain: LLMChain;
23
24
  returnSourceDocuments: boolean;
24
25
  constructor(fields: ConversationalRetrievalQAChainInput);
26
+ static getChatHistoryString(chatHistory: string | BaseChatMessage[]): string;
25
27
  /** @ignore */
26
28
  _call(values: ChainValues, runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
27
29
  _chainType(): string;
@@ -30,7 +32,14 @@ export declare class ConversationalRetrievalQAChain extends BaseChain implements
30
32
  static fromLLM(llm: BaseLanguageModel, retriever: BaseRetriever, options?: {
31
33
  outputKey?: string;
32
34
  returnSourceDocuments?: boolean;
35
+ /** @deprecated Pass in questionGeneratorChainOptions.template instead */
33
36
  questionGeneratorTemplate?: string;
37
+ /** @deprecated Pass in qaChainOptions.prompt instead */
34
38
  qaTemplate?: string;
39
+ qaChainOptions?: QAChainParams;
40
+ questionGeneratorChainOptions?: {
41
+ llm?: BaseLanguageModel;
42
+ template?: string;
43
+ };
35
44
  } & Omit<ConversationalRetrievalQAChainInput, "retriever" | "combineDocumentsChain" | "questionGeneratorChain">): ConversationalRetrievalQAChain;
36
45
  }
@@ -1,19 +1,13 @@
1
1
  import { PromptTemplate } from "../prompts/prompt.js";
2
2
  import { BaseChain } from "./base.js";
3
3
  import { LLMChain } from "./llm_chain.js";
4
- import { loadQAStuffChain } from "./question_answering/load.js";
4
+ import { loadQAChain } from "./question_answering/load.js";
5
5
  const question_generator_template = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
6
6
 
7
7
  Chat History:
8
8
  {chat_history}
9
9
  Follow Up Input: {question}
10
10
  Standalone question:`;
11
- const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
12
-
13
- {context}
14
-
15
- Question: {question}
16
- Helpful Answer:`;
17
11
  export class ConversationalRetrievalQAChain extends BaseChain {
18
12
  get inputKeys() {
19
13
  return [this.inputKey, this.chatHistoryKey];
@@ -66,16 +60,34 @@ export class ConversationalRetrievalQAChain extends BaseChain {
66
60
  this.returnSourceDocuments =
67
61
  fields.returnSourceDocuments ?? this.returnSourceDocuments;
68
62
  }
63
+ static getChatHistoryString(chatHistory) {
64
+ if (Array.isArray(chatHistory)) {
65
+ return chatHistory
66
+ .map((chatMessage) => {
67
+ if (chatMessage._getType() === "human") {
68
+ return `Human: ${chatMessage.text}`;
69
+ }
70
+ else if (chatMessage._getType() === "ai") {
71
+ return `Assistant: ${chatMessage.text}`;
72
+ }
73
+ else {
74
+ return `${chatMessage.text}`;
75
+ }
76
+ })
77
+ .join("\n");
78
+ }
79
+ return chatHistory;
80
+ }
69
81
  /** @ignore */
70
82
  async _call(values, runManager) {
71
83
  if (!(this.inputKey in values)) {
72
84
  throw new Error(`Question key ${this.inputKey} not found.`);
73
85
  }
74
86
  if (!(this.chatHistoryKey in values)) {
75
- throw new Error(`chat history key ${this.inputKey} not found.`);
87
+ throw new Error(`Chat history key ${this.chatHistoryKey} not found.`);
76
88
  }
77
89
  const question = values[this.inputKey];
78
- const chatHistory = values[this.chatHistoryKey];
90
+ const chatHistory = ConversationalRetrievalQAChain.getChatHistoryString(values[this.chatHistoryKey]);
79
91
  let newQuestion = question;
80
92
  if (chatHistory.length > 0) {
81
93
  const result = await this.questionGeneratorChain.call({
@@ -115,13 +127,19 @@ export class ConversationalRetrievalQAChain extends BaseChain {
115
127
  throw new Error("Not implemented.");
116
128
  }
117
129
  static fromLLM(llm, retriever, options = {}) {
118
- const { questionGeneratorTemplate, qaTemplate, verbose, ...rest } = options;
119
- const question_generator_prompt = PromptTemplate.fromTemplate(questionGeneratorTemplate || question_generator_template);
120
- const qa_prompt = PromptTemplate.fromTemplate(qaTemplate || qa_template);
121
- const qaChain = loadQAStuffChain(llm, { prompt: qa_prompt, verbose });
130
+ const { questionGeneratorTemplate, qaTemplate, qaChainOptions = {
131
+ type: "stuff",
132
+ prompt: qaTemplate
133
+ ? PromptTemplate.fromTemplate(qaTemplate)
134
+ : undefined,
135
+ }, questionGeneratorChainOptions, verbose, ...rest } = options;
136
+ const qaChain = loadQAChain(llm, qaChainOptions);
137
+ const questionGeneratorChainPrompt = PromptTemplate.fromTemplate(questionGeneratorChainOptions?.template ??
138
+ questionGeneratorTemplate ??
139
+ question_generator_template);
122
140
  const questionGeneratorChain = new LLMChain({
123
- prompt: question_generator_prompt,
124
- llm,
141
+ prompt: questionGeneratorChainPrompt,
142
+ llm: questionGeneratorChainOptions?.llm ?? llm,
125
143
  verbose,
126
144
  });
127
145
  const instance = new this({