langchain 0.0.212 → 0.0.214
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/chains/combine_documents.cjs +1 -0
- package/chains/combine_documents.d.ts +1 -0
- package/chains/combine_documents.js +1 -0
- package/chains/history_aware_retriever.cjs +1 -0
- package/chains/history_aware_retriever.d.ts +1 -0
- package/chains/history_aware_retriever.js +1 -0
- package/chains/retrieval.cjs +1 -0
- package/chains/retrieval.d.ts +1 -0
- package/chains/retrieval.js +1 -0
- package/dist/agents/agent.cjs +1 -0
- package/dist/agents/agent.js +1 -0
- package/dist/agents/executor.cjs +21 -3
- package/dist/agents/executor.d.ts +1 -0
- package/dist/agents/executor.js +21 -3
- package/dist/agents/format_scratchpad/openai_functions.cjs +22 -1
- package/dist/agents/format_scratchpad/openai_functions.d.ts +10 -0
- package/dist/agents/format_scratchpad/openai_functions.js +21 -1
- package/dist/agents/index.cjs +11 -4
- package/dist/agents/index.d.ts +6 -3
- package/dist/agents/index.js +5 -3
- package/dist/agents/initialize.cjs +1 -1
- package/dist/agents/initialize.d.ts +1 -1
- package/dist/agents/initialize.js +1 -1
- package/dist/agents/openai/output_parser.cjs +20 -196
- package/dist/agents/openai/output_parser.d.ts +2 -111
- package/dist/agents/openai/output_parser.js +6 -193
- package/dist/agents/{openai → openai_functions}/index.cjs +80 -2
- package/dist/agents/{openai → openai_functions}/index.d.ts +77 -3
- package/dist/agents/{openai → openai_functions}/index.js +78 -1
- package/dist/agents/openai_functions/output_parser.cjs +102 -0
- package/dist/agents/openai_functions/output_parser.d.ts +56 -0
- package/dist/agents/openai_functions/output_parser.js +98 -0
- package/dist/agents/openai_tools/index.cjs +83 -0
- package/dist/agents/openai_tools/index.d.ts +82 -0
- package/dist/agents/openai_tools/index.js +79 -0
- package/dist/agents/openai_tools/output_parser.cjs +102 -0
- package/dist/agents/openai_tools/output_parser.d.ts +57 -0
- package/dist/agents/openai_tools/output_parser.js +98 -0
- package/dist/agents/react/index.cjs +77 -0
- package/dist/agents/react/index.d.ts +62 -0
- package/dist/agents/react/index.js +73 -0
- package/dist/agents/react/output_parser.cjs +0 -1
- package/dist/agents/react/output_parser.d.ts +0 -1
- package/dist/agents/react/output_parser.js +0 -1
- package/dist/agents/structured_chat/index.cjs +87 -1
- package/dist/agents/structured_chat/index.d.ts +73 -0
- package/dist/agents/structured_chat/index.js +85 -0
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.cjs +1 -1
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.js +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.cjs +1 -0
- package/dist/agents/toolkits/conversational_retrieval/tool.d.ts +1 -0
- package/dist/agents/toolkits/conversational_retrieval/tool.js +1 -0
- package/dist/agents/toolkits/json/json.cjs +2 -0
- package/dist/agents/toolkits/json/json.d.ts +2 -0
- package/dist/agents/toolkits/json/json.js +2 -0
- package/dist/agents/toolkits/openapi/openapi.cjs +2 -0
- package/dist/agents/toolkits/openapi/openapi.d.ts +2 -0
- package/dist/agents/toolkits/openapi/openapi.js +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.cjs +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.d.ts +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.js +2 -0
- package/dist/agents/xml/index.cjs +77 -1
- package/dist/agents/xml/index.d.ts +67 -0
- package/dist/agents/xml/index.js +75 -0
- package/dist/callbacks/index.cjs +1 -4
- package/dist/callbacks/index.d.ts +1 -2
- package/dist/callbacks/index.js +1 -2
- package/dist/chains/combine_documents/base.cjs +16 -0
- package/dist/chains/combine_documents/base.d.ts +13 -0
- package/dist/chains/combine_documents/base.js +12 -0
- package/dist/chains/combine_documents/index.cjs +5 -0
- package/dist/chains/combine_documents/index.d.ts +1 -0
- package/dist/chains/combine_documents/index.js +1 -0
- package/dist/chains/combine_documents/reduce.cjs +5 -2
- package/dist/chains/combine_documents/reduce.js +4 -1
- package/dist/chains/combine_documents/stuff.cjs +42 -0
- package/dist/chains/combine_documents/stuff.d.ts +28 -0
- package/dist/chains/combine_documents/stuff.js +38 -0
- package/dist/chains/conversational_retrieval_chain.cjs +3 -3
- package/dist/chains/conversational_retrieval_chain.js +1 -1
- package/dist/chains/history_aware_retriever.cjs +55 -0
- package/dist/chains/history_aware_retriever.d.ts +55 -0
- package/dist/chains/history_aware_retriever.js +51 -0
- package/dist/chains/openai_functions/structured_output.cjs +63 -21
- package/dist/chains/openai_functions/structured_output.d.ts +25 -17
- package/dist/chains/openai_functions/structured_output.js +62 -20
- package/dist/chains/retrieval.cjs +60 -0
- package/dist/chains/retrieval.d.ts +65 -0
- package/dist/chains/retrieval.js +56 -0
- package/dist/experimental/autogpt/prompt.cjs +1 -1
- package/dist/experimental/autogpt/prompt.d.ts +1 -1
- package/dist/experimental/autogpt/prompt.js +1 -1
- package/dist/load/import_map.cjs +7 -3
- package/dist/load/import_map.d.ts +4 -0
- package/dist/load/import_map.js +4 -0
- package/dist/output_parsers/json.cjs +2 -78
- package/dist/output_parsers/json.d.ts +1 -1
- package/dist/output_parsers/json.js +1 -77
- package/dist/output_parsers/openai_functions.d.ts +1 -1
- package/dist/retrievers/multi_vector.cjs +11 -2
- package/dist/retrievers/multi_vector.d.ts +5 -3
- package/dist/retrievers/multi_vector.js +11 -2
- package/dist/retrievers/parent_document.cjs +1 -2
- package/dist/retrievers/parent_document.d.ts +1 -1
- package/dist/retrievers/parent_document.js +1 -2
- package/dist/retrievers/remote/chatgpt-plugin.cjs +5 -4
- package/dist/retrievers/remote/chatgpt-plugin.d.ts +5 -2
- package/dist/retrievers/remote/chatgpt-plugin.js +3 -2
- package/dist/retrievers/remote/index.cjs +2 -2
- package/dist/retrievers/remote/index.d.ts +1 -1
- package/dist/retrievers/remote/index.js +1 -1
- package/dist/retrievers/remote/remote-retriever.cjs +3 -2
- package/dist/retrievers/remote/remote-retriever.d.ts +3 -1
- package/dist/retrievers/remote/remote-retriever.js +2 -1
- package/dist/retrievers/vespa.cjs +15 -78
- package/dist/retrievers/vespa.d.ts +1 -54
- package/dist/retrievers/vespa.js +1 -76
- package/dist/schema/runnable/config.d.ts +1 -1
- package/dist/tools/retriever.cjs +17 -0
- package/dist/tools/retriever.d.ts +10 -0
- package/dist/tools/retriever.js +13 -0
- package/dist/util/entrypoint_deprecation.cjs +18 -0
- package/dist/util/entrypoint_deprecation.d.ts +5 -0
- package/dist/util/entrypoint_deprecation.js +14 -0
- package/package.json +36 -4
- package/tools/retriever.cjs +1 -0
- package/tools/retriever.d.ts +1 -0
- package/tools/retriever.js +1 -0
- package/dist/callbacks/handlers/tracer_langchain_v1.cjs +0 -17
- package/dist/callbacks/handlers/tracer_langchain_v1.d.ts +0 -1
- package/dist/callbacks/handlers/tracer_langchain_v1.js +0 -1
- package/dist/retrievers/remote/base.cjs +0 -68
- package/dist/retrievers/remote/base.d.ts +0 -60
- package/dist/retrievers/remote/base.js +0 -64
- /package/dist/agents/{openai → openai_functions}/prompt.cjs +0 -0
- /package/dist/agents/{openai → openai_functions}/prompt.d.ts +0 -0
- /package/dist/agents/{openai → openai_functions}/prompt.js +0 -0
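
Taken together, the new entrypoints above (`chains/combine_documents`, `chains/retrieval`, `chains/history_aware_retriever`, `tools/retriever`) replace the older monolithic retrieval chains with small LCEL factories. A minimal sketch of how they compose; only `createStuffDocumentsChain` is shown verbatim in the hunks below, so the `createRetrievalChain` signature, model, embeddings, and prompt text here are assumptions for illustration:

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";

// Index a single text in the in-memory vector store so the retriever has something to return.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["LangChain is a framework for building LLM applications."],
  [{ source: "readme" }],
  new OpenAIEmbeddings()
);

// "context" is the documents key required by createStuffDocumentsChain; "input" is the user question.
const combineDocsChain = await createStuffDocumentsChain({
  llm: new ChatOpenAI({ temperature: 0 }),
  prompt: ChatPromptTemplate.fromTemplate(
    "Answer using only this context:\n\n{context}\n\nQuestion: {input}"
  ),
});

const retrievalChain = await createRetrievalChain({
  retriever: vectorStore.asRetriever(),
  combineDocsChain,
});

const result = await retrievalChain.invoke({ input: "What is LangChain?" });
// result.answer holds the model output; result.context holds the retrieved documents.
```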
package/dist/agents/toolkits/json/json.cjs
@@ -42,6 +42,8 @@ class JsonToolkit extends base_js_1.Toolkit {
 }
 exports.JsonToolkit = JsonToolkit;
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates a JSON agent using a language model, a JSON toolkit, and
  * optional prompt arguments. It creates a prompt for the agent using the
  * JSON tools and the provided prefix and suffix. It then creates a

package/dist/agents/toolkits/json/json.d.ts
@@ -22,6 +22,8 @@ export declare class JsonToolkit extends Toolkit {
     constructor(jsonSpec: JsonSpec);
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates a JSON agent using a language model, a JSON toolkit, and
  * optional prompt arguments. It creates a prompt for the agent using the
  * JSON tools and the provided prefix and suffix. It then creates a

package/dist/agents/toolkits/json/json.js
@@ -38,6 +38,8 @@ export class JsonToolkit extends Toolkit {
     }
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates a JSON agent using a language model, a JSON toolkit, and
  * optional prompt arguments. It creates a prompt for the agent using the
  * JSON tools and the provided prefix and suffix. It then creates a

package/dist/agents/toolkits/openapi/openapi.cjs
@@ -69,6 +69,8 @@ class OpenApiToolkit extends RequestsToolkit {
 }
 exports.OpenApiToolkit = OpenApiToolkit;
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates an OpenAPI agent using a language model, an OpenAPI toolkit,
  * and optional prompt arguments. It creates a prompt for the agent using
  * the OpenAPI tools and the provided prefix and suffix. It then creates a

package/dist/agents/toolkits/openapi/openapi.d.ts
@@ -41,6 +41,8 @@ export declare class OpenApiToolkit extends RequestsToolkit {
     constructor(jsonSpec: JsonSpec, llm: BaseLanguageModelInterface, headers?: Headers);
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates an OpenAPI agent using a language model, an OpenAPI toolkit,
  * and optional prompt arguments. It creates a prompt for the agent using
  * the OpenAPI tools and the provided prefix and suffix. It then creates a

package/dist/agents/toolkits/openapi/openapi.js
@@ -64,6 +64,8 @@ export class OpenApiToolkit extends RequestsToolkit {
     }
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates an OpenAPI agent using a language model, an OpenAPI toolkit,
  * and optional prompt arguments. It creates a prompt for the agent using
  * the OpenAPI tools and the provided prefix and suffix. It then creates a
package/dist/agents/toolkits/vectorstore/vectorstore.cjs
@@ -93,6 +93,7 @@ class VectorStoreRouterToolkit extends base_js_1.Toolkit {
     }
 }
 exports.VectorStoreRouterToolkit = VectorStoreRouterToolkit;
+/** @deprecated Create a specific agent with a custom tool instead. */
 function createVectorStoreAgent(llm, toolkit, args) {
     const { prefix = prompt_js_1.VECTOR_PREFIX, suffix = prompt_js_2.SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;
@@ -113,6 +114,7 @@ function createVectorStoreAgent(llm, toolkit, args) {
     });
 }
 exports.createVectorStoreAgent = createVectorStoreAgent;
+/** @deprecated Create a specific agent with a custom tool instead. */
 function createVectorStoreRouterAgent(llm, toolkit, args) {
     const { prefix = prompt_js_1.VECTOR_ROUTER_PREFIX, suffix = prompt_js_2.SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;

package/dist/agents/toolkits/vectorstore/vectorstore.d.ts
@@ -50,5 +50,7 @@ export declare class VectorStoreRouterToolkit extends Toolkit {
     llm: BaseLanguageModelInterface;
     constructor(vectorStoreInfos: VectorStoreInfo[], llm: BaseLanguageModelInterface);
 }
+/** @deprecated Create a specific agent with a custom tool instead. */
 export declare function createVectorStoreAgent(llm: BaseLanguageModelInterface, toolkit: VectorStoreToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;
+/** @deprecated Create a specific agent with a custom tool instead. */
 export declare function createVectorStoreRouterAgent(llm: BaseLanguageModelInterface, toolkit: VectorStoreRouterToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;

package/dist/agents/toolkits/vectorstore/vectorstore.js
@@ -88,6 +88,7 @@ export class VectorStoreRouterToolkit extends Toolkit {
         });
     }
 }
+/** @deprecated Create a specific agent with a custom tool instead. */
 export function createVectorStoreAgent(llm, toolkit, args) {
     const { prefix = VECTOR_PREFIX, suffix = SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;
@@ -107,6 +108,7 @@ export function createVectorStoreAgent(llm, toolkit, args) {
         returnIntermediateSteps: true,
     });
 }
+/** @deprecated Create a specific agent with a custom tool instead. */
 export function createVectorStoreRouterAgent(llm, toolkit, args) {
     const { prefix = VECTOR_ROUTER_PREFIX, suffix = SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;
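
The `@deprecated` note repeated through these toolkit hunks points at the same migration path: wrap the lookup in an ordinary tool and hand it to whichever agent constructor you prefer, instead of the bundled createVectorStoreAgent/createJsonAgent/createOpenApiAgent helpers. A minimal sketch under that reading; the tool name, the `k` value, and the `openai-functions` agent type are illustrative choices, not part of this diff:

```typescript
import { DynamicTool } from "langchain/tools";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { ChatOpenAI } from "langchain/chat_models/openai";
import type { VectorStore } from "langchain/vectorstores/base";

// One custom tool around a vector-store search, handed to a general-purpose agent.
export async function makeSearchAgent(vectorStore: VectorStore) {
  const searchTool = new DynamicTool({
    name: "document-search",
    description: "Searches the indexed documents and returns the most relevant passages.",
    func: async (query: string) => {
      const docs = await vectorStore.similaritySearch(query, 4);
      return docs.map((d) => d.pageContent).join("\n\n");
    },
  });
  return initializeAgentExecutorWithOptions(
    [searchTool],
    new ChatOpenAI({ temperature: 0 }),
    { agentType: "openai-functions" }
  );
}
```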
package/dist/agents/xml/index.cjs
@@ -1,11 +1,14 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.XMLAgent = void 0;
+exports.createXmlAgent = exports.XMLAgent = void 0;
+const runnables_1 = require("@langchain/core/runnables");
 const llm_chain_js_1 = require("../../chains/llm_chain.cjs");
 const chat_js_1 = require("../../prompts/chat.cjs");
 const agent_js_1 = require("../agent.cjs");
 const prompt_js_1 = require("./prompt.cjs");
 const output_parser_js_1 = require("./output_parser.cjs");
+const render_js_1 = require("../../tools/render.cjs");
+const xml_js_1 = require("../format_scratchpad/xml.cjs");
 /**
  * Class that represents an agent that uses XML tags.
  */
@@ -101,3 +104,76 @@ class XMLAgent extends agent_js_1.BaseSingleActionAgent {
     }
 }
 exports.XMLAgent = XMLAgent;
+/**
+ * Create an agent that uses XML to format its logic.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createXmlAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { PromptTemplate } from "@langchain/core/prompts";
+ *
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * // If you want to see the prompt in full, you can at:
+ * // https://smith.langchain.com/hub/hwchase17/xml-agent-convo
+ * const prompt = await pull<PromptTemplate>("hwchase17/xml-agent-convo");
+ *
+ * const llm = new ChatAnthropic({
+ *   temperature: 0,
+ * });
+ *
+ * const agent = await createXmlAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   // Notice that chat_history is a string, since this prompt is aimed at LLMs, not chat models
+ *   chat_history: "Human: Hi! My name is Cob\nAI: Hello Cob! Nice to meet you",
+ * });
+ * ```
+ */
+async function createXmlAgent({ llm, tools, prompt, }) {
+    const missingVariables = ["tools", "agent_scratchpad"].filter((v) => !prompt.inputVariables.includes(v));
+    if (missingVariables.length > 0) {
+        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`);
+    }
+    const partialedPrompt = await prompt.partial({
+        tools: (0, render_js_1.renderTextDescription)(tools),
+    });
+    // TODO: Add .bind to core runnable interface.
+    const llmWithStop = llm.bind({
+        stop: ["</tool_input>", "</final_answer>"],
+    });
+    const agent = runnables_1.RunnableSequence.from([
+        runnables_1.RunnablePassthrough.assign({
+            agent_scratchpad: (input) => (0, xml_js_1.formatXml)(input.steps),
+        }),
+        partialedPrompt,
+        llmWithStop,
+        new output_parser_js_1.XMLAgentOutputParser(),
+    ]);
+    return agent;
+}
+exports.createXmlAgent = createXmlAgent;

package/dist/agents/xml/index.d.ts
@@ -1,5 +1,7 @@
 import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
 import type { ToolInterface } from "@langchain/core/tools";
+import { RunnableSequence } from "@langchain/core/runnables";
+import type { BasePromptTemplate } from "@langchain/core/prompts";
 import { LLMChain } from "../../chains/llm_chain.js";
 import { AgentStep, AgentAction, AgentFinish, ChainValues } from "../../schema/index.js";
 import { ChatPromptTemplate } from "../../prompts/chat.js";
@@ -44,3 +46,68 @@ export declare class XMLAgent extends BaseSingleActionAgent implements XMLAgentI
      */
     static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: ToolInterface[], args?: XMLAgentInput & Pick<AgentArgs, "callbacks">): XMLAgent;
 }
+/**
+ * Params used by the createXmlAgent function.
+ */
+export type CreateXmlAgentParams = {
+    /** LLM to use for the agent. */
+    llm: BaseLanguageModelInterface;
+    /** Tools this agent has access to. */
+    tools: ToolInterface[];
+    /**
+     * The prompt to use. Must have input keys for
+     * `tools` and `agent_scratchpad`.
+     */
+    prompt: BasePromptTemplate;
+};
+/**
+ * Create an agent that uses XML to format its logic.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createXmlAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { PromptTemplate } from "@langchain/core/prompts";
+ *
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * // If you want to see the prompt in full, you can at:
+ * // https://smith.langchain.com/hub/hwchase17/xml-agent-convo
+ * const prompt = await pull<PromptTemplate>("hwchase17/xml-agent-convo");
+ *
+ * const llm = new ChatAnthropic({
+ *   temperature: 0,
+ * });
+ *
+ * const agent = await createXmlAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   // Notice that chat_history is a string, since this prompt is aimed at LLMs, not chat models
+ *   chat_history: "Human: Hi! My name is Cob\nAI: Hello Cob! Nice to meet you",
+ * });
+ * ```
+ */
+export declare function createXmlAgent({ llm, tools, prompt, }: CreateXmlAgentParams): Promise<RunnableSequence<Record<string, unknown>, AgentAction | AgentFinish>>;
package/dist/agents/xml/index.js
CHANGED
@@ -1,8 +1,11 @@
+import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
 import { LLMChain } from "../../chains/llm_chain.js";
 import { AIMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, } from "../../prompts/chat.js";
 import { BaseSingleActionAgent } from "../agent.js";
 import { AGENT_INSTRUCTIONS } from "./prompt.js";
 import { XMLAgentOutputParser } from "./output_parser.js";
+import { renderTextDescription } from "../../tools/render.js";
+import { formatXml } from "../format_scratchpad/xml.js";
 /**
  * Class that represents an agent that uses XML tags.
  */
@@ -97,3 +100,75 @@ export class XMLAgent extends BaseSingleActionAgent {
         });
     }
 }
+/**
+ * Create an agent that uses XML to format its logic.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createXmlAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { PromptTemplate } from "@langchain/core/prompts";
+ *
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * // If you want to see the prompt in full, you can at:
+ * // https://smith.langchain.com/hub/hwchase17/xml-agent-convo
+ * const prompt = await pull<PromptTemplate>("hwchase17/xml-agent-convo");
+ *
+ * const llm = new ChatAnthropic({
+ *   temperature: 0,
+ * });
+ *
+ * const agent = await createXmlAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   // Notice that chat_history is a string, since this prompt is aimed at LLMs, not chat models
+ *   chat_history: "Human: Hi! My name is Cob\nAI: Hello Cob! Nice to meet you",
+ * });
+ * ```
+ */
+export async function createXmlAgent({ llm, tools, prompt, }) {
+    const missingVariables = ["tools", "agent_scratchpad"].filter((v) => !prompt.inputVariables.includes(v));
+    if (missingVariables.length > 0) {
+        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`);
+    }
+    const partialedPrompt = await prompt.partial({
+        tools: renderTextDescription(tools),
+    });
+    // TODO: Add .bind to core runnable interface.
+    const llmWithStop = llm.bind({
+        stop: ["</tool_input>", "</final_answer>"],
+    });
+    const agent = RunnableSequence.from([
+        RunnablePassthrough.assign({
+            agent_scratchpad: (input) => formatXml(input.steps),
+        }),
+        partialedPrompt,
+        llmWithStop,
+        new XMLAgentOutputParser(),
+    ]);
+    return agent;
+}
package/dist/callbacks/index.cjs
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.consumeCallback = exports.awaitAllCallbacks = exports.traceAsGroup = exports.TraceGroup = exports.CallbackManagerForToolRun = exports.CallbackManagerForLLMRun = exports.CallbackManagerForChainRun = exports.CallbackManagerForRetrieverRun = exports.CallbackManager = exports.getTracingV2CallbackHandler = exports.
+exports.consumeCallback = exports.awaitAllCallbacks = exports.traceAsGroup = exports.TraceGroup = exports.CallbackManagerForToolRun = exports.CallbackManagerForLLMRun = exports.CallbackManagerForChainRun = exports.CallbackManagerForRetrieverRun = exports.CallbackManager = exports.getTracingV2CallbackHandler = exports.LangChainTracer = exports.RunCollectorCallbackHandler = exports.ConsoleCallbackHandler = exports.BaseTracer = exports.BaseCallbackHandler = void 0;
 var base_js_1 = require("./base.cjs");
 Object.defineProperty(exports, "BaseCallbackHandler", { enumerable: true, get: function () { return base_js_1.BaseCallbackHandler; } });
 var tracer_js_1 = require("./handlers/tracer.cjs");
@@ -11,10 +11,7 @@ var run_collector_js_1 = require("./handlers/run_collector.cjs");
 Object.defineProperty(exports, "RunCollectorCallbackHandler", { enumerable: true, get: function () { return run_collector_js_1.RunCollectorCallbackHandler; } });
 var tracer_langchain_js_1 = require("./handlers/tracer_langchain.cjs");
 Object.defineProperty(exports, "LangChainTracer", { enumerable: true, get: function () { return tracer_langchain_js_1.LangChainTracer; } });
-var tracer_langchain_v1_js_1 = require("./handlers/tracer_langchain_v1.cjs");
-Object.defineProperty(exports, "LangChainTracerV1", { enumerable: true, get: function () { return tracer_langchain_v1_js_1.LangChainTracerV1; } });
 var initialize_js_1 = require("./handlers/initialize.cjs");
-Object.defineProperty(exports, "getTracingCallbackHandler", { enumerable: true, get: function () { return initialize_js_1.getTracingCallbackHandler; } });
 Object.defineProperty(exports, "getTracingV2CallbackHandler", { enumerable: true, get: function () { return initialize_js_1.getTracingV2CallbackHandler; } });
 var manager_js_1 = require("./manager.cjs");
 Object.defineProperty(exports, "CallbackManager", { enumerable: true, get: function () { return manager_js_1.CallbackManager; } });

package/dist/callbacks/index.d.ts
@@ -3,7 +3,6 @@ export { type Run, type RunType, BaseTracer } from "./handlers/tracer.js";
 export { ConsoleCallbackHandler } from "./handlers/console.js";
 export { RunCollectorCallbackHandler } from "./handlers/run_collector.js";
 export { LangChainTracer } from "./handlers/tracer_langchain.js";
-export {
-export { getTracingCallbackHandler, getTracingV2CallbackHandler, } from "./handlers/initialize.js";
+export { getTracingV2CallbackHandler } from "./handlers/initialize.js";
 export { CallbackManager, CallbackManagerForRetrieverRun, CallbackManagerForChainRun, CallbackManagerForLLMRun, CallbackManagerForToolRun, type CallbackManagerOptions, type Callbacks, type BaseCallbackConfig, TraceGroup, traceAsGroup, } from "./manager.js";
 export { awaitAllCallbacks, consumeCallback } from "./promises.js";

package/dist/callbacks/index.js
CHANGED
@@ -3,7 +3,6 @@ export { BaseTracer } from "./handlers/tracer.js";
 export { ConsoleCallbackHandler } from "./handlers/console.js";
 export { RunCollectorCallbackHandler } from "./handlers/run_collector.js";
 export { LangChainTracer } from "./handlers/tracer_langchain.js";
-export {
-export { getTracingCallbackHandler, getTracingV2CallbackHandler, } from "./handlers/initialize.js";
+export { getTracingV2CallbackHandler } from "./handlers/initialize.js";
 export { CallbackManager, CallbackManagerForRetrieverRun, CallbackManagerForChainRun, CallbackManagerForLLMRun, CallbackManagerForToolRun, TraceGroup, traceAsGroup, } from "./manager.js";
 export { awaitAllCallbacks, consumeCallback } from "./promises.js";
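
With the V1 tracer exports gone (`LangChainTracerV1`, `getTracingCallbackHandler`), the remaining way to trace from this entrypoint is the V2 `LangChainTracer`. A minimal sketch, assuming the usual LangSmith environment variables are set; the project name is illustrative and not part of this diff:

```typescript
import { LangChainTracer } from "langchain/callbacks";
import { ChatOpenAI } from "langchain/chat_models/openai";

// Requires LANGCHAIN_API_KEY (and optionally LANGCHAIN_ENDPOINT) in the environment.
const tracer = new LangChainTracer({ projectName: "release-testing" });

const model = new ChatOpenAI({ temperature: 0 });
// Attach the tracer per call rather than relying on the removed V1 handler.
const reply = await model.invoke("Say hello.", { callbacks: [tracer] });
```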
package/dist/chains/combine_documents/base.cjs
@@ -0,0 +1,16 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.formatDocuments = exports.DEFAULT_DOCUMENT_PROMPT = exports.INTERMEDIATE_STEPS_KEY = exports.DOCUMENTS_KEY = exports.DEFAULT_DOCUMENT_SEPARATOR = void 0;
+const prompts_1 = require("@langchain/core/prompts");
+exports.DEFAULT_DOCUMENT_SEPARATOR = "\n\n";
+exports.DOCUMENTS_KEY = "context";
+exports.INTERMEDIATE_STEPS_KEY = "intermediate_steps";
+exports.DEFAULT_DOCUMENT_PROMPT = 
+/* #__PURE__ */ prompts_1.PromptTemplate.fromTemplate("{page_content}");
+async function formatDocuments({ documentPrompt, documentSeparator, documents, config, }) {
+    const formattedDocs = await Promise.all(documents.map((document) => documentPrompt
+        .withConfig({ runName: "document_formatter" })
+        .invoke({ page_content: document.pageContent }, config)));
+    return formattedDocs.join(documentSeparator);
+}
+exports.formatDocuments = formatDocuments;

package/dist/chains/combine_documents/base.d.ts
@@ -0,0 +1,13 @@
+import { Document } from "@langchain/core/documents";
+import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts";
+import { RunnableConfig } from "@langchain/core/runnables";
+export declare const DEFAULT_DOCUMENT_SEPARATOR = "\n\n";
+export declare const DOCUMENTS_KEY = "context";
+export declare const INTERMEDIATE_STEPS_KEY = "intermediate_steps";
+export declare const DEFAULT_DOCUMENT_PROMPT: PromptTemplate<import("@langchain/core/prompts").ParamsFromFString<"{page_content}">, any>;
+export declare function formatDocuments({ documentPrompt, documentSeparator, documents, config, }: {
+    documentPrompt: BasePromptTemplate;
+    documentSeparator: string;
+    documents: Document[];
+    config?: RunnableConfig;
+}): Promise<string>;

package/dist/chains/combine_documents/base.js
@@ -0,0 +1,12 @@
+import { PromptTemplate } from "@langchain/core/prompts";
+export const DEFAULT_DOCUMENT_SEPARATOR = "\n\n";
+export const DOCUMENTS_KEY = "context";
+export const INTERMEDIATE_STEPS_KEY = "intermediate_steps";
+export const DEFAULT_DOCUMENT_PROMPT = 
+/* #__PURE__ */ PromptTemplate.fromTemplate("{page_content}");
+export async function formatDocuments({ documentPrompt, documentSeparator, documents, config, }) {
+    const formattedDocs = await Promise.all(documents.map((document) => documentPrompt
+        .withConfig({ runName: "document_formatter" })
+        .invoke({ page_content: document.pageContent }, config)));
+    return formattedDocs.join(documentSeparator);
+}
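
`formatDocuments` is the shared helper the new combine-documents chains use to turn a `Document[]` into one prompt string. It is not re-exported from the public `langchain/chains/combine_documents` entrypoint (that only exposes `createStuffDocumentsChain`), so the sketch below reproduces its behavior with public `@langchain/core` APIs instead of deep-importing the dist path:

```typescript
import { Document } from "@langchain/core/documents";
import { PromptTemplate } from "@langchain/core/prompts";

// Mirrors the helper above: render each document through the "{page_content}"
// prompt, then join the results with the default "\n\n" separator.
const documentPrompt = PromptTemplate.fromTemplate("{page_content}");
const documents = [
  new Document({ pageContent: "First passage." }),
  new Document({ pageContent: "Second passage." }),
];

const formatted = await Promise.all(
  documents.map(async (doc) =>
    (await documentPrompt.invoke({ page_content: doc.pageContent })).toString()
  )
);
const context = formatted.join("\n\n"); // the two passages separated by a blank line
```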
package/dist/chains/combine_documents/index.cjs
@@ -0,0 +1,5 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createStuffDocumentsChain = void 0;
+var stuff_js_1 = require("./stuff.cjs");
+Object.defineProperty(exports, "createStuffDocumentsChain", { enumerable: true, get: function () { return stuff_js_1.createStuffDocumentsChain; } });

package/dist/chains/combine_documents/index.d.ts
@@ -0,0 +1 @@
+export { createStuffDocumentsChain } from "./stuff.js";

package/dist/chains/combine_documents/index.js
@@ -0,0 +1 @@
+export { createStuffDocumentsChain } from "./stuff.js";
package/dist/chains/combine_documents/reduce.cjs
@@ -48,6 +48,10 @@ exports.splitListOfDocs = splitListOfDocs;
  */
 async function collapseDocs(docs, combineDocumentFunc) {
     const result = await combineDocumentFunc(docs);
+    return { pageContent: result, metadata: collapseDocsMetadata(docs) };
+}
+exports.collapseDocs = collapseDocs;
+function collapseDocsMetadata(docs) {
     const combinedMetadata = {};
     for (const key in docs[0].metadata) {
         if (key in docs[0].metadata) {
@@ -64,6 +68,5 @@ async function collapseDocs(docs, combineDocumentFunc) {
             }
         }
     }
-    return
+    return combinedMetadata;
 }
-exports.collapseDocs = collapseDocs;

package/dist/chains/combine_documents/reduce.js
@@ -44,6 +44,9 @@ lengthFunc, tokenMax) {
  */
 export async function collapseDocs(docs, combineDocumentFunc) {
     const result = await combineDocumentFunc(docs);
+    return { pageContent: result, metadata: collapseDocsMetadata(docs) };
+}
+function collapseDocsMetadata(docs) {
     const combinedMetadata = {};
     for (const key in docs[0].metadata) {
         if (key in docs[0].metadata) {
@@ -60,5 +63,5 @@ export async function collapseDocs(docs, combineDocumentFunc) {
             }
         }
     }
-    return
+    return combinedMetadata;
 }
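
These reduce.* hunks are a bug fix: in the published 0.0.212 build, `collapseDocs` built the merged metadata and then ended on a bare `return`, so callers received `undefined`. The merge now lives in a private `collapseDocsMetadata` helper and the function returns `{ pageContent, metadata }`. A small usage sketch; the joining combine function stands in for an LLM call, and exposing `langchain/chains/combine_documents/reduce` as an import path is an assumption here:

```typescript
import { Document } from "@langchain/core/documents";
import { collapseDocs } from "langchain/chains/combine_documents/reduce";

const docs = [
  new Document({ pageContent: "Part one.", metadata: { source: "a.txt" } }),
  new Document({ pageContent: "Part two.", metadata: { source: "b.txt" } }),
];

// Any (docs) => Promise<string> works here; joining stands in for a summarization call.
const collapsed = await collapseDocs(docs, async (ds) =>
  ds.map((d) => d.pageContent).join(" ")
);
// collapsed.pageContent === "Part one. Part two."; collapsed.metadata merges the docs' metadata.
```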
package/dist/chains/combine_documents/stuff.cjs
@@ -0,0 +1,42 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createStuffDocumentsChain = void 0;
+const output_parsers_1 = require("@langchain/core/output_parsers");
+const runnables_1 = require("@langchain/core/runnables");
+const base_js_1 = require("./base.cjs");
+/**
+ * Create a chain that passes a list of documents to a model.
+ *
+ * @param llm Language model to use for responding.
+ * @param prompt Prompt template. Must contain input variable "context", which will be
+ used for passing in the formatted documents.
+ * @param outputParser Output parser. Defaults to `StringOutputParser`.
+ * @param documentPrompt Prompt used for formatting each document into a string. Input
+ variables can be "page_content" or any metadata keys that are in all documents.
+ "page_content" will automatically retrieve the `Document.page_content`, and all
+ other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`.
+ * @param documentSeparator String separator to use between formatted document strings.
+ * @returns An LCEL `Runnable` chain.
+ Expects a dictionary as input with a list of `Document`s being passed under
+ the "context" key.
+ Return type depends on the `output_parser` used.
+ */
+async function createStuffDocumentsChain({ llm, prompt, outputParser = new output_parsers_1.StringOutputParser(), documentPrompt = base_js_1.DEFAULT_DOCUMENT_PROMPT, documentSeparator = base_js_1.DEFAULT_DOCUMENT_SEPARATOR, }) {
+    if (!prompt.inputVariables.includes(base_js_1.DOCUMENTS_KEY)) {
+        throw new Error(`Prompt must include a "${base_js_1.DOCUMENTS_KEY}" variable`);
+    }
+    return runnables_1.RunnableSequence.from([
+        runnables_1.RunnablePassthrough.assign({
+            [base_js_1.DOCUMENTS_KEY]: new runnables_1.RunnablePick(base_js_1.DOCUMENTS_KEY).pipe((documents, metadata) => (0, base_js_1.formatDocuments)({
+                documents,
+                documentPrompt,
+                documentSeparator,
+                config: metadata?.config,
+            })),
+        }),
+        prompt,
+        llm,
+        outputParser,
+    ], "stuff_documents_chain");
+}
+exports.createStuffDocumentsChain = createStuffDocumentsChain;

package/dist/chains/combine_documents/stuff.d.ts
@@ -0,0 +1,28 @@
+import { LanguageModelLike } from "@langchain/core/language_models/base";
+import { BaseOutputParser } from "@langchain/core/output_parsers";
+import { BasePromptTemplate } from "@langchain/core/prompts";
+import { RunnableSequence } from "@langchain/core/runnables";
+/**
+ * Create a chain that passes a list of documents to a model.
+ *
+ * @param llm Language model to use for responding.
+ * @param prompt Prompt template. Must contain input variable "context", which will be
+ used for passing in the formatted documents.
+ * @param outputParser Output parser. Defaults to `StringOutputParser`.
+ * @param documentPrompt Prompt used for formatting each document into a string. Input
+ variables can be "page_content" or any metadata keys that are in all documents.
+ "page_content" will automatically retrieve the `Document.page_content`, and all
+ other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`.
+ * @param documentSeparator String separator to use between formatted document strings.
+ * @returns An LCEL `Runnable` chain.
+ Expects a dictionary as input with a list of `Document`s being passed under
+ the "context" key.
+ Return type depends on the `output_parser` used.
+ */
+export declare function createStuffDocumentsChain<RunOutput = string>({ llm, prompt, outputParser, documentPrompt, documentSeparator, }: {
+    llm: LanguageModelLike;
+    prompt: BasePromptTemplate;
+    outputParser?: BaseOutputParser<RunOutput>;
+    documentPrompt?: BasePromptTemplate;
+    documentSeparator?: string;
+}): Promise<RunnableSequence<Record<string, unknown>, Exclude<RunOutput, Error>>>;

package/dist/chains/combine_documents/stuff.js
@@ -0,0 +1,38 @@
+import { StringOutputParser, } from "@langchain/core/output_parsers";
+import { RunnablePassthrough, RunnablePick, RunnableSequence, } from "@langchain/core/runnables";
+import { DEFAULT_DOCUMENT_PROMPT, DEFAULT_DOCUMENT_SEPARATOR, DOCUMENTS_KEY, formatDocuments, } from "./base.js";
+/**
+ * Create a chain that passes a list of documents to a model.
+ *
+ * @param llm Language model to use for responding.
+ * @param prompt Prompt template. Must contain input variable "context", which will be
+ used for passing in the formatted documents.
+ * @param outputParser Output parser. Defaults to `StringOutputParser`.
+ * @param documentPrompt Prompt used for formatting each document into a string. Input
+ variables can be "page_content" or any metadata keys that are in all documents.
+ "page_content" will automatically retrieve the `Document.page_content`, and all
+ other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`.
+ * @param documentSeparator String separator to use between formatted document strings.
+ * @returns An LCEL `Runnable` chain.
+ Expects a dictionary as input with a list of `Document`s being passed under
+ the "context" key.
+ Return type depends on the `output_parser` used.
+ */
+export async function createStuffDocumentsChain({ llm, prompt, outputParser = new StringOutputParser(), documentPrompt = DEFAULT_DOCUMENT_PROMPT, documentSeparator = DEFAULT_DOCUMENT_SEPARATOR, }) {
+    if (!prompt.inputVariables.includes(DOCUMENTS_KEY)) {
+        throw new Error(`Prompt must include a "${DOCUMENTS_KEY}" variable`);
+    }
+    return RunnableSequence.from([
+        RunnablePassthrough.assign({
+            [DOCUMENTS_KEY]: new RunnablePick(DOCUMENTS_KEY).pipe((documents, metadata) => formatDocuments({
+                documents,
+                documentPrompt,
+                documentSeparator,
+                config: metadata?.config,
+            })),
+        }),
+        prompt,
+        llm,
+        outputParser,
+    ], "stuff_documents_chain");
+}
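
Per the JSDoc above, the chain produced by `createStuffDocumentsChain` takes its documents directly under the "context" key; any other prompt variables pass through unchanged. A minimal standalone invocation through the new `langchain/chains/combine_documents` entrypoint (model and prompt text are illustrative):

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { Document } from "@langchain/core/documents";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";

const chain = await createStuffDocumentsChain({
  llm: new ChatOpenAI({ temperature: 0 }),
  prompt: ChatPromptTemplate.fromTemplate("Summarize the following:\n\n{context}"),
});

// Documents go in under "context"; with the default StringOutputParser the result is a string.
const summary = await chain.invoke({
  context: [
    new Document({ pageContent: "0.0.214 adds LCEL factories for document chains and agents." }),
  ],
});
```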
package/dist/chains/conversational_retrieval_chain.cjs
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ConversationalRetrievalQAChain = void 0;
-const
+const prompts_1 = require("@langchain/core/prompts");
 const index_js_1 = require("../schema/index.cjs");
 const base_js_1 = require("./base.cjs");
 const llm_chain_js_1 = require("./llm_chain.cjs");
@@ -218,11 +218,11 @@ class ConversationalRetrievalQAChain extends base_js_1.BaseChain {
         const { questionGeneratorTemplate, qaTemplate, qaChainOptions = {
             type: "stuff",
             prompt: qaTemplate
-                ?
+                ? prompts_1.PromptTemplate.fromTemplate(qaTemplate)
                 : undefined,
         }, questionGeneratorChainOptions, verbose, ...rest } = options;
         const qaChain = (0, load_js_1.loadQAChain)(llm, qaChainOptions);
-        const questionGeneratorChainPrompt =
+        const questionGeneratorChainPrompt = prompts_1.PromptTemplate.fromTemplate(questionGeneratorChainOptions?.template ??
             questionGeneratorTemplate ??
             question_generator_template);
         const questionGeneratorChain = new llm_chain_js_1.LLMChain({
|