langchain 0.0.184 → 0.0.186
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/agents/format_scratchpad/openai_tools.cjs +1 -0
- package/agents/format_scratchpad/openai_tools.d.ts +1 -0
- package/agents/format_scratchpad/openai_tools.js +1 -0
- package/dist/agents/format_scratchpad/openai_tools.cjs +19 -0
- package/dist/agents/format_scratchpad/openai_tools.d.ts +3 -0
- package/dist/agents/format_scratchpad/openai_tools.js +15 -0
- package/dist/agents/openai/index.cjs +2 -1
- package/dist/agents/openai/index.js +2 -1
- package/dist/agents/openai/output_parser.cjs +66 -1
- package/dist/agents/openai/output_parser.d.ts +26 -2
- package/dist/agents/openai/output_parser.js +65 -1
- package/dist/agents/structured_chat/index.cjs +1 -2
- package/dist/agents/structured_chat/index.d.ts +2 -0
- package/dist/agents/structured_chat/index.js +1 -2
- package/dist/agents/types.cjs +8 -1
- package/dist/agents/types.d.ts +6 -0
- package/dist/agents/types.js +6 -0
- package/dist/chains/combine_docs_chain.cjs +1 -1
- package/dist/chains/combine_docs_chain.js +1 -1
- package/dist/chains/llm_chain.cjs +52 -7
- package/dist/chains/llm_chain.d.ts +20 -12
- package/dist/chains/llm_chain.js +53 -8
- package/dist/chat_models/ollama.cjs +8 -0
- package/dist/chat_models/ollama.d.ts +3 -0
- package/dist/chat_models/ollama.js +8 -0
- package/dist/chat_models/openai.cjs +3 -0
- package/dist/chat_models/openai.js +3 -0
- package/dist/document_loaders/fs/pdf.cjs +17 -3
- package/dist/document_loaders/fs/pdf.js +17 -3
- package/dist/document_loaders/fs/unstructured.d.ts +1 -5
- package/dist/document_loaders/web/apify_dataset.cjs +12 -6
- package/dist/document_loaders/web/apify_dataset.d.ts +9 -6
- package/dist/document_loaders/web/apify_dataset.js +12 -6
- package/dist/document_loaders/web/pdf.cjs +17 -3
- package/dist/document_loaders/web/pdf.js +17 -3
- package/dist/document_loaders/web/puppeteer.cjs +37 -0
- package/dist/document_loaders/web/puppeteer.d.ts +17 -0
- package/dist/document_loaders/web/puppeteer.js +37 -0
- package/dist/embeddings/ollama.d.ts +1 -1
- package/dist/experimental/plan_and_execute/agent_executor.cjs +28 -2
- package/dist/experimental/plan_and_execute/agent_executor.d.ts +10 -3
- package/dist/experimental/plan_and_execute/agent_executor.js +26 -1
- package/dist/experimental/plan_and_execute/prompt.d.ts +2 -1
- package/dist/llms/ollama.cjs +8 -0
- package/dist/llms/ollama.d.ts +3 -0
- package/dist/llms/ollama.js +8 -0
- package/dist/llms/openai.cjs +1 -1
- package/dist/llms/openai.js +1 -1
- package/dist/load/import_map.cjs +3 -2
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/output_parsers/index.cjs +3 -1
- package/dist/output_parsers/index.d.ts +1 -0
- package/dist/output_parsers/index.js +1 -0
- package/dist/output_parsers/openai_functions.cjs +3 -3
- package/dist/output_parsers/openai_functions.js +3 -3
- package/dist/output_parsers/openai_tools.cjs +53 -0
- package/dist/output_parsers/openai_tools.d.ts +22 -0
- package/dist/output_parsers/openai_tools.js +49 -0
- package/dist/prompts/base.d.ts +2 -1
- package/dist/prompts/chat.cjs +23 -2
- package/dist/prompts/chat.d.ts +1 -0
- package/dist/prompts/chat.js +23 -2
- package/dist/schema/index.d.ts +3 -4
- package/dist/schema/runnable/base.d.ts +2 -2
- package/dist/tools/convert_to_openai.cjs +2 -1
- package/dist/tools/convert_to_openai.js +2 -1
- package/dist/tools/index.cjs +2 -1
- package/dist/tools/index.d.ts +1 -1
- package/dist/tools/index.js +1 -1
- package/dist/util/ollama.d.ts +3 -0
- package/dist/util/types.cjs +5 -0
- package/dist/util/types.d.ts +4 -0
- package/dist/util/types.js +4 -0
- package/dist/vectorstores/momento_vector_index.cjs +1 -1
- package/dist/vectorstores/momento_vector_index.js +1 -1
- package/package.json +15 -7
package/agents/format_scratchpad/openai_tools.cjs ADDED

```diff
@@ -0,0 +1 @@
+module.exports = require('../../dist/agents/format_scratchpad/openai_tools.cjs');
```
package/agents/format_scratchpad/openai_tools.d.ts ADDED

```diff
@@ -0,0 +1 @@
+export * from '../../dist/agents/format_scratchpad/openai_tools.js'
```
package/agents/format_scratchpad/openai_tools.js ADDED

```diff
@@ -0,0 +1 @@
+export * from '../../dist/agents/format_scratchpad/openai_tools.js'
```
package/dist/agents/format_scratchpad/openai_tools.cjs ADDED

```diff
@@ -0,0 +1,19 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.formatToOpenAIToolMessages = void 0;
+const index_js_1 = require("../../schema/index.cjs");
+function formatToOpenAIToolMessages(steps) {
+    return steps.flatMap(({ action, observation }) => {
+        if ("messageLog" in action && action.messageLog !== undefined) {
+            const log = action.messageLog;
+            return log.concat(new index_js_1.ToolMessage({
+                content: observation,
+                tool_call_id: action.toolCallId,
+            }));
+        }
+        else {
+            return [new index_js_1.AIMessage(action.log)];
+        }
+    });
+}
+exports.formatToOpenAIToolMessages = formatToOpenAIToolMessages;
```
package/dist/agents/format_scratchpad/openai_tools.js ADDED

```diff
@@ -0,0 +1,15 @@
+import { ToolMessage, AIMessage, } from "../../schema/index.js";
+export function formatToOpenAIToolMessages(steps) {
+    return steps.flatMap(({ action, observation }) => {
+        if ("messageLog" in action && action.messageLog !== undefined) {
+            const log = action.messageLog;
+            return log.concat(new ToolMessage({
+                content: observation,
+                tool_call_id: action.toolCallId,
+            }));
+        }
+        else {
+            return [new AIMessage(action.log)];
+        }
+    });
+}
```
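The new formatter converts executed agent steps into the message sequence OpenAI's tool-calling API expects: the AIMessage that issued the tool calls (carried on the first step's messageLog) followed by one ToolMessage per observation, keyed by tool_call_id. A minimal sketch of calling it through the new package entrypoint; the step object here is hand-rolled for illustration, and would normally come from the agent executor after the new OpenAIToolsAgentOutputParser (below) runs:

```ts
import { formatToOpenAIToolMessages } from "langchain/agents/format_scratchpad/openai_tools";
import { AIMessage } from "langchain/schema";

// Hand-rolled ToolsAgentStep for illustration; the tool name, call id, and
// input are invented values.
const modelMessage = new AIMessage("");
const steps = [
  {
    action: {
      tool: "calculator",
      toolInput: { expression: "2 + 2" },
      toolCallId: "call_abc123",
      log: 'Invoking "calculator" with {"expression":"2 + 2"}\n',
      messageLog: [modelMessage],
    },
    observation: "4",
  },
];

// => [modelMessage, ToolMessage { content: "4", tool_call_id: "call_abc123" }]
const scratchpad = formatToOpenAIToolMessages(steps);
```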
package/dist/agents/openai/index.cjs CHANGED

```diff
@@ -143,7 +143,8 @@ class OpenAIAgent extends agent_js_1.Agent {
         const valuesForLLM = {
             functions: this.tools.map(convert_to_openai_js_1.formatToOpenAIFunction),
         };
-
+        const callKeys = "callKeys" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];
+        for (const key of callKeys) {
             if (key in inputs) {
                 valuesForLLM[key] = inputs[key];
                 delete valuesForPrompt[key];
```
package/dist/agents/openai/index.js CHANGED

```diff
@@ -139,7 +139,8 @@ export class OpenAIAgent extends Agent {
         const valuesForLLM = {
             functions: this.tools.map(formatToOpenAIFunction),
         };
-
+        const callKeys = "callKeys" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];
+        for (const key of callKeys) {
             if (key in inputs) {
                 valuesForLLM[key] = inputs[key];
                 delete valuesForPrompt[key];
```
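Both builds guard the same line: the agent no longer assumes this.llmChain.llm exposes callKeys, because the chain's llm may now be a bare Runnable (see the llm_chain changes below). The pattern, as a standalone sketch:

```ts
// Probe-before-read: callKeys only exists on BaseLanguageModel subclasses,
// so fall back to an empty list for plain Runnables.
function getCallKeys(llm: object): string[] {
  return "callKeys" in llm ? (llm as { callKeys: string[] }).callKeys : [];
}
```

Behavior for existing callers is unchanged: inputs whose keys are model call options (for example timeout or signal on OpenAI models) are still routed to the model rather than the prompt.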
package/dist/agents/openai/output_parser.cjs CHANGED

```diff
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.OpenAIFunctionsAgentOutputParser = void 0;
+exports.OpenAIToolsAgentOutputParser = exports.OpenAIFunctionsAgentOutputParser = void 0;
 const index_js_1 = require("../../schema/index.cjs");
 const types_js_1 = require("../types.cjs");
 const output_parser_js_1 = require("../../schema/output_parser.cjs");
@@ -66,3 +66,68 @@ class OpenAIFunctionsAgentOutputParser extends types_js_1.AgentActionOutputParse
     }
 }
 exports.OpenAIFunctionsAgentOutputParser = OpenAIFunctionsAgentOutputParser;
+class OpenAIToolsAgentOutputParser extends types_js_1.AgentMultiActionOutputParser {
+    constructor() {
+        super(...arguments);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "agents", "openai"]
+        });
+    }
+    static lc_name() {
+        return "OpenAIToolsAgentOutputParser";
+    }
+    async parse(text) {
+        throw new Error(`OpenAIFunctionsAgentOutputParser can only parse messages.\nPassed input: ${text}`);
+    }
+    async parseResult(generations) {
+        if ("message" in generations[0] && (0, index_js_1.isBaseMessage)(generations[0].message)) {
+            return this.parseAIMessage(generations[0].message);
+        }
+        throw new Error("parseResult on OpenAIFunctionsAgentOutputParser only works on ChatGeneration output");
+    }
+    /**
+     * Parses the output message into a ToolsAgentAction[] or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A ToolsAgentAction[] or AgentFinish object.
+     */
+    parseAIMessage(message) {
+        if (message.content && typeof message.content !== "string") {
+            throw new Error("This agent cannot parse non-string model responses.");
+        }
+        if (message.additional_kwargs.tool_calls) {
+            const toolCalls = message.additional_kwargs.tool_calls;
+            try {
+                return toolCalls.map((toolCall, i) => {
+                    const toolInput = toolCall.function.arguments
+                        ? JSON.parse(toolCall.function.arguments)
+                        : {};
+                    const messageLog = i === 0 ? [message] : [];
+                    return {
+                        tool: toolCall.function.name,
+                        toolInput,
+                        toolCallId: toolCall.id,
+                        log: `Invoking "${toolCall.function.name}" with ${toolCall.function.arguments ?? "{}"}\n${message.content}`,
+                        messageLog,
+                    };
+                });
+            }
+            catch (error) {
+                throw new output_parser_js_1.OutputParserException(`Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${error}`);
+            }
+        }
+        else {
+            return {
+                returnValues: { output: message.content },
+                log: message.content,
+            };
+        }
+    }
+    getFormatInstructions() {
+        throw new Error("getFormatInstructions not implemented inside OpenAIToolsAgentOutputParser.");
+    }
+}
+exports.OpenAIToolsAgentOutputParser = OpenAIToolsAgentOutputParser;
```
package/dist/agents/openai/output_parser.d.ts CHANGED

```diff
@@ -1,5 +1,5 @@
-import { AgentAction, AgentFinish, BaseMessage, ChatGeneration } from "../../schema/index.js";
-import { AgentActionOutputParser } from "../types.js";
+import { AgentAction, AgentFinish, AgentStep, BaseMessage, ChatGeneration } from "../../schema/index.js";
+import { AgentActionOutputParser, AgentMultiActionOutputParser } from "../types.js";
 /**
  * Type that represents an agent action with an optional message log.
  */
@@ -20,3 +20,27 @@ export declare class OpenAIFunctionsAgentOutputParser extends AgentActionOutputP
     parseAIMessage(message: BaseMessage): FunctionsAgentAction | AgentFinish;
     getFormatInstructions(): string;
 }
+/**
+ * Type that represents an agent action with an optional message log.
+ */
+export type ToolsAgentAction = AgentAction & {
+    toolCallId: string;
+    messageLog?: BaseMessage[];
+};
+export type ToolsAgentStep = AgentStep & {
+    action: ToolsAgentAction;
+};
+export declare class OpenAIToolsAgentOutputParser extends AgentMultiActionOutputParser {
+    lc_namespace: string[];
+    static lc_name(): string;
+    parse(text: string): Promise<AgentAction[] | AgentFinish>;
+    parseResult(generations: ChatGeneration[]): Promise<AgentFinish | ToolsAgentAction[]>;
+    /**
+     * Parses the output message into a ToolsAgentAction[] or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A ToolsAgentAction[] or AgentFinish object.
+     */
+    parseAIMessage(message: BaseMessage): ToolsAgentAction[] | AgentFinish;
+    getFormatInstructions(): string;
+}
```
package/dist/agents/openai/output_parser.js CHANGED

```diff
@@ -1,5 +1,5 @@
 import { isBaseMessage, } from "../../schema/index.js";
-import { AgentActionOutputParser } from "../types.js";
+import { AgentActionOutputParser, AgentMultiActionOutputParser, } from "../types.js";
 import { OutputParserException } from "../../schema/output_parser.js";
 export class OpenAIFunctionsAgentOutputParser extends AgentActionOutputParser {
     constructor() {
@@ -62,3 +62,67 @@ export class OpenAIFunctionsAgentOutputParser extends AgentActionOutputParser {
         throw new Error("getFormatInstructions not implemented inside OpenAIFunctionsAgentOutputParser.");
     }
 }
+export class OpenAIToolsAgentOutputParser extends AgentMultiActionOutputParser {
+    constructor() {
+        super(...arguments);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "agents", "openai"]
+        });
+    }
+    static lc_name() {
+        return "OpenAIToolsAgentOutputParser";
+    }
+    async parse(text) {
+        throw new Error(`OpenAIFunctionsAgentOutputParser can only parse messages.\nPassed input: ${text}`);
+    }
+    async parseResult(generations) {
+        if ("message" in generations[0] && isBaseMessage(generations[0].message)) {
+            return this.parseAIMessage(generations[0].message);
+        }
+        throw new Error("parseResult on OpenAIFunctionsAgentOutputParser only works on ChatGeneration output");
+    }
+    /**
+     * Parses the output message into a ToolsAgentAction[] or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A ToolsAgentAction[] or AgentFinish object.
+     */
+    parseAIMessage(message) {
+        if (message.content && typeof message.content !== "string") {
+            throw new Error("This agent cannot parse non-string model responses.");
+        }
+        if (message.additional_kwargs.tool_calls) {
+            const toolCalls = message.additional_kwargs.tool_calls;
+            try {
+                return toolCalls.map((toolCall, i) => {
+                    const toolInput = toolCall.function.arguments
+                        ? JSON.parse(toolCall.function.arguments)
+                        : {};
+                    const messageLog = i === 0 ? [message] : [];
+                    return {
+                        tool: toolCall.function.name,
+                        toolInput,
+                        toolCallId: toolCall.id,
+                        log: `Invoking "${toolCall.function.name}" with ${toolCall.function.arguments ?? "{}"}\n${message.content}`,
+                        messageLog,
+                    };
+                });
+            }
+            catch (error) {
+                throw new OutputParserException(`Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${error}`);
+            }
+        }
+        else {
+            return {
+                returnValues: { output: message.content },
+                log: message.content,
+            };
+        }
+    }
+    getFormatInstructions() {
+        throw new Error("getFormatInstructions not implemented inside OpenAIToolsAgentOutputParser.");
+    }
+}
```
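parseAIMessage drives the new multi-action flow: a message whose additional_kwargs.tool_calls is set becomes one ToolsAgentAction per call (the full AIMessage rides along as messageLog on the first action only, which is what formatToOpenAIToolMessages above relies on to avoid duplicating it), while a plain-text message becomes an AgentFinish. A hedged sketch; the import path for the parser is assumed from the dist layout, and the tool call is invented:

```ts
import { OpenAIToolsAgentOutputParser } from "langchain/agents"; // assumed re-export
import { AIMessage } from "langchain/schema";

const parser = new OpenAIToolsAgentOutputParser();

const message = new AIMessage({
  content: "",
  additional_kwargs: {
    tool_calls: [
      {
        id: "call_abc123",
        type: "function",
        function: { name: "calculator", arguments: '{"expression":"2 + 2"}' },
      },
    ],
  },
});

// => [{ tool: "calculator", toolInput: { expression: "2 + 2" },
//       toolCallId: "call_abc123", log: "...", messageLog: [message] }]
const actions = await parser.parseResult([{ message, text: "" }]);
```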
package/dist/agents/structured_chat/index.cjs CHANGED

```diff
@@ -102,9 +102,8 @@ class StructuredChatAgent extends agent_js_1.Agent {
      * @param args.memoryPrompts List of historical prompts from memory.
      */
     static createPrompt(tools, args) {
-        const { prefix = prompt_js_2.PREFIX, suffix = prompt_js_2.SUFFIX, inputVariables = ["input", "agent_scratchpad"], memoryPrompts = [], } = args ?? {};
+        const { prefix = prompt_js_2.PREFIX, suffix = prompt_js_2.SUFFIX, inputVariables = ["input", "agent_scratchpad"], humanMessageTemplate = "{input}\n\n{agent_scratchpad}", memoryPrompts = [], } = args ?? {};
         const template = [prefix, prompt_js_2.FORMAT_INSTRUCTIONS, suffix].join("\n\n");
-        const humanMessageTemplate = "{input}\n\n{agent_scratchpad}";
         const messages = [
             new chat_js_1.SystemMessagePromptTemplate(new prompt_js_1.PromptTemplate({
                 template,
```
package/dist/agents/structured_chat/index.d.ts CHANGED

```diff
@@ -15,6 +15,8 @@ export interface StructuredChatCreatePromptArgs {
     suffix?: string;
     /** String to put before the list of tools. */
     prefix?: string;
+    /** String to use directly as the human message template. */
+    humanMessageTemplate?: string;
     /** List of input variables the final prompt will expect. */
     inputVariables?: string[];
     /** List of historical prompts from memory. */
```
package/dist/agents/structured_chat/index.js CHANGED

```diff
@@ -99,9 +99,8 @@ export class StructuredChatAgent extends Agent {
      * @param args.memoryPrompts List of historical prompts from memory.
      */
     static createPrompt(tools, args) {
-        const { prefix = PREFIX, suffix = SUFFIX, inputVariables = ["input", "agent_scratchpad"], memoryPrompts = [], } = args ?? {};
+        const { prefix = PREFIX, suffix = SUFFIX, inputVariables = ["input", "agent_scratchpad"], humanMessageTemplate = "{input}\n\n{agent_scratchpad}", memoryPrompts = [], } = args ?? {};
         const template = [prefix, FORMAT_INSTRUCTIONS, suffix].join("\n\n");
-        const humanMessageTemplate = "{input}\n\n{agent_scratchpad}";
         const messages = [
             new SystemMessagePromptTemplate(new PromptTemplate({
                 template,
```
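All three builds pick up the same option: humanMessageTemplate moves from a hard-coded local into StructuredChatCreatePromptArgs, defaulting to the old "{input}\n\n{agent_scratchpad}" so existing callers see no change. A sketch of overriding it; the Calculator tool is just a stand-in:

```ts
import { StructuredChatAgent } from "langchain/agents";
import { Calculator } from "langchain/tools/calculator";

const prompt = StructuredChatAgent.createPrompt([new Calculator()], {
  // Any template works as long as it keeps both variables.
  humanMessageTemplate: "Question: {input}\n\nScratchpad:\n{agent_scratchpad}",
});
```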
package/dist/agents/types.cjs CHANGED

```diff
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.AgentActionOutputParser = void 0;
+exports.AgentMultiActionOutputParser = exports.AgentActionOutputParser = void 0;
 const output_parser_js_1 = require("../schema/output_parser.cjs");
 /**
  * Abstract class representing an output parser specifically for agent
@@ -10,3 +10,10 @@ const output_parser_js_1 = require("../schema/output_parser.cjs");
 class AgentActionOutputParser extends output_parser_js_1.BaseOutputParser {
 }
 exports.AgentActionOutputParser = AgentActionOutputParser;
+/**
+ * Abstract class representing an output parser specifically for agents
+ * that return multiple actions.
+ */
+class AgentMultiActionOutputParser extends output_parser_js_1.BaseOutputParser {
+}
+exports.AgentMultiActionOutputParser = AgentMultiActionOutputParser;
```
package/dist/agents/types.d.ts CHANGED

```diff
@@ -31,6 +31,12 @@ export interface RunnableAgentInput {
  */
 export declare abstract class AgentActionOutputParser extends BaseOutputParser<AgentAction | AgentFinish> {
 }
+/**
+ * Abstract class representing an output parser specifically for agents
+ * that return multiple actions.
+ */
+export declare abstract class AgentMultiActionOutputParser extends BaseOutputParser<AgentAction[] | AgentFinish> {
+}
 /**
  * Type representing the stopping method for an agent. It can be either
  * 'force' or 'generate'.
```
package/dist/agents/types.js CHANGED

```diff
@@ -6,3 +6,9 @@ import { BaseOutputParser } from "../schema/output_parser.js";
  */
 export class AgentActionOutputParser extends BaseOutputParser {
 }
+/**
+ * Abstract class representing an output parser specifically for agents
+ * that return multiple actions.
+ */
+export class AgentMultiActionOutputParser extends BaseOutputParser {
+}
```
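AgentMultiActionOutputParser is the abstract twin of AgentActionOutputParser, typed as BaseOutputParser&lt;AgentAction[] | AgentFinish&gt; so a single model turn can yield several actions; OpenAIToolsAgentOutputParser above is its first concrete subclass. A minimal custom subclass, as a sketch: the import path is assumed, and the line-per-call text format is invented for illustration:

```ts
import { AgentMultiActionOutputParser } from "langchain/agents"; // assumed re-export
import { AgentAction, AgentFinish } from "langchain/schema";

// Invented format: one "tool: input" pair per line; anything else finishes.
class LinePerCallOutputParser extends AgentMultiActionOutputParser {
  lc_namespace = ["langchain", "agents", "custom"];

  async parse(text: string): Promise<AgentAction[] | AgentFinish> {
    const lines = text.split("\n").filter((l) => l.includes(":"));
    if (lines.length === 0) {
      return { returnValues: { output: text }, log: text };
    }
    return lines.map((line) => {
      const [tool, ...rest] = line.split(":");
      return { tool: tool.trim(), toolInput: rest.join(":").trim(), log: line };
    });
  }

  getFormatInstructions(): string {
    return "Emit one `tool: input` pair per line, or plain text to finish.";
  }
}
```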
package/dist/chains/combine_docs_chain.cjs CHANGED

```diff
@@ -177,7 +177,7 @@ class MapReduceDocumentsChain extends base_js_1.BaseChain {
             [this.combineDocumentChain.inputKey]: currentDocs,
             ...rest,
         }));
-        const length = await this.combineDocumentChain.llmChain.
+        const length = await this.combineDocumentChain.llmChain._getNumTokens(formatted);
         const withinTokenLimit = length < this.maxTokens;
         // If we can skip the map step, and we're within the token limit, we don't
         // need to run the map step, so just break out of the loop.
```
package/dist/chains/combine_docs_chain.js CHANGED

```diff
@@ -173,7 +173,7 @@ export class MapReduceDocumentsChain extends BaseChain {
             [this.combineDocumentChain.inputKey]: currentDocs,
             ...rest,
         }));
-        const length = await this.combineDocumentChain.llmChain.
+        const length = await this.combineDocumentChain.llmChain._getNumTokens(formatted);
         const withinTokenLimit = length < this.maxTokens;
         // If we can skip the map step, and we're within the token limit, we don't
         // need to run the map step, so just break out of the loop.
```
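Both builds now route token counting through the inner LLMChain's new _getNumTokens helper (defined in the llm_chain diff below) instead of reaching into the model directly, so counting keeps working when the chain's llm is a wrapped Runnable. The surrounding logic is a plain token-budget gate; a sketch of the idea, with an illustrative budget:

```ts
import { Document } from "langchain/document";
import { LLMChain } from "langchain/chains";

// Illustrative gate: can the chain skip the map step and combine directly?
async function canSkipMapStep(
  llmChain: LLMChain,
  docs: Document[],
  maxTokens = 3000 // made-up budget for illustration
): Promise<boolean> {
  const formatted = docs.map((d) => d.pageContent).join("\n\n");
  // Delegates to the underlying model's getNumTokens, unwrapping Runnables.
  const length = await llmChain._getNumTokens(formatted);
  return length < maxTokens;
}
```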
@@ -5,6 +5,29 @@ const base_js_1 = require("./base.cjs");
|
|
|
5
5
|
const base_js_2 = require("../prompts/base.cjs");
|
|
6
6
|
const index_js_1 = require("../base_language/index.cjs");
|
|
7
7
|
const noop_js_1 = require("../output_parsers/noop.cjs");
|
|
8
|
+
const base_js_3 = require("../schema/runnable/base.cjs");
|
|
9
|
+
function isBaseLanguageModel(llmLike) {
|
|
10
|
+
return typeof llmLike._llmType === "function";
|
|
11
|
+
}
|
|
12
|
+
function _getLanguageModel(llmLike) {
|
|
13
|
+
if (isBaseLanguageModel(llmLike)) {
|
|
14
|
+
return llmLike;
|
|
15
|
+
}
|
|
16
|
+
else if ("bound" in llmLike && base_js_3.Runnable.isRunnable(llmLike.bound)) {
|
|
17
|
+
return _getLanguageModel(llmLike.bound);
|
|
18
|
+
}
|
|
19
|
+
else if ("runnable" in llmLike &&
|
|
20
|
+
"fallbacks" in llmLike &&
|
|
21
|
+
base_js_3.Runnable.isRunnable(llmLike.runnable)) {
|
|
22
|
+
return _getLanguageModel(llmLike.runnable);
|
|
23
|
+
}
|
|
24
|
+
else if ("default" in llmLike && base_js_3.Runnable.isRunnable(llmLike.default)) {
|
|
25
|
+
return _getLanguageModel(llmLike.default);
|
|
26
|
+
}
|
|
27
|
+
else {
|
|
28
|
+
throw new Error("Unable to extract BaseLanguageModel from llmLike object.");
|
|
29
|
+
}
|
|
30
|
+
}
|
|
8
31
|
/**
|
|
9
32
|
* Chain to run queries against LLMs.
|
|
10
33
|
*
|
|
@@ -79,10 +102,15 @@ class LLMChain extends base_js_1.BaseChain {
|
|
|
79
102
|
this.outputParser = this.prompt.outputParser;
|
|
80
103
|
}
|
|
81
104
|
}
|
|
105
|
+
getCallKeys() {
|
|
106
|
+
const callKeys = "callKeys" in this.llm ? this.llm.callKeys : [];
|
|
107
|
+
return callKeys;
|
|
108
|
+
}
|
|
82
109
|
/** @ignore */
|
|
83
110
|
_selectMemoryInputs(values) {
|
|
84
111
|
const valuesForMemory = super._selectMemoryInputs(values);
|
|
85
|
-
|
|
112
|
+
const callKeys = this.getCallKeys();
|
|
113
|
+
for (const key of callKeys) {
|
|
86
114
|
if (key in values) {
|
|
87
115
|
delete valuesForMemory[key];
|
|
88
116
|
}
|
|
@@ -114,16 +142,29 @@ class LLMChain extends base_js_1.BaseChain {
|
|
|
114
142
|
const valuesForLLM = {
|
|
115
143
|
...this.llmKwargs,
|
|
116
144
|
};
|
|
117
|
-
|
|
145
|
+
const callKeys = this.getCallKeys();
|
|
146
|
+
for (const key of callKeys) {
|
|
118
147
|
if (key in values) {
|
|
119
|
-
valuesForLLM
|
|
120
|
-
|
|
148
|
+
if (valuesForLLM) {
|
|
149
|
+
valuesForLLM[key] =
|
|
150
|
+
values[key];
|
|
151
|
+
delete valuesForPrompt[key];
|
|
152
|
+
}
|
|
121
153
|
}
|
|
122
154
|
}
|
|
123
155
|
const promptValue = await this.prompt.formatPromptValue(valuesForPrompt);
|
|
124
|
-
|
|
156
|
+
if ("generatePrompt" in this.llm) {
|
|
157
|
+
const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
|
|
158
|
+
return {
|
|
159
|
+
[this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
|
|
160
|
+
};
|
|
161
|
+
}
|
|
162
|
+
const modelWithParser = this.outputParser
|
|
163
|
+
? this.llm.pipe(this.outputParser)
|
|
164
|
+
: this.llm;
|
|
165
|
+
const response = await modelWithParser.invoke(promptValue, runManager?.getChild());
|
|
125
166
|
return {
|
|
126
|
-
[this.outputKey]:
|
|
167
|
+
[this.outputKey]: response,
|
|
127
168
|
};
|
|
128
169
|
}
|
|
129
170
|
/**
|
|
@@ -160,11 +201,15 @@ class LLMChain extends base_js_1.BaseChain {
|
|
|
160
201
|
}
|
|
161
202
|
/** @deprecated */
|
|
162
203
|
serialize() {
|
|
204
|
+
const serialize = "serialize" in this.llm ? this.llm.serialize() : undefined;
|
|
163
205
|
return {
|
|
164
206
|
_type: `${this._chainType()}_chain`,
|
|
165
|
-
llm:
|
|
207
|
+
llm: serialize,
|
|
166
208
|
prompt: this.prompt.serialize(),
|
|
167
209
|
};
|
|
168
210
|
}
|
|
211
|
+
_getNumTokens(text) {
|
|
212
|
+
return _getLanguageModel(this.llm).getNumTokens(text);
|
|
213
|
+
}
|
|
169
214
|
}
|
|
170
215
|
exports.LLMChain = LLMChain;
|
|
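Taken together, these hunks let an LLMChain carry either a classic BaseLanguageModel (the generatePrompt branch, whose behavior is unchanged) or a bare Runnable, which is invoked on the formatted prompt value and piped through the output parser when one is set. _getLanguageModel exists so helpers that genuinely need a model, such as _getNumTokens, can unwrap common Runnable wrappers: .bound (bindings), .runnable plus .fallbacks (fallback wrappers), and .default. A hedged usage sketch; .bind here produces a RunnableBinding whose .bound is the chat model:

```ts
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";
import { ChatOpenAI } from "langchain/chat_models/openai";

const prompt = PromptTemplate.fromTemplate("Tell me a joke about {topic}.");

// A Runnable, not a BaseLanguageModel: a chat model with bound stop sequences.
const model = new ChatOpenAI({ temperature: 0 }).bind({ stop: ["\n\n"] });

const chain = new LLMChain({ llm: model, prompt });
const result = await chain.call({ topic: "tokenizers" });
// result.text holds the model output (assuming the chain's default parser).

// Token counting still reaches the real model through _getLanguageModel:
const n = await chain._getNumTokens("How many tokens am I?");
```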
package/dist/chains/llm_chain.d.ts CHANGED

```diff
@@ -1,21 +1,26 @@
 import { BaseChain, ChainInputs } from "./base.js";
 import { BasePromptTemplate } from "../prompts/base.js";
-import { BaseLanguageModel } from "../base_language/index.js";
-import { ChainValues, Generation, BasePromptValue } from "../schema/index.js";
+import { BaseLanguageModel, BaseLanguageModelInput } from "../base_language/index.js";
+import { ChainValues, Generation, BasePromptValue, BaseMessage } from "../schema/index.js";
 import { BaseLLMOutputParser } from "../schema/output_parser.js";
 import { SerializedLLMChain } from "./serde.js";
 import { CallbackManager } from "../callbacks/index.js";
 import { BaseCallbackConfig, CallbackManagerForChainRun, Callbacks } from "../callbacks/manager.js";
+import { Runnable } from "../schema/runnable/base.js";
+type LLMType = BaseLanguageModel | Runnable<BaseLanguageModelInput, string> | Runnable<BaseLanguageModelInput, BaseMessage>;
+type CallOptionsIfAvailable<T> = T extends {
+    CallOptions: infer CO;
+} ? CO : any;
 /**
  * Interface for the input parameters of the LLMChain class.
  */
-export interface LLMChainInput<T extends string | object = string,
+export interface LLMChainInput<T extends string | object = string, Model extends LLMType = LLMType> extends ChainInputs {
     /** Prompt object to use */
     prompt: BasePromptTemplate;
     /** LLM Wrapper to use */
-    llm:
+    llm: Model;
     /** Kwargs to pass to LLM */
-    llmKwargs?:
+    llmKwargs?: CallOptionsIfAvailable<Model>;
     /** OutputParser to use */
     outputParser?: BaseLLMOutputParser<T>;
     /** Key to use for output, defaults to `text` */
@@ -34,17 +39,18 @@ export interface LLMChainInput<T extends string | object = string, L extends Bas
  * const llm = new LLMChain({ llm: new OpenAI(), prompt });
  * ```
  */
-export declare class LLMChain<T extends string | object = string,
+export declare class LLMChain<T extends string | object = string, Model extends LLMType = LLMType> extends BaseChain implements LLMChainInput<T> {
     static lc_name(): string;
     lc_serializable: boolean;
     prompt: BasePromptTemplate;
-    llm:
-    llmKwargs?:
+    llm: Model;
+    llmKwargs?: CallOptionsIfAvailable<Model>;
     outputKey: string;
     outputParser?: BaseLLMOutputParser<T>;
     get inputKeys(): string[];
     get outputKeys(): string[];
-    constructor(fields: LLMChainInput<T,
+    constructor(fields: LLMChainInput<T, Model>);
+    private getCallKeys;
     /** @ignore */
     _selectMemoryInputs(values: ChainValues): ChainValues;
     /** @ignore */
@@ -54,9 +60,9 @@ export declare class LLMChain<T extends string | object = string, L extends Base
      *
     * Wraps _call and handles memory.
      */
-    call(values: ChainValues &
+    call(values: ChainValues & CallOptionsIfAvailable<Model>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;
     /** @ignore */
-    _call(values: ChainValues &
+    _call(values: ChainValues & CallOptionsIfAvailable<Model>, runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
     /**
      * Format prompt with values and pass to LLM
      *
@@ -69,9 +75,11 @@ export declare class LLMChain<T extends string | object = string, L extends Base
      * llm.predict({ adjective: "funny" })
      * ```
      */
-    predict(values: ChainValues &
+    predict(values: ChainValues & CallOptionsIfAvailable<Model>, callbackManager?: CallbackManager): Promise<T>;
    _chainType(): "llm";
     static deserialize(data: SerializedLLMChain): Promise<LLMChain<string, BaseLanguageModel<any, import("../base_language/index.js").BaseLanguageModelCallOptions>>>;
     /** @deprecated */
     serialize(): SerializedLLMChain;
+    _getNumTokens(text: string): Promise<number>;
 }
+export {};
```
package/dist/chains/llm_chain.js CHANGED

```diff
@@ -1,7 +1,30 @@
 import { BaseChain } from "./base.js";
 import { BasePromptTemplate } from "../prompts/base.js";
-import { BaseLanguageModel } from "../base_language/index.js";
+import { BaseLanguageModel, } from "../base_language/index.js";
 import { NoOpOutputParser } from "../output_parsers/noop.js";
+import { Runnable } from "../schema/runnable/base.js";
+function isBaseLanguageModel(llmLike) {
+    return typeof llmLike._llmType === "function";
+}
+function _getLanguageModel(llmLike) {
+    if (isBaseLanguageModel(llmLike)) {
+        return llmLike;
+    }
+    else if ("bound" in llmLike && Runnable.isRunnable(llmLike.bound)) {
+        return _getLanguageModel(llmLike.bound);
+    }
+    else if ("runnable" in llmLike &&
+        "fallbacks" in llmLike &&
+        Runnable.isRunnable(llmLike.runnable)) {
+        return _getLanguageModel(llmLike.runnable);
+    }
+    else if ("default" in llmLike && Runnable.isRunnable(llmLike.default)) {
+        return _getLanguageModel(llmLike.default);
+    }
+    else {
+        throw new Error("Unable to extract BaseLanguageModel from llmLike object.");
+    }
+}
 /**
  * Chain to run queries against LLMs.
  *
@@ -76,10 +99,15 @@ export class LLMChain extends BaseChain {
             this.outputParser = this.prompt.outputParser;
         }
     }
+    getCallKeys() {
+        const callKeys = "callKeys" in this.llm ? this.llm.callKeys : [];
+        return callKeys;
+    }
     /** @ignore */
     _selectMemoryInputs(values) {
         const valuesForMemory = super._selectMemoryInputs(values);
-
+        const callKeys = this.getCallKeys();
+        for (const key of callKeys) {
             if (key in values) {
                 delete valuesForMemory[key];
             }
@@ -111,16 +139,29 @@ export class LLMChain extends BaseChain {
         const valuesForLLM = {
             ...this.llmKwargs,
         };
-
+        const callKeys = this.getCallKeys();
+        for (const key of callKeys) {
             if (key in values) {
-                valuesForLLM
-
+                if (valuesForLLM) {
+                    valuesForLLM[key] =
+                        values[key];
+                    delete valuesForPrompt[key];
+                }
             }
         }
         const promptValue = await this.prompt.formatPromptValue(valuesForPrompt);
-
+        if ("generatePrompt" in this.llm) {
+            const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+            return {
+                [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+            };
+        }
+        const modelWithParser = this.outputParser
+            ? this.llm.pipe(this.outputParser)
+            : this.llm;
+        const response = await modelWithParser.invoke(promptValue, runManager?.getChild());
         return {
-            [this.outputKey]:
+            [this.outputKey]: response,
         };
     }
     /**
@@ -157,10 +198,14 @@ export class LLMChain extends BaseChain {
     }
     /** @deprecated */
     serialize() {
+        const serialize = "serialize" in this.llm ? this.llm.serialize() : undefined;
         return {
             _type: `${this._chainType()}_chain`,
-            llm:
+            llm: serialize,
             prompt: this.prompt.serialize(),
         };
     }
+    _getNumTokens(text) {
+        return _getLanguageModel(this.llm).getNumTokens(text);
+    }
 }
```
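The ESM build mirrors the CJS hunks above line for line. One more illustration of what the unwrapping chain handles: a model with fallbacks still supports token counting, because _getLanguageModel follows .runnable on the fallback wrapper. A sketch, assuming withFallbacks is available as a Runnable method in this release line:

```ts
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";
import { ChatOpenAI } from "langchain/chat_models/openai";

const primary = new ChatOpenAI({ modelName: "gpt-4" });
const backup = new ChatOpenAI({ modelName: "gpt-3.5-turbo" });

// RunnableWithFallbacks: carries .runnable (primary) and .fallbacks ([backup]).
const resilient = primary.withFallbacks({ fallbacks: [backup] });

const chain = new LLMChain({
  llm: resilient,
  prompt: PromptTemplate.fromTemplate("{input}"),
});

// _getNumTokens unwraps .runnable to reach the primary model's tokenizer.
const tokens = await chain._getNumTokens("count me");
```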