langchain 0.3.25 → 0.3.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/openai_functions/index.cjs +5 -3
- package/dist/agents/openai_functions/index.js +5 -3
- package/dist/agents/openai_functions/output_parser.cjs +1 -3
- package/dist/agents/openai_functions/output_parser.d.ts +1 -3
- package/dist/agents/openai_functions/output_parser.js +1 -3
- package/dist/agents/openai_tools/index.cjs +5 -3
- package/dist/agents/openai_tools/index.js +5 -3
- package/dist/agents/openai_tools/output_parser.cjs +1 -1
- package/dist/agents/openai_tools/output_parser.d.ts +1 -1
- package/dist/agents/openai_tools/output_parser.js +1 -1
- package/dist/agents/react/index.cjs +1 -2
- package/dist/agents/react/index.js +1 -2
- package/dist/agents/structured_chat/index.cjs +1 -1
- package/dist/agents/structured_chat/index.js +1 -1
- package/dist/agents/xml/index.cjs +1 -1
- package/dist/agents/xml/index.js +1 -1
- package/dist/agents/xml/output_parser.cjs +1 -1
- package/dist/agents/xml/output_parser.d.ts +1 -1
- package/dist/agents/xml/output_parser.js +1 -1
- package/dist/chains/openai_functions/base.cjs +1 -1
- package/dist/chains/openai_functions/base.js +1 -1
- package/dist/chains/sql_db/sql_db_chain.cjs +1 -1
- package/dist/chains/sql_db/sql_db_chain.js +1 -1
- package/dist/chat_models/universal.cjs +4 -3
- package/dist/chat_models/universal.js +4 -3
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +6 -4
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +6 -4
- package/package.json +2 -2
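
Apart from the `package.json` version bumps, every hunk below makes the same change: call options that 0.3.25 attached with `Runnable.bind(...)` are now attached with `.withConfig(...)`, and tool/function schemas are passed through the model's native `.bindTools(...)` where one exists. A minimal sketch of the pattern from the caller's side (the model name and tool are illustrative, not taken from the diff):

```ts
import { ChatOpenAI } from "@langchain/openai";
import { DynamicTool } from "@langchain/core/tools";

const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });

const searchTool = new DynamicTool({
  name: "search",
  description: "Look up a term.",
  func: async (query: string) => `results for ${query}`,
});

// 0.3.25 style, now removed from the compiled output:
//   const llmWithStop = llm.bind({ stop: ["\nObservation:"] });
// 0.3.27 style: call options travel through withConfig instead.
const llmWithStop = llm.withConfig({ stop: ["\nObservation:"] });

// Tool schemas go through the model's native bindTools.
const llmWithTools = llm.bindTools([searchTool]);
```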
package/dist/agents/openai_functions/index.cjs
CHANGED

@@ -226,9 +226,11 @@ async function createOpenAIFunctionsAgent({ llm, tools, prompt, streamRunnable,
             `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
         ].join("\n"));
     }
-    const llmWithTools = llm.
-
-
+    const llmWithTools = llm.bindTools
+        ? llm.bindTools(tools)
+        : llm.withConfig({
+            functions: tools.map((tool) => (0, function_calling_1.convertToOpenAIFunction)(tool)),
+        });
     const agent = agent_js_1.AgentRunnableSequence.fromRunnables([
         runnables_1.RunnablePassthrough.assign({
             agent_scratchpad: (input) => (0, openai_functions_js_1.formatToOpenAIFunctionMessages)(input.steps),

package/dist/agents/openai_functions/index.js
CHANGED

@@ -221,9 +221,11 @@ export async function createOpenAIFunctionsAgent({ llm, tools, prompt, streamRun
             `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
         ].join("\n"));
     }
-    const llmWithTools = llm.
-
-
+    const llmWithTools = llm.bindTools
+        ? llm.bindTools(tools)
+        : llm.withConfig({
+            functions: tools.map((tool) => convertToOpenAIFunction(tool)),
+        });
     const agent = AgentRunnableSequence.fromRunnables([
         RunnablePassthrough.assign({
            agent_scratchpad: (input) => formatToOpenAIFunctionMessages(input.steps),
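
Caller-facing behavior of the factory is unchanged. A usage sketch, assuming `OPENAI_API_KEY` is set (the prompt wording and the tool are illustrative):

```ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
import { DynamicTool } from "@langchain/core/tools";
import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents";

const tools = [
  new DynamicTool({
    name: "get_time",
    description: "Returns the current time as an ISO string.",
    func: async () => new Date().toISOString(),
  }),
];

// The factory validates that the prompt declares agent_scratchpad,
// which is what the `Found ... instead.` error above guards.
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant."],
  ["human", "{input}"],
  new MessagesPlaceholder("agent_scratchpad"),
]);

const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
const agent = await createOpenAIFunctionsAgent({ llm, tools, prompt });
const executor = new AgentExecutor({ agent, tools });
console.log(await executor.invoke({ input: "What time is it?" }));
```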
package/dist/agents/openai_functions/output_parser.cjs
CHANGED

@@ -17,9 +17,7 @@ const types_js_1 = require("../types.cjs");
  * const modelWithFunctions = new ChatOpenAI({
  *   modelName: "gpt-4",
  *   temperature: 0,
- * }).
- *   functions: tools.map((tool) => convertToOpenAIFunction(tool)),
- * });
+ * }).bindTools(tools);
  *
  * const runnableAgent = RunnableSequence.from([
  *   {

package/dist/agents/openai_functions/output_parser.d.ts
CHANGED

@@ -21,9 +21,7 @@ export type FunctionsAgentAction = AgentAction & {
  * const modelWithFunctions = new ChatOpenAI({
  *   modelName: "gpt-4",
  *   temperature: 0,
- * }).
- *   functions: tools.map((tool) => convertToOpenAIFunction(tool)),
- * });
+ * }).bindTools(tools);
  *
  * const runnableAgent = RunnableSequence.from([
  *   {

package/dist/agents/openai_functions/output_parser.js
CHANGED

@@ -14,9 +14,7 @@ import { AgentActionOutputParser } from "../types.js";
  * const modelWithFunctions = new ChatOpenAI({
  *   modelName: "gpt-4",
  *   temperature: 0,
- * }).
- *   functions: tools.map((tool) => convertToOpenAIFunction(tool)),
- * });
+ * }).bindTools(tools);
  *
  * const runnableAgent = RunnableSequence.from([
  *   {
package/dist/agents/openai_tools/index.cjs
CHANGED

@@ -70,9 +70,11 @@ async function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnable, }) {
             `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
         ].join("\n"));
     }
-    const modelWithTools = llm.
-
-
+    const modelWithTools = llm.bindTools
+        ? llm.bindTools(tools)
+        : llm.withConfig({
+            tools: tools.map((tool) => (0, function_calling_1.convertToOpenAITool)(tool)),
+        });
     const agent = agent_js_1.AgentRunnableSequence.fromRunnables([
         runnables_1.RunnablePassthrough.assign({
             agent_scratchpad: (input) => (0, openai_tools_js_1.formatToOpenAIToolMessages)(input.steps),

package/dist/agents/openai_tools/index.js
CHANGED

@@ -67,9 +67,11 @@ export async function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnabl
             `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
         ].join("\n"));
     }
-    const modelWithTools = llm.
-
-
+    const modelWithTools = llm.bindTools
+        ? llm.bindTools(tools)
+        : llm.withConfig({
+            tools: tools.map((tool) => convertToOpenAITool(tool)),
+        });
     const agent = AgentRunnableSequence.fromRunnables([
         RunnablePassthrough.assign({
             agent_scratchpad: (input) => formatToOpenAIToolMessages(input.steps),
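
The tools variant mirrors the functions variant: prefer the model's `bindTools`, else fall back to `withConfig({ tools })` with schemas converted by `convertToOpenAITool`. A standalone sketch of that branch, assuming a `@langchain/core` version whose `withConfig` forwards call options (which the compiled code above relies on):

```ts
import { convertToOpenAITool } from "@langchain/core/utils/function_calling";
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
import type { StructuredToolInterface } from "@langchain/core/tools";

// Same shape as the compiled code: native bindTools when available,
// otherwise OpenAI-format tool schemas via withConfig.
function withTools(llm: BaseChatModel, tools: StructuredToolInterface[]) {
  return llm.bindTools
    ? llm.bindTools(tools)
    : llm.withConfig({
        tools: tools.map((tool) => convertToOpenAITool(tool)),
      });
}
```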
package/dist/agents/openai_tools/output_parser.cjs
CHANGED

@@ -23,7 +23,7 @@ const types_js_1 = require("../types.cjs");
  *   new ChatOpenAI({
  *     modelName: "gpt-3.5-turbo-1106",
  *     temperature: 0,
- *   }).
+ *   }).bindTools(tools),
  *   new OpenAIToolsAgentOutputParser(),
  * ]).withConfig({ runName: "OpenAIToolsAgent" });
  *

package/dist/agents/openai_tools/output_parser.d.ts
CHANGED

@@ -23,7 +23,7 @@ export type { ToolsAgentAction, ToolsAgentStep };
  *   new ChatOpenAI({
  *     modelName: "gpt-3.5-turbo-1106",
  *     temperature: 0,
- *   }).
+ *   }).bindTools(tools),
  *   new OpenAIToolsAgentOutputParser(),
  * ]).withConfig({ runName: "OpenAIToolsAgent" });
  *

package/dist/agents/openai_tools/output_parser.js
CHANGED

@@ -20,7 +20,7 @@ import { AgentMultiActionOutputParser } from "../types.js";
  *   new ChatOpenAI({
  *     modelName: "gpt-3.5-turbo-1106",
  *     temperature: 0,
- *   }).
+ *   }).bindTools(tools),
  *   new OpenAIToolsAgentOutputParser(),
  * ]).withConfig({ runName: "OpenAIToolsAgent" });
  *
package/dist/agents/react/index.cjs
CHANGED

@@ -59,8 +59,7 @@ async function createReactAgent({ llm, tools, prompt, streamRunnable, }) {
         tools: (0, render_js_1.renderTextDescription)(tools),
         tool_names: toolNames.join(", "),
     });
-
-    const llmWithStop = llm.bind({
+    const llmWithStop = llm.withConfig({
         stop: ["\nObservation:"],
     });
     const agent = agent_js_1.AgentRunnableSequence.fromRunnables([

package/dist/agents/react/index.js
CHANGED

@@ -56,8 +56,7 @@ export async function createReactAgent({ llm, tools, prompt, streamRunnable, })
         tools: renderTextDescription(tools),
         tool_names: toolNames.join(", "),
     });
-
-    const llmWithStop = llm.bind({
+    const llmWithStop = llm.withConfig({
         stop: ["\nObservation:"],
     });
     const agent = AgentRunnableSequence.fromRunnables([
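
For the ReAct factory, the stop sequence that keeps the model from generating its own `Observation:` lines now travels through `withConfig` as well. Usage is unchanged; a sketch, assuming the `hwchase17/react` hub prompt name used throughout the LangChain docs (the tool is illustrative):

```ts
import { ChatOpenAI } from "@langchain/openai";
import { DynamicTool } from "@langchain/core/tools";
import type { PromptTemplate } from "@langchain/core/prompts";
import { pull } from "langchain/hub";
import { AgentExecutor, createReactAgent } from "langchain/agents";

const tools = [
  new DynamicTool({
    name: "word_length",
    description: "Returns the length of a word.",
    func: async (word: string) => String(word.length),
  }),
];

const prompt = await pull<PromptTemplate>("hwchase17/react");
const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });

// Internally this now runs llm.withConfig({ stop: ["\nObservation:"] })
// instead of llm.bind({ ... }).
const agent = await createReactAgent({ llm, tools, prompt });
const executor = new AgentExecutor({ agent, tools });
console.log(await executor.invoke({ input: "How long is 'langchain'?" }));
```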
package/dist/agents/structured_chat/index.cjs
CHANGED

@@ -236,7 +236,7 @@ async function createStructuredChatAgent({ llm, tools, prompt, streamRunnable, }
         tool_names: toolNames.join(", "),
     });
     // TODO: Add .bind to core runnable interface.
-    const llmWithStop = llm.
+    const llmWithStop = llm.withConfig({
         stop: ["Observation"],
     });
     const agent = agent_js_1.AgentRunnableSequence.fromRunnables([

package/dist/agents/structured_chat/index.js
CHANGED

@@ -232,7 +232,7 @@ export async function createStructuredChatAgent({ llm, tools, prompt, streamRunn
         tool_names: toolNames.join(", "),
     });
     // TODO: Add .bind to core runnable interface.
-    const llmWithStop = llm.
+    const llmWithStop = llm.withConfig({
         stop: ["Observation"],
     });
     const agent = AgentRunnableSequence.fromRunnables([
package/dist/agents/xml/index.cjs
CHANGED

@@ -165,7 +165,7 @@ async function createXmlAgent({ llm, tools, prompt, streamRunnable, }) {
         tools: (0, render_js_1.renderTextDescription)(tools),
     });
     // TODO: Add .bind to core runnable interface.
-    const llmWithStop = llm.
+    const llmWithStop = llm.withConfig({
         stop: ["</tool_input>", "</final_answer>"],
     });
     const agent = agent_js_1.AgentRunnableSequence.fromRunnables([

package/dist/agents/xml/index.js
CHANGED

@@ -161,7 +161,7 @@ export async function createXmlAgent({ llm, tools, prompt, streamRunnable, }) {
         tools: renderTextDescription(tools),
     });
     // TODO: Add .bind to core runnable interface.
-    const llmWithStop = llm.
+    const llmWithStop = llm.withConfig({
         stop: ["</tool_input>", "</final_answer>"],
     });
     const agent = AgentRunnableSequence.fromRunnables([
package/dist/agents/xml/output_parser.cjs
CHANGED

@@ -13,7 +13,7 @@ const types_js_1 = require("../types.cjs");
  * const runnableAgent = RunnableSequence.from([
  *   ...rest of runnable
  *   prompt,
- *   new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).
+ *   new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).withConfig({
  *     stop: ["</tool_input>", "</final_answer>"],
  *   }),
  *   new XMLAgentOutputParser(),

package/dist/agents/xml/output_parser.d.ts
CHANGED

@@ -10,7 +10,7 @@ import { AgentActionOutputParser } from "../types.js";
  * const runnableAgent = RunnableSequence.from([
  *   ...rest of runnable
  *   prompt,
- *   new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).
+ *   new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).withConfig({
  *     stop: ["</tool_input>", "</final_answer>"],
  *   }),
  *   new XMLAgentOutputParser(),

package/dist/agents/xml/output_parser.js
CHANGED

@@ -10,7 +10,7 @@ import { AgentActionOutputParser } from "../types.js";
  * const runnableAgent = RunnableSequence.from([
  *   ...rest of runnable
  *   prompt,
- *   new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).
+ *   new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).withConfig({
  *     stop: ["</tool_input>", "</final_answer>"],
  *   }),
  *   new XMLAgentOutputParser(),
package/dist/chains/openai_functions/base.cjs
CHANGED

@@ -64,7 +64,7 @@ function createOpenAIFnRunnable(config) {
             name: functions[0].name,
         };
     }
-    const llmWithKwargs = llm.
+    const llmWithKwargs = llm.withConfig(llmKwargs);
     return prompt.pipe(llmWithKwargs).pipe(outputParser);
 }
 exports.createOpenAIFnRunnable = createOpenAIFnRunnable;
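
`createOpenAIFnRunnable` now threads its accumulated `llmKwargs` (the `functions` array and optional forced `function_call`) through `withConfig` before piping prompt → model → parser. A hand-rolled equivalent for comparison (the extraction schema is illustrative):

```ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { JsonOutputFunctionsParser } from "langchain/output_parsers";

const extractPerson = {
  name: "extract_person",
  description: "Extract a person's name and age from text.",
  parameters: {
    type: "object",
    properties: {
      name: { type: "string" },
      age: { type: "number" },
    },
    required: ["name", "age"],
  },
};

const prompt = ChatPromptTemplate.fromTemplate("Extract info from: {text}");
const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });

// Mirrors the compiled line: const llmWithKwargs = llm.withConfig(llmKwargs);
const llmWithKwargs = llm.withConfig({
  functions: [extractPerson],
  function_call: { name: extractPerson.name },
});

const chain = prompt.pipe(llmWithKwargs).pipe(new JsonOutputFunctionsParser());
console.log(await chain.invoke({ text: "Ada is 36 years old." }));
```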
package/dist/chains/sql_db/sql_db_chain.cjs
CHANGED

@@ -262,7 +262,7 @@ async function createSqlQueryChain({ llm, db, prompt, k = 5, dialect, }) {
             return newInputs;
         },
         promptToUse,
-        llm.
+        llm.withConfig({ stop: ["\nSQLResult:"] }),
         new output_parsers_1.StringOutputParser(),
         strip,
     ]);

package/dist/chains/sql_db/sql_db_chain.js
CHANGED

@@ -258,7 +258,7 @@ export async function createSqlQueryChain({ llm, db, prompt, k = 5, dialect, })
             return newInputs;
         },
         promptToUse,
-        llm.
+        llm.withConfig({ stop: ["\nSQLResult:"] }),
         new StringOutputParser(),
         strip,
     ]);
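
In the SQL chain, the stop token cuts generation off before the model can invent a `SQLResult:` section of the few-shot prompt format, so the chain returns only the generated query; only the attachment mechanism changed. The standalone equivalent of the new line (the model is illustrative):

```ts
import { ChatOpenAI } from "@langchain/openai";

const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });

// Generation halts as soon as the model tries to write "\nSQLResult:".
const queryWriter = llm.withConfig({ stop: ["\nSQLResult:"] });
```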
package/dist/chat_models/universal.cjs
CHANGED

@@ -553,9 +553,10 @@ exports.ConfigurableModel = ConfigurableModel;
  *   temperature: 0,
  * });
  *
- * const configurableModelWithTools = configurableModel.
- *
- *
+ * const configurableModelWithTools = configurableModel.bindTools([
+ *   getWeatherTool,
+ *   getPopulationTool,
+ * ]);
  *
  * const configurableToolResult = await configurableModelWithTools.invoke(
  *   "Which city is hotter today and which is bigger: LA or NY?",

package/dist/chat_models/universal.js
CHANGED

@@ -548,9 +548,10 @@ export class ConfigurableModel extends BaseChatModel {
  *   temperature: 0,
  * });
  *
- * const configurableModelWithTools = configurableModel.
- *
- *
+ * const configurableModelWithTools = configurableModel.bindTools([
+ *   getWeatherTool,
+ *   getPopulationTool,
+ * ]);
  *
  * const configurableToolResult = await configurableModelWithTools.invoke(
  *   "Which city is hotter today and which is bigger: LA or NY?",
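
The repaired docstring shows `bindTools` on the configurable model. A runnable version along the same lines, using the zod-based `tool` helper (the tool body is illustrative; the docstring's `getPopulationTool` is analogous):

```ts
import { initChatModel } from "langchain/chat_models/universal";
import { tool } from "@langchain/core/tools";
import { z } from "zod";

const getWeatherTool = tool(
  async ({ location }) => `It is sunny in ${location}.`,
  {
    name: "get_weather",
    description: "Get the current weather for a location.",
    schema: z.object({ location: z.string() }),
  }
);

// No model fixed up front; it is chosen per call via `configurable`.
const configurableModel = await initChatModel(undefined, { temperature: 0 });
const modelWithTools = configurableModel.bindTools([getWeatherTool]);

const result = await modelWithTools.invoke("What's the weather in LA?", {
  configurable: { model: "gpt-4o-mini" },
});
```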
package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs
CHANGED

@@ -187,8 +187,9 @@ class ViolationOfExpectationsChain extends base_js_1.BaseChain {
      */
     async predictNextUserMessage(chatHistory, runManager) {
         const messageString = this.getChatHistoryString(chatHistory);
-        const llmWithFunctions = this.llm
-
+        const llmWithFunctions = this.llm
+            .bindTools([types_js_1.PREDICT_NEXT_USER_MESSAGE_FUNCTION])
+            .withConfig({
             function_call: { name: types_js_1.PREDICT_NEXT_USER_MESSAGE_FUNCTION.name },
         });
         const chain = violation_of_expectations_prompt_js_1.PREDICT_NEXT_USER_MESSAGE_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);

@@ -242,8 +243,9 @@ class ViolationOfExpectationsChain extends base_js_1.BaseChain {
      * @throws {Error} If the response from the language model does not contain the expected keys: 'violationExplanation', 'explainedPredictionErrors', and 'accuratePrediction'.
      */
     async getPredictionViolations({ userPredictions, userResponse, runManager, }) {
-        const llmWithFunctions = this.llm
-
+        const llmWithFunctions = this.llm
+            .bindTools([types_js_1.PREDICTION_VIOLATIONS_FUNCTION])
+            .withConfig({
             function_call: { name: types_js_1.PREDICTION_VIOLATIONS_FUNCTION.name },
         });
         const chain = violation_of_expectations_prompt_js_1.PREDICTION_VIOLATIONS_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);

package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js
CHANGED

@@ -184,8 +184,9 @@ export class ViolationOfExpectationsChain extends BaseChain {
      */
     async predictNextUserMessage(chatHistory, runManager) {
         const messageString = this.getChatHistoryString(chatHistory);
-        const llmWithFunctions = this.llm
-
+        const llmWithFunctions = this.llm
+            .bindTools([PREDICT_NEXT_USER_MESSAGE_FUNCTION])
+            .withConfig({
             function_call: { name: PREDICT_NEXT_USER_MESSAGE_FUNCTION.name },
         });
         const chain = PREDICT_NEXT_USER_MESSAGE_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);

@@ -239,8 +240,9 @@ export class ViolationOfExpectationsChain extends BaseChain {
      * @throws {Error} If the response from the language model does not contain the expected keys: 'violationExplanation', 'explainedPredictionErrors', and 'accuratePrediction'.
      */
     async getPredictionViolations({ userPredictions, userResponse, runManager, }) {
-        const llmWithFunctions = this.llm
-
+        const llmWithFunctions = this.llm
+            .bindTools([PREDICTION_VIOLATIONS_FUNCTION])
+            .withConfig({
             function_call: { name: PREDICTION_VIOLATIONS_FUNCTION.name },
         });
         const chain = PREDICTION_VIOLATIONS_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);
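
In the experimental chain, the function schema and the forced `function_call` are now attached in two steps: `bindTools` for the schema, then `withConfig` for the call directive. The same chaining in isolation (the schema here is a stand-in for the chain's internal `PREDICT_NEXT_USER_MESSAGE_FUNCTION`):

```ts
import { ChatOpenAI } from "@langchain/openai";

// Stand-in for the chain's internal function definition.
const predictNextUserMessage = {
  name: "predictNextUserMessage",
  description: "Predict the user's next message.",
  parameters: {
    type: "object",
    properties: { prediction: { type: "string" } },
    required: ["prediction"],
  },
};

const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
const llmWithFunctions = llm
  .bindTools([predictNextUserMessage])
  .withConfig({ function_call: { name: predictNextUserMessage.name } });
```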
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "langchain",
-  "version": "0.3.25",
+  "version": "0.3.27",
   "description": "Typescript bindings for langchain",
   "type": "module",
   "engines": {

@@ -545,7 +545,7 @@
     "js-tiktoken": "^1.0.12",
     "js-yaml": "^4.1.0",
     "jsonpointer": "^5.0.1",
-    "langsmith": "^0.3.
+    "langsmith": "^0.3.29",
     "openapi-types": "^12.1.3",
     "p-retry": "4",
     "uuid": "^10.0.0",