langchain 0.2.14 → 0.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,6 +5,11 @@ const runnables_1 = require("@langchain/core/runnables");
  const agent_js_1 = require("../agent.cjs");
  const output_parser_js_1 = require("./output_parser.cjs");
  const tool_calling_js_1 = require("../format_scratchpad/tool_calling.cjs");
+ function _isBaseChatModel(x) {
+     const model = x;
+     return (typeof model._modelType === "function" &&
+         model._modelType() === "base_chat_model");
+ }
  /**
   * Create an agent that uses tools.
   * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
@@ -62,10 +67,16 @@ function createToolCallingAgent({ llm, tools, prompt, streamRunnable, }) {
              `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
          ].join("\n"));
      }
-     if (llm.bindTools === undefined) {
-         throw new Error(`This agent requires that the "bind_tools()" method be implemented on the input model.`);
+     let modelWithTools;
+     if (_isBaseChatModel(llm)) {
+         if (llm.bindTools === undefined) {
+             throw new Error(`This agent requires that the "bind_tools()" method be implemented on the input model.`);
+         }
+         modelWithTools = llm.bindTools(tools);
+     }
+     else {
+         modelWithTools = llm;
      }
-     const modelWithTools = llm.bindTools(tools);
      const agent = agent_js_1.AgentRunnableSequence.fromRunnables([
          runnables_1.RunnablePassthrough.assign({
              agent_scratchpad: (input) => (0, tool_calling_js_1.formatToToolMessages)(input.steps),
@@ -1,7 +1,6 @@
- import { BaseChatModel } from "@langchain/core/language_models/chat_models";
  import { ChatPromptTemplate } from "@langchain/core/prompts";
  import { StructuredToolInterface } from "@langchain/core/tools";
- import { ToolDefinition } from "@langchain/core/language_models/base";
+ import { LanguageModelLike, ToolDefinition } from "@langchain/core/language_models/base";
  import { AgentRunnableSequence } from "../agent.js";
  import { ToolsAgentStep } from "./output_parser.js";
  /**
@@ -13,7 +12,7 @@ export type CreateToolCallingAgentParams = {
       * so must either be an OpenAI model that supports that or a wrapper of
       * a different model that adds in equivalent support.
       */
-     llm: BaseChatModel;
+     llm: LanguageModelLike;
      /** Tools this agent has access to. */
      tools: StructuredToolInterface[] | ToolDefinition[];
      /** The prompt to use, must have an input key of `agent_scratchpad`. */
@@ -2,6 +2,11 @@ import { RunnablePassthrough } from "@langchain/core/runnables";
  import { AgentRunnableSequence } from "../agent.js";
  import { ToolCallingAgentOutputParser, } from "./output_parser.js";
  import { formatToToolMessages } from "../format_scratchpad/tool_calling.js";
+ function _isBaseChatModel(x) {
+     const model = x;
+     return (typeof model._modelType === "function" &&
+         model._modelType() === "base_chat_model");
+ }
  /**
   * Create an agent that uses tools.
   * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
@@ -59,10 +64,16 @@ export function createToolCallingAgent({ llm, tools, prompt, streamRunnable, })
              `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
          ].join("\n"));
      }
-     if (llm.bindTools === undefined) {
-         throw new Error(`This agent requires that the "bind_tools()" method be implemented on the input model.`);
+     let modelWithTools;
+     if (_isBaseChatModel(llm)) {
+         if (llm.bindTools === undefined) {
+             throw new Error(`This agent requires that the "bind_tools()" method be implemented on the input model.`);
+         }
+         modelWithTools = llm.bindTools(tools);
+     }
+     else {
+         modelWithTools = llm;
      }
-     const modelWithTools = llm.bindTools(tools);
      const agent = AgentRunnableSequence.fromRunnables([
          RunnablePassthrough.assign({
              agent_scratchpad: (input) => formatToToolMessages(input.steps),
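
The hunks above widen createToolCallingAgent so that `llm` no longer has to be a `BaseChatModel`: tools are only bound when the runtime `_isBaseChatModel` check passes, and any other `LanguageModelLike` (for example a model whose tools the caller already bound) is used as-is. A minimal sketch of what this allows, assuming `@langchain/openai` is installed and an `OPENAI_API_KEY` is set; the weather tool, model name, and prompt wording are illustrative and not part of this release:

```ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { DynamicStructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import { AgentExecutor, createToolCallingAgent } from "langchain/agents";

// Illustrative tool; any StructuredToolInterface works here.
const tools = [
  new DynamicStructuredTool({
    name: "get_weather",
    description: "Look up the current weather for a city.",
    schema: z.object({ city: z.string() }),
    func: async ({ city }) => `It is sunny in ${city}.`,
  }),
];

// The prompt must still expose an `agent_scratchpad` placeholder.
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant."],
  ["human", "{input}"],
  ["placeholder", "{agent_scratchpad}"],
]);

const llm = new ChatOpenAI({ model: "gpt-4o-mini" });

// Binding tools yourself yields a Runnable that is not a BaseChatModel;
// as of 0.2.15 the agent passes it through instead of calling bindTools() again.
const modelWithTools = llm.bindTools(tools);

const agent = createToolCallingAgent({ llm: modelWithTools, tools, prompt });
const executor = new AgentExecutor({ agent, tools });

const result = await executor.invoke({ input: "What's the weather in Paris?" });
console.log(result.output);
```

Passing a plain chat model still works exactly as before, since the `_isBaseChatModel` branch falls back to calling `bindTools(tools)` internally.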
@@ -23,7 +23,7 @@ const load_js_1 = require("./question_answering/load.cjs");
   *   documents,
   *   embeddings
   * );
-  * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input}`);
+  * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input} based on the following context {context}`);
   *
   * const combineDocsChain = await createStuffDocumentsChain({
   *   llm,
@@ -35,7 +35,7 @@ export interface RetrievalQAChainInput extends Omit<ChainInputs, "memory"> {
   *   documents,
   *   embeddings
   * );
-  * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input}`);
+  * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input} based on the following context {context}`);
   *
   * const combineDocsChain = await createStuffDocumentsChain({
   *   llm,
@@ -20,7 +20,7 @@ import { loadQAStuffChain, } from "./question_answering/load.js";
   *   documents,
   *   embeddings
   * );
-  * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input}`);
+  * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input} based on the following context {context}`);
   *
   * const combineDocsChain = await createStuffDocumentsChain({
   *   llm,
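
These three docstring hunks fix the same example: `createStuffDocumentsChain` injects the retrieved documents through a `{context}` prompt variable, so a template containing only `{input}` would answer without the retrieved context. A sketch of the corrected pattern, assuming `@langchain/openai` and the in-memory vector store; the document text and question are made up for illustration:

```ts
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { Document } from "@langchain/core/documents";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";

const llm = new ChatOpenAI({ model: "gpt-4o-mini" });

const vectorstore = await MemoryVectorStore.fromDocuments(
  [new Document({ pageContent: "The sky over the warehouse is painted green." })],
  new OpenAIEmbeddings()
);

// Both {input} and {context} must appear: createStuffDocumentsChain formats the
// retrieved documents into {context} before calling the model.
const prompt = ChatPromptTemplate.fromTemplate(
  `Answer the user's question: {input} based on the following context {context}`
);

const combineDocsChain = await createStuffDocumentsChain({ llm, prompt });
const retrievalChain = await createRetrievalChain({
  retriever: vectorstore.asRetriever(),
  combineDocsChain,
});

const { answer } = await retrievalChain.invoke({
  input: "What color is the sky over the warehouse?",
});
console.log(answer);
```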
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "langchain",
-   "version": "0.2.14",
+   "version": "0.2.15",
    "description": "Typescript bindings for langchain",
    "type": "module",
    "engines": {