langchain 0.1.32 → 0.1.34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/dist/agents/agent.cjs +2 -1
  2. package/dist/agents/agent.js +2 -1
  3. package/dist/agents/format_scratchpad/openai_tools.cjs +2 -16
  4. package/dist/agents/format_scratchpad/openai_tools.d.ts +2 -3
  5. package/dist/agents/format_scratchpad/openai_tools.js +2 -15
  6. package/dist/agents/format_scratchpad/tool_calling.cjs +30 -0
  7. package/dist/agents/format_scratchpad/tool_calling.d.ts +10 -0
  8. package/dist/agents/format_scratchpad/tool_calling.js +25 -0
  9. package/dist/agents/index.cjs +8 -6
  10. package/dist/agents/index.d.ts +1 -0
  11. package/dist/agents/index.js +1 -0
  12. package/dist/agents/openai_tools/index.cjs +1 -1
  13. package/dist/agents/openai_tools/index.d.ts +1 -1
  14. package/dist/agents/openai_tools/index.js +1 -1
  15. package/dist/agents/openai_tools/output_parser.cjs +0 -2
  16. package/dist/agents/openai_tools/output_parser.d.ts +3 -13
  17. package/dist/agents/openai_tools/output_parser.js +0 -2
  18. package/dist/agents/tool_calling/index.cjs +83 -0
  19. package/dist/agents/tool_calling/index.d.ts +78 -0
  20. package/dist/agents/tool_calling/index.js +79 -0
  21. package/dist/agents/tool_calling/output_parser.cjs +75 -0
  22. package/dist/agents/tool_calling/output_parser.d.ts +22 -0
  23. package/dist/agents/tool_calling/output_parser.js +70 -0
  24. package/dist/chains/openai_functions/extraction.d.ts +2 -2
  25. package/dist/chains/openai_functions/structured_output.d.ts +2 -2
  26. package/dist/chains/openai_functions/tagging.d.ts +2 -2
  27. package/dist/document_loaders/web/github.cjs +6 -1
  28. package/dist/document_loaders/web/github.js +6 -1
  29. package/dist/experimental/openai_assistant/index.cjs +1 -1
  30. package/dist/experimental/openai_assistant/index.js +1 -1
  31. package/dist/memory/vector_store.cjs +11 -1
  32. package/dist/memory/vector_store.d.ts +8 -0
  33. package/dist/memory/vector_store.js +11 -1
  34. package/dist/retrievers/self_query/index.cjs +1 -1
  35. package/dist/retrievers/self_query/index.d.ts +1 -1
  36. package/dist/retrievers/self_query/index.js +1 -1
  37. package/dist/retrievers/self_query/pinecone.cjs +1 -1
  38. package/dist/retrievers/self_query/pinecone.d.ts +1 -1
  39. package/dist/retrievers/self_query/pinecone.js +1 -1
  40. package/package.json +5 -5

package/dist/agents/agent.cjs CHANGED
@@ -246,7 +246,8 @@ class RunnableMultiActionAgent extends BaseMultiActionAgent {
  });
  this.runnable = fields.runnable;
  this.stop = fields.stop;
- this.defaultRunName = fields.defaultRunName ?? this.defaultRunName;
+ this.defaultRunName =
+ fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;
  this.streamRunnable = fields.streamRunnable ?? this.streamRunnable;
  }
  async plan(steps, inputs, callbackManager, config) {

package/dist/agents/agent.js CHANGED
@@ -237,7 +237,8 @@ export class RunnableMultiActionAgent extends BaseMultiActionAgent {
  });
  this.runnable = fields.runnable;
  this.stop = fields.stop;
- this.defaultRunName = fields.defaultRunName ?? this.defaultRunName;
+ this.defaultRunName =
+ fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;
  this.streamRunnable = fields.streamRunnable ?? this.streamRunnable;
  }
  async plan(steps, inputs, callbackManager, config) {

package/dist/agents/format_scratchpad/openai_tools.cjs CHANGED
@@ -1,19 +1,5 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.formatToOpenAIToolMessages = void 0;
- const messages_1 = require("@langchain/core/messages");
- function formatToOpenAIToolMessages(steps) {
- return steps.flatMap(({ action, observation }) => {
- if ("messageLog" in action && action.messageLog !== undefined) {
- const log = action.messageLog;
- return log.concat(new messages_1.ToolMessage({
- content: observation,
- tool_call_id: action.toolCallId,
- }));
- }
- else {
- return [new messages_1.AIMessage(action.log)];
- }
- });
- }
- exports.formatToOpenAIToolMessages = formatToOpenAIToolMessages;
+ const tool_calling_js_1 = require("./tool_calling.cjs");
+ Object.defineProperty(exports, "formatToOpenAIToolMessages", { enumerable: true, get: function () { return tool_calling_js_1.formatToToolMessages; } });

package/dist/agents/format_scratchpad/openai_tools.d.ts CHANGED
@@ -1,3 +1,2 @@
- import { type BaseMessage } from "@langchain/core/messages";
- import type { ToolsAgentStep } from "../openai/output_parser.js";
- export declare function formatToOpenAIToolMessages(steps: ToolsAgentStep[]): BaseMessage[];
+ import { formatToToolMessages as formatToOpenAIToolMessages } from "./tool_calling.js";
+ export { formatToOpenAIToolMessages };

package/dist/agents/format_scratchpad/openai_tools.js CHANGED
@@ -1,15 +1,2 @@
- import { ToolMessage, AIMessage, } from "@langchain/core/messages";
- export function formatToOpenAIToolMessages(steps) {
- return steps.flatMap(({ action, observation }) => {
- if ("messageLog" in action && action.messageLog !== undefined) {
- const log = action.messageLog;
- return log.concat(new ToolMessage({
- content: observation,
- tool_call_id: action.toolCallId,
- }));
- }
- else {
- return [new AIMessage(action.log)];
- }
- });
- }
+ import { formatToToolMessages as formatToOpenAIToolMessages } from "./tool_calling.js";
+ export { formatToOpenAIToolMessages };

package/dist/agents/format_scratchpad/tool_calling.cjs CHANGED
@@ -0,0 +1,30 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.formatToToolMessages = exports._createToolMessage = void 0;
+ const messages_1 = require("@langchain/core/messages");
+ /**
+ * Convert agent action and observation into a function message.
+ * @param agentAction - The tool invocation request from the agent
+ * @param observation - The result of the tool invocation
+ * @returns FunctionMessage that corresponds to the original tool invocation
+ */
+ function _createToolMessage(step) {
+ return new messages_1.ToolMessage({
+ tool_call_id: step.action.toolCallId,
+ content: step.observation,
+ additional_kwargs: { name: step.action.tool },
+ });
+ }
+ exports._createToolMessage = _createToolMessage;
+ function formatToToolMessages(steps) {
+ return steps.flatMap(({ action, observation }) => {
+ if ("messageLog" in action && action.messageLog !== undefined) {
+ const log = action.messageLog;
+ return log.concat(_createToolMessage({ action, observation }));
+ }
+ else {
+ return [new messages_1.AIMessage(action.log)];
+ }
+ });
+ }
+ exports.formatToToolMessages = formatToToolMessages;

package/dist/agents/format_scratchpad/tool_calling.d.ts CHANGED
@@ -0,0 +1,10 @@
+ import { BaseMessage, ToolMessage } from "@langchain/core/messages";
+ import { ToolsAgentStep } from "../tool_calling/output_parser.js";
+ /**
+ * Convert agent action and observation into a function message.
+ * @param agentAction - The tool invocation request from the agent
+ * @param observation - The result of the tool invocation
+ * @returns FunctionMessage that corresponds to the original tool invocation
+ */
+ export declare function _createToolMessage(step: ToolsAgentStep): ToolMessage;
+ export declare function formatToToolMessages(steps: ToolsAgentStep[]): BaseMessage[];

package/dist/agents/format_scratchpad/tool_calling.js CHANGED
@@ -0,0 +1,25 @@
+ import { AIMessage, ToolMessage } from "@langchain/core/messages";
+ /**
+ * Convert agent action and observation into a function message.
+ * @param agentAction - The tool invocation request from the agent
+ * @param observation - The result of the tool invocation
+ * @returns FunctionMessage that corresponds to the original tool invocation
+ */
+ export function _createToolMessage(step) {
+ return new ToolMessage({
+ tool_call_id: step.action.toolCallId,
+ content: step.observation,
+ additional_kwargs: { name: step.action.tool },
+ });
+ }
+ export function formatToToolMessages(steps) {
+ return steps.flatMap(({ action, observation }) => {
+ if ("messageLog" in action && action.messageLog !== undefined) {
+ const log = action.messageLog;
+ return log.concat(_createToolMessage({ action, observation }));
+ }
+ else {
+ return [new AIMessage(action.log)];
+ }
+ });
+ }
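
A minimal sketch (not part of the published package) of what the new formatToToolMessages scratchpad formatter produces for a single agent step; the import path, tool name, and step values below are illustrative assumptions.

```typescript
// Sketch only: the entrypoint path and all step values here are hypothetical.
import { AIMessage } from "@langchain/core/messages";
import { formatToToolMessages } from "langchain/agents/format_scratchpad/tool_calling";

const steps = [
  {
    action: {
      tool: "calculator",
      toolInput: { expression: "2 + 2" },
      toolCallId: "call_123",
      log: 'Invoking "calculator" with {"expression":"2 + 2"}',
      // messageLog carries the AIMessage that originally requested the tool call
      messageLog: [new AIMessage({ content: "" })],
    },
    observation: "4",
  },
];

// Returns the logged AIMessage followed by a ToolMessage whose tool_call_id
// matches action.toolCallId and whose content is the observation ("4").
const scratchpad = formatToToolMessages(steps);
```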

package/dist/agents/index.cjs CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.createReactAgent = exports.createXmlAgent = exports.XMLAgent = exports.createOpenAIToolsAgent = exports.createOpenAIFunctionsAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.createStructuredChatAgent = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.RunnableAgent = exports.BaseMultiActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
+ exports.createReactAgent = exports.createXmlAgent = exports.XMLAgent = exports.createToolCallingAgent = exports.createOpenAIToolsAgent = exports.createOpenAIFunctionsAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.createStructuredChatAgent = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.RunnableAgent = exports.BaseMultiActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
  var agent_js_1 = require("./agent.cjs");
  Object.defineProperty(exports, "Agent", { enumerable: true, get: function () { return agent_js_1.Agent; } });
  Object.defineProperty(exports, "BaseSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseSingleActionAgent; } });
@@ -51,8 +51,10 @@ Object.defineProperty(exports, "OpenAIAgent", { enumerable: true, get: function
  Object.defineProperty(exports, "createOpenAIFunctionsAgent", { enumerable: true, get: function () { return index_js_6.createOpenAIFunctionsAgent; } });
  var index_js_7 = require("./openai_tools/index.cjs");
  Object.defineProperty(exports, "createOpenAIToolsAgent", { enumerable: true, get: function () { return index_js_7.createOpenAIToolsAgent; } });
- var index_js_8 = require("./xml/index.cjs");
- Object.defineProperty(exports, "XMLAgent", { enumerable: true, get: function () { return index_js_8.XMLAgent; } });
- Object.defineProperty(exports, "createXmlAgent", { enumerable: true, get: function () { return index_js_8.createXmlAgent; } });
- var index_js_9 = require("./react/index.cjs");
- Object.defineProperty(exports, "createReactAgent", { enumerable: true, get: function () { return index_js_9.createReactAgent; } });
+ var index_js_8 = require("./tool_calling/index.cjs");
+ Object.defineProperty(exports, "createToolCallingAgent", { enumerable: true, get: function () { return index_js_8.createToolCallingAgent; } });
+ var index_js_9 = require("./xml/index.cjs");
+ Object.defineProperty(exports, "XMLAgent", { enumerable: true, get: function () { return index_js_9.XMLAgent; } });
+ Object.defineProperty(exports, "createXmlAgent", { enumerable: true, get: function () { return index_js_9.createXmlAgent; } });
+ var index_js_10 = require("./react/index.cjs");
+ Object.defineProperty(exports, "createReactAgent", { enumerable: true, get: function () { return index_js_10.createReactAgent; } });

package/dist/agents/index.d.ts CHANGED
@@ -14,6 +14,7 @@ export { StructuredChatAgent, type StructuredChatAgentInput, type StructuredChat
  export { StructuredChatOutputParser, type StructuredChatOutputParserArgs, StructuredChatOutputParserWithRetries, } from "./structured_chat/outputParser.js";
  export { OpenAIAgent, type OpenAIAgentInput, type OpenAIAgentCreatePromptArgs, type CreateOpenAIFunctionsAgentParams, createOpenAIFunctionsAgent, } from "./openai_functions/index.js";
  export { type CreateOpenAIToolsAgentParams, createOpenAIToolsAgent, } from "./openai_tools/index.js";
+ export { type CreateToolCallingAgentParams, createToolCallingAgent, } from "./tool_calling/index.js";
  export { XMLAgent, type XMLAgentInput, type CreateXmlAgentParams, createXmlAgent, } from "./xml/index.js";
  export { type CreateReactAgentParams, createReactAgent, } from "./react/index.js";
  export type { AgentAction, AgentFinish, AgentStep, } from "@langchain/core/agents";

package/dist/agents/index.js CHANGED
@@ -14,5 +14,6 @@ export { StructuredChatAgent, createStructuredChatAgent, } from "./structured_ch
  export { StructuredChatOutputParser, StructuredChatOutputParserWithRetries, } from "./structured_chat/outputParser.js";
  export { OpenAIAgent, createOpenAIFunctionsAgent, } from "./openai_functions/index.js";
  export { createOpenAIToolsAgent, } from "./openai_tools/index.js";
+ export { createToolCallingAgent, } from "./tool_calling/index.js";
  export { XMLAgent, createXmlAgent, } from "./xml/index.js";
  export { createReactAgent, } from "./react/index.js";

package/dist/agents/openai_tools/index.cjs CHANGED
@@ -4,7 +4,7 @@ exports.createOpenAIToolsAgent = exports.OpenAIToolsAgentOutputParser = void 0;
  const runnables_1 = require("@langchain/core/runnables");
  const function_calling_1 = require("@langchain/core/utils/function_calling");
  const openai_tools_js_1 = require("../format_scratchpad/openai_tools.cjs");
- const output_parser_js_1 = require("../openai/output_parser.cjs");
+ const output_parser_js_1 = require("./output_parser.cjs");
  Object.defineProperty(exports, "OpenAIToolsAgentOutputParser", { enumerable: true, get: function () { return output_parser_js_1.OpenAIToolsAgentOutputParser; } });
  const agent_js_1 = require("../agent.cjs");
  /**

package/dist/agents/openai_tools/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import type { StructuredToolInterface } from "@langchain/core/tools";
  import type { BaseChatModel, BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
  import { ChatPromptTemplate } from "@langchain/core/prompts";
  import { OpenAIClient } from "@langchain/openai";
- import { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from "../openai/output_parser.js";
+ import { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from "./output_parser.js";
  import { AgentRunnableSequence } from "../agent.js";
  export { OpenAIToolsAgentOutputParser, type ToolsAgentStep };
  /**

package/dist/agents/openai_tools/index.js CHANGED
@@ -1,7 +1,7 @@
  import { RunnablePassthrough } from "@langchain/core/runnables";
  import { convertToOpenAITool } from "@langchain/core/utils/function_calling";
  import { formatToOpenAIToolMessages } from "../format_scratchpad/openai_tools.js";
- import { OpenAIToolsAgentOutputParser, } from "../openai/output_parser.js";
+ import { OpenAIToolsAgentOutputParser, } from "./output_parser.js";
  import { AgentRunnableSequence } from "../agent.js";
  export { OpenAIToolsAgentOutputParser };
  /**

package/dist/agents/openai_tools/output_parser.cjs CHANGED
@@ -7,7 +7,6 @@ const types_js_1 = require("../types.cjs");
  /**
  * @example
  * ```typescript
- *
  * const prompt = ChatPromptTemplate.fromMessages([
  * ["ai", "You are a helpful assistant"],
  * ["human", "{input}"],
@@ -32,7 +31,6 @@ const types_js_1 = require("../types.cjs");
  * input:
  * "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
  * });
- *
  * ```
  */
  class OpenAIToolsAgentOutputParser extends types_js_1.AgentMultiActionOutputParser {

package/dist/agents/openai_tools/output_parser.d.ts CHANGED
@@ -1,21 +1,12 @@
- import { AgentAction, AgentFinish, AgentStep } from "@langchain/core/agents";
+ import { AgentAction, AgentFinish } from "@langchain/core/agents";
  import { BaseMessage } from "@langchain/core/messages";
  import { ChatGeneration } from "@langchain/core/outputs";
  import { AgentMultiActionOutputParser } from "../types.js";
- /**
- * Type that represents an agent action with an optional message log.
- */
- export type ToolsAgentAction = AgentAction & {
- toolCallId: string;
- messageLog?: BaseMessage[];
- };
- export type ToolsAgentStep = AgentStep & {
- action: ToolsAgentAction;
- };
+ import { ToolsAgentAction, ToolsAgentStep } from "../tool_calling/output_parser.js";
+ export type { ToolsAgentAction, ToolsAgentStep };
  /**
  * @example
  * ```typescript
- *
  * const prompt = ChatPromptTemplate.fromMessages([
  * ["ai", "You are a helpful assistant"],
  * ["human", "{input}"],
@@ -40,7 +31,6 @@ export type ToolsAgentStep = AgentStep & {
  * input:
  * "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
  * });
- *
  * ```
  */
  export declare class OpenAIToolsAgentOutputParser extends AgentMultiActionOutputParser {

package/dist/agents/openai_tools/output_parser.js CHANGED
@@ -4,7 +4,6 @@ import { AgentMultiActionOutputParser } from "../types.js";
  /**
  * @example
  * ```typescript
- *
  * const prompt = ChatPromptTemplate.fromMessages([
  * ["ai", "You are a helpful assistant"],
  * ["human", "{input}"],
@@ -29,7 +28,6 @@ import { AgentMultiActionOutputParser } from "../types.js";
  * input:
  * "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
  * });
- *
  * ```
  */
  export class OpenAIToolsAgentOutputParser extends AgentMultiActionOutputParser {

package/dist/agents/tool_calling/index.cjs CHANGED
@@ -0,0 +1,83 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.createToolCallingAgent = void 0;
+ const runnables_1 = require("@langchain/core/runnables");
+ const agent_js_1 = require("../agent.cjs");
+ const output_parser_js_1 = require("./output_parser.cjs");
+ const tool_calling_js_1 = require("../format_scratchpad/tool_calling.cjs");
+ /**
+ * Create an agent that uses tools.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ * variables as the prompt passed in does. It returns as output either an
+ * AgentAction or AgentFinish.
+ * @example
+ * ```typescript
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
+ * import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages(
+ * [
+ * ["system", "You are a helpful assistant"],
+ * ["placeholder", "{chat_history}"],
+ * ["human", "{input}"],
+ * ["placeholder", "{agent_scratchpad}"],
+ * ]
+ * );
+ *
+ *
+ * const llm = new ChatAnthropic({
+ * modelName: "claude-3-opus-20240229",
+ * temperature: 0,
+ * });
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * const agent = createToolCallingAgent({ llm, tools, prompt });
+ *
+ * const agentExecutor = new AgentExecutor({ agent, tools });
+ *
+ * const result = await agentExecutor.invoke({input: "what is LangChain?"});
+ *
+ * // Using with chat history
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * const result2 = await agentExecutor.invoke(
+ * {
+ * input: "what's my name?",
+ * chat_history: [
+ * new HumanMessage({content: "hi! my name is bob"}),
+ * new AIMessage({content: "Hello Bob! How can I assist you today?"}),
+ * ],
+ * }
+ * );
+ * ```
+ */
+ function createToolCallingAgent({ llm, tools, prompt, streamRunnable, }) {
+ if (!prompt.inputVariables.includes("agent_scratchpad")) {
+ throw new Error([
+ `Prompt must have an input variable named "agent_scratchpad".`,
+ `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
+ ].join("\n"));
+ }
+ if (llm.bindTools === undefined) {
+ throw new Error(`This agent requires that the "bind_tools()" method be implemented on the input model.`);
+ }
+ const modelWithTools = llm.bindTools(tools);
+ const agent = agent_js_1.AgentRunnableSequence.fromRunnables([
+ runnables_1.RunnablePassthrough.assign({
+ agent_scratchpad: (input) => (0, tool_calling_js_1.formatToToolMessages)(input.steps),
+ }),
+ prompt,
+ modelWithTools,
+ new output_parser_js_1.ToolCallingAgentOutputParser(),
+ ], {
+ name: "ToolCallingAgent",
+ streamRunnable,
+ singleAction: false,
+ });
+ return agent;
+ }
+ exports.createToolCallingAgent = createToolCallingAgent;

package/dist/agents/tool_calling/index.d.ts CHANGED
@@ -0,0 +1,78 @@
+ import { BaseChatModel } from "@langchain/core/language_models/chat_models";
+ import { ChatPromptTemplate } from "@langchain/core/prompts";
+ import { StructuredToolInterface } from "@langchain/core/tools";
+ import { AgentRunnableSequence } from "../agent.js";
+ import { ToolsAgentStep } from "./output_parser.js";
+ /**
+ * Params used by the createOpenAIToolsAgent function.
+ */
+ export type CreateToolCallingAgentParams = {
+ /**
+ * LLM to use as the agent. Should work with OpenAI tool calling,
+ * so must either be an OpenAI model that supports that or a wrapper of
+ * a different model that adds in equivalent support.
+ */
+ llm: BaseChatModel;
+ /** Tools this agent has access to. */
+ tools: StructuredToolInterface[];
+ /** The prompt to use, must have an input key of `agent_scratchpad`. */
+ prompt: ChatPromptTemplate;
+ /**
+ * Whether to invoke the underlying model in streaming mode,
+ * allowing streaming of intermediate steps. Defaults to true.
+ */
+ streamRunnable?: boolean;
+ };
+ /**
+ * Create an agent that uses tools.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ * variables as the prompt passed in does. It returns as output either an
+ * AgentAction or AgentFinish.
+ * @example
+ * ```typescript
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
+ * import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages(
+ * [
+ * ["system", "You are a helpful assistant"],
+ * ["placeholder", "{chat_history}"],
+ * ["human", "{input}"],
+ * ["placeholder", "{agent_scratchpad}"],
+ * ]
+ * );
+ *
+ *
+ * const llm = new ChatAnthropic({
+ * modelName: "claude-3-opus-20240229",
+ * temperature: 0,
+ * });
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * const agent = createToolCallingAgent({ llm, tools, prompt });
+ *
+ * const agentExecutor = new AgentExecutor({ agent, tools });
+ *
+ * const result = await agentExecutor.invoke({input: "what is LangChain?"});
+ *
+ * // Using with chat history
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * const result2 = await agentExecutor.invoke(
+ * {
+ * input: "what's my name?",
+ * chat_history: [
+ * new HumanMessage({content: "hi! my name is bob"}),
+ * new AIMessage({content: "Hello Bob! How can I assist you today?"}),
+ * ],
+ * }
+ * );
+ * ```
+ */
+ export declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable, }: CreateToolCallingAgentParams): AgentRunnableSequence<{
+ steps: ToolsAgentStep[];
+ }, import("@langchain/core/agents").AgentFinish | import("@langchain/core/agents").AgentAction[]>;

package/dist/agents/tool_calling/index.js CHANGED
@@ -0,0 +1,79 @@
+ import { RunnablePassthrough } from "@langchain/core/runnables";
+ import { AgentRunnableSequence } from "../agent.js";
+ import { ToolCallingAgentOutputParser, } from "./output_parser.js";
+ import { formatToToolMessages } from "../format_scratchpad/tool_calling.js";
+ /**
+ * Create an agent that uses tools.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ * variables as the prompt passed in does. It returns as output either an
+ * AgentAction or AgentFinish.
+ * @example
+ * ```typescript
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
+ * import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages(
+ * [
+ * ["system", "You are a helpful assistant"],
+ * ["placeholder", "{chat_history}"],
+ * ["human", "{input}"],
+ * ["placeholder", "{agent_scratchpad}"],
+ * ]
+ * );
+ *
+ *
+ * const llm = new ChatAnthropic({
+ * modelName: "claude-3-opus-20240229",
+ * temperature: 0,
+ * });
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * const agent = createToolCallingAgent({ llm, tools, prompt });
+ *
+ * const agentExecutor = new AgentExecutor({ agent, tools });
+ *
+ * const result = await agentExecutor.invoke({input: "what is LangChain?"});
+ *
+ * // Using with chat history
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * const result2 = await agentExecutor.invoke(
+ * {
+ * input: "what's my name?",
+ * chat_history: [
+ * new HumanMessage({content: "hi! my name is bob"}),
+ * new AIMessage({content: "Hello Bob! How can I assist you today?"}),
+ * ],
+ * }
+ * );
+ * ```
+ */
+ export function createToolCallingAgent({ llm, tools, prompt, streamRunnable, }) {
+ if (!prompt.inputVariables.includes("agent_scratchpad")) {
+ throw new Error([
+ `Prompt must have an input variable named "agent_scratchpad".`,
+ `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
+ ].join("\n"));
+ }
+ if (llm.bindTools === undefined) {
+ throw new Error(`This agent requires that the "bind_tools()" method be implemented on the input model.`);
+ }
+ const modelWithTools = llm.bindTools(tools);
+ const agent = AgentRunnableSequence.fromRunnables([
+ RunnablePassthrough.assign({
+ agent_scratchpad: (input) => formatToToolMessages(input.steps),
+ }),
+ prompt,
+ modelWithTools,
+ new ToolCallingAgentOutputParser(),
+ ], {
+ name: "ToolCallingAgent",
+ streamRunnable,
+ singleAction: false,
+ });
+ return agent;
+ }

package/dist/agents/tool_calling/output_parser.cjs CHANGED
@@ -0,0 +1,75 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ToolCallingAgentOutputParser = exports.parseAIMessageToToolAction = void 0;
+ const messages_1 = require("@langchain/core/messages");
+ const output_parsers_1 = require("@langchain/core/output_parsers");
+ const types_js_1 = require("../types.cjs");
+ function parseAIMessageToToolAction(message) {
+ const stringifiedMessageContent = typeof message.content === "string"
+ ? message.content
+ : JSON.stringify(message.content);
+ let toolCalls = [];
+ if (message.tool_calls !== undefined && message.tool_calls.length > 0) {
+ toolCalls = message.tool_calls;
+ }
+ else {
+ if (message.additional_kwargs.tool_calls === undefined ||
+ message.additional_kwargs.tool_calls.length === 0) {
+ return {
+ returnValues: { output: message.content },
+ log: stringifiedMessageContent,
+ };
+ }
+ // Best effort parsing
+ for (const toolCall of message.additional_kwargs.tool_calls ?? []) {
+ const functionName = toolCall.function?.name;
+ try {
+ const args = JSON.parse(toolCall.function.arguments);
+ toolCalls.push({ name: functionName, args, id: toolCall.id });
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ }
+ catch (e) {
+ throw new output_parsers_1.OutputParserException(`Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${e}`);
+ }
+ }
+ }
+ return toolCalls.map((toolCall, i) => {
+ const messageLog = i === 0 ? [message] : [];
+ const log = `Invoking "${toolCall.name}" with ${JSON.stringify(toolCall.args ?? {})}\n${stringifiedMessageContent}`;
+ return {
+ tool: toolCall.name,
+ toolInput: toolCall.args,
+ toolCallId: toolCall.id ?? "",
+ log,
+ messageLog,
+ };
+ });
+ }
+ exports.parseAIMessageToToolAction = parseAIMessageToToolAction;
+ class ToolCallingAgentOutputParser extends types_js_1.AgentMultiActionOutputParser {
+ constructor() {
+ super(...arguments);
+ Object.defineProperty(this, "lc_namespace", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: ["langchain", "agents", "tool_calling"]
+ });
+ }
+ static lc_name() {
+ return "ToolCallingAgentOutputParser";
+ }
+ async parse(text) {
+ throw new Error(`ToolCallingAgentOutputParser can only parse messages.\nPassed input: ${text}`);
+ }
+ async parseResult(generations) {
+ if ("message" in generations[0] && (0, messages_1.isBaseMessage)(generations[0].message)) {
+ return parseAIMessageToToolAction(generations[0].message);
+ }
+ throw new Error("parseResult on ToolCallingAgentOutputParser only works on ChatGeneration output");
+ }
+ getFormatInstructions() {
+ throw new Error("getFormatInstructions not implemented inside ToolCallingAgentOutputParser.");
+ }
+ }
+ exports.ToolCallingAgentOutputParser = ToolCallingAgentOutputParser;

package/dist/agents/tool_calling/output_parser.d.ts CHANGED
@@ -0,0 +1,22 @@
+ import { AgentAction, AgentFinish, AgentStep } from "@langchain/core/agents";
+ import { AIMessage, BaseMessage } from "@langchain/core/messages";
+ import { ChatGeneration } from "@langchain/core/outputs";
+ import { AgentMultiActionOutputParser } from "../types.js";
+ /**
+ * Type that represents an agent action with an optional message log.
+ */
+ export type ToolsAgentAction = AgentAction & {
+ toolCallId: string;
+ messageLog?: BaseMessage[];
+ };
+ export type ToolsAgentStep = AgentStep & {
+ action: ToolsAgentAction;
+ };
+ export declare function parseAIMessageToToolAction(message: AIMessage): ToolsAgentAction[] | AgentFinish;
+ export declare class ToolCallingAgentOutputParser extends AgentMultiActionOutputParser {
+ lc_namespace: string[];
+ static lc_name(): string;
+ parse(text: string): Promise<AgentAction[] | AgentFinish>;
+ parseResult(generations: ChatGeneration[]): Promise<AgentFinish | ToolsAgentAction[]>;
+ getFormatInstructions(): string;
+ }

package/dist/agents/tool_calling/output_parser.js CHANGED
@@ -0,0 +1,70 @@
+ import { isBaseMessage, } from "@langchain/core/messages";
+ import { OutputParserException } from "@langchain/core/output_parsers";
+ import { AgentMultiActionOutputParser } from "../types.js";
+ export function parseAIMessageToToolAction(message) {
+ const stringifiedMessageContent = typeof message.content === "string"
+ ? message.content
+ : JSON.stringify(message.content);
+ let toolCalls = [];
+ if (message.tool_calls !== undefined && message.tool_calls.length > 0) {
+ toolCalls = message.tool_calls;
+ }
+ else {
+ if (message.additional_kwargs.tool_calls === undefined ||
+ message.additional_kwargs.tool_calls.length === 0) {
+ return {
+ returnValues: { output: message.content },
+ log: stringifiedMessageContent,
+ };
+ }
+ // Best effort parsing
+ for (const toolCall of message.additional_kwargs.tool_calls ?? []) {
+ const functionName = toolCall.function?.name;
+ try {
+ const args = JSON.parse(toolCall.function.arguments);
+ toolCalls.push({ name: functionName, args, id: toolCall.id });
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ }
+ catch (e) {
+ throw new OutputParserException(`Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${e}`);
+ }
+ }
+ }
+ return toolCalls.map((toolCall, i) => {
+ const messageLog = i === 0 ? [message] : [];
+ const log = `Invoking "${toolCall.name}" with ${JSON.stringify(toolCall.args ?? {})}\n${stringifiedMessageContent}`;
+ return {
+ tool: toolCall.name,
+ toolInput: toolCall.args,
+ toolCallId: toolCall.id ?? "",
+ log,
+ messageLog,
+ };
+ });
+ }
+ export class ToolCallingAgentOutputParser extends AgentMultiActionOutputParser {
+ constructor() {
+ super(...arguments);
+ Object.defineProperty(this, "lc_namespace", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: ["langchain", "agents", "tool_calling"]
+ });
+ }
+ static lc_name() {
+ return "ToolCallingAgentOutputParser";
+ }
+ async parse(text) {
+ throw new Error(`ToolCallingAgentOutputParser can only parse messages.\nPassed input: ${text}`);
+ }
+ async parseResult(generations) {
+ if ("message" in generations[0] && isBaseMessage(generations[0].message)) {
+ return parseAIMessageToToolAction(generations[0].message);
+ }
+ throw new Error("parseResult on ToolCallingAgentOutputParser only works on ChatGeneration output");
+ }
+ getFormatInstructions() {
+ throw new Error("getFormatInstructions not implemented inside ToolCallingAgentOutputParser.");
+ }
+ }
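
For orientation, a small sketch (not from the package; the import path and both sample messages are assumptions) of the two outcomes the new parser produces: tool calls become ToolsAgentAction objects, while a plain reply becomes an AgentFinish.

```typescript
// Sketch only: the import path and both sample messages below are hypothetical.
import { AIMessage } from "@langchain/core/messages";
import { parseAIMessageToToolAction } from "langchain/dist/agents/tool_calling/output_parser";

// OpenAI-style tool call carried in additional_kwargs (the "best effort" branch above).
const withToolCall = new AIMessage({
  content: "",
  additional_kwargs: {
    tool_calls: [
      {
        id: "call_abc",
        type: "function",
        function: { name: "get_weather", arguments: '{"city":"Tokyo"}' },
      },
    ],
  },
});
// => [{ tool: "get_weather", toolInput: { city: "Tokyo" }, toolCallId: "call_abc", ... }]
console.log(parseAIMessageToToolAction(withToolCall));

// No tool calls anywhere => AgentFinish with the message content as the output.
const plainAnswer = new AIMessage({ content: "It is sunny." });
console.log(parseAIMessageToToolAction(plainAnswer));
// { returnValues: { output: "It is sunny." }, log: "It is sunny." }
```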

package/dist/chains/openai_functions/extraction.d.ts CHANGED
@@ -10,7 +10,7 @@ import { LLMChain } from "../llm_chain.js";
  * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
  * @returns A LLMChain instance configured to return data matching the schema.
  */
- export declare function createExtractionChain(schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions>): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
+ export declare function createExtractionChain(schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions>): LLMChain<object, BaseChatModel<BaseFunctionCallOptions, import("@langchain/core/messages").BaseMessageChunk>>;
  /**
  * Function that creates an extraction chain from a Zod schema. It
  * converts the Zod schema to a JSON schema using zod-to-json-schema
@@ -19,4 +19,4 @@ export declare function createExtractionChain(schema: FunctionParameters, llm: B
  * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
  * @returns A LLMChain instance configured to return data matching the schema.
  */
- export declare function createExtractionChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions>): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
+ export declare function createExtractionChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions>): LLMChain<object, BaseChatModel<BaseFunctionCallOptions, import("@langchain/core/messages").BaseMessageChunk>>;

package/dist/chains/openai_functions/structured_output.d.ts CHANGED
@@ -55,6 +55,6 @@ export declare class FunctionCallStructuredOutputParser<T extends z.AnyZodObject
  * as well as an additional required "outputSchema" JSON Schema object.
  * @returns OpenAPIChain
  */
- export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput<T>): LLMChain<any, BaseChatModel<BaseFunctionCallOptions> | ChatOpenAI<BaseFunctionCallOptions>>;
+ export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput<T>): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, import("@langchain/core/messages").BaseMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;
  /** @deprecated Use {@link https://api.js.langchain.com/functions/langchain_chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */
- export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput<T>, "outputSchema">): LLMChain<any, BaseChatModel<BaseFunctionCallOptions> | ChatOpenAI<BaseFunctionCallOptions>>;
+ export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput<T>, "outputSchema">): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, import("@langchain/core/messages").BaseMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;

package/dist/chains/openai_functions/tagging.d.ts CHANGED
@@ -23,7 +23,7 @@ export type TaggingChainOptions = {
  * Switch to expression language: https://js.langchain.com/docs/expression_language/
  * Will be removed in 0.2.0
  */
- export declare function createTaggingChain(schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions>, options?: TaggingChainOptions): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
+ export declare function createTaggingChain(schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions>, options?: TaggingChainOptions): LLMChain<object, BaseChatModel<BaseFunctionCallOptions, import("@langchain/core/messages").BaseMessageChunk>>;
  /**
  * Function that creates a tagging chain from a Zod schema. It converts
  * the Zod schema to a JSON schema using the zodToJsonSchema function and
@@ -37,4 +37,4 @@ export declare function createTaggingChain(schema: FunctionParameters, llm: Base
  * Switch to expression language: https://js.langchain.com/docs/expression_language/
  * Will be removed in 0.2.0
  */
- export declare function createTaggingChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions>, options?: TaggingChainOptions): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
+ export declare function createTaggingChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions>, options?: TaggingChainOptions): LLMChain<object, BaseChatModel<BaseFunctionCallOptions, import("@langchain/core/messages").BaseMessageChunk>>;

package/dist/document_loaders/web/github.cjs CHANGED
@@ -262,12 +262,17 @@ class GithubRepoLoader extends base_js_1.BaseDocumentLoader {
  * @param gitmodulesContent the content of a .gitmodules file
  */
  async parseGitmodules(gitmodulesContent) {
+ let validGitmodulesContent = gitmodulesContent;
+ // in case the .gitmodules file does not end with a newline, we add one to make the regex work
+ if (!validGitmodulesContent.endsWith("\n")) {
+ validGitmodulesContent += "\n";
+ }
  // catches the initial line of submodule entries
  const submodulePattern = /\[submodule "(.*?)"]\n((\s+.*?\s*=\s*.*?\n)*)/g;
  // catches the properties of a submodule
  const keyValuePattern = /\s+(.*?)\s*=\s*(.*?)\s/g;
  const submoduleInfos = [];
- for (const [, name, propertyLines] of gitmodulesContent.matchAll(submodulePattern)) {
+ for (const [, name, propertyLines] of validGitmodulesContent.matchAll(submodulePattern)) {
  if (!name || !propertyLines) {
  throw new Error("Could not parse submodule entry");
  }

package/dist/document_loaders/web/github.js CHANGED
@@ -256,12 +256,17 @@ export class GithubRepoLoader extends BaseDocumentLoader {
  * @param gitmodulesContent the content of a .gitmodules file
  */
  async parseGitmodules(gitmodulesContent) {
+ let validGitmodulesContent = gitmodulesContent;
+ // in case the .gitmodules file does not end with a newline, we add one to make the regex work
+ if (!validGitmodulesContent.endsWith("\n")) {
+ validGitmodulesContent += "\n";
+ }
  // catches the initial line of submodule entries
  const submodulePattern = /\[submodule "(.*?)"]\n((\s+.*?\s*=\s*.*?\n)*)/g;
  // catches the properties of a submodule
  const keyValuePattern = /\s+(.*?)\s*=\s*(.*?)\s/g;
  const submoduleInfos = [];
- for (const [, name, propertyLines] of gitmodulesContent.matchAll(submodulePattern)) {
+ for (const [, name, propertyLines] of validGitmodulesContent.matchAll(submodulePattern)) {
  if (!name || !propertyLines) {
  throw new Error("Could not parse submodule entry");
  }
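
A self-contained sketch (not from the package; the .gitmodules content is made up) of why the appended newline matters: the submodulePattern above only captures property lines that end in a newline, so a file without a trailing newline silently drops its last property.

```typescript
// Sketch only: illustrates the regex behavior the patch above works around.
const submodulePattern = /\[submodule "(.*?)"]\n((\s+.*?\s*=\s*.*?\n)*)/g;

const gitmodules = '[submodule "docs"]\n\tpath = docs\n\turl = https://example.com/docs.git';

// Without a trailing newline the last property line never matches,
// so the captured block stops after "path".
console.log([...gitmodules.matchAll(submodulePattern)][0]?.[2]);
// "\tpath = docs\n"

// Appending "\n" first (what parseGitmodules now does) keeps the full block.
console.log([...(gitmodules + "\n").matchAll(submodulePattern)][0]?.[2]);
// "\tpath = docs\n\turl = https://example.com/docs.git\n"
```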

package/dist/experimental/openai_assistant/index.cjs CHANGED
@@ -210,7 +210,7 @@ class OpenAIAssistantRunnable extends runnables_1.Runnable {
  const run = await this._waitForRun(runId, threadId);
  if (run.status === "completed") {
  const messages = await this.client.beta.threads.messages.list(threadId, {
- order: "asc",
+ order: "desc",
  });
  const newMessages = messages.data.filter((msg) => msg.run_id === runId);
  if (!this.asAgent) {

package/dist/experimental/openai_assistant/index.js CHANGED
@@ -207,7 +207,7 @@ export class OpenAIAssistantRunnable extends Runnable {
  const run = await this._waitForRun(runId, threadId);
  if (run.status === "completed") {
  const messages = await this.client.beta.threads.messages.list(threadId, {
- order: "asc",
+ order: "desc",
  });
  const newMessages = messages.data.filter((msg) => msg.run_id === runId);
  if (!this.asAgent) {

package/dist/memory/vector_store.cjs CHANGED
@@ -62,10 +62,17 @@ class VectorStoreRetrieverMemory extends memory_1.BaseMemory {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "metadata", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  this.vectorStoreRetriever = fields.vectorStoreRetriever;
  this.inputKey = fields.inputKey;
  this.memoryKey = fields.memoryKey ?? "memory";
  this.returnDocs = fields.returnDocs ?? false;
+ this.metadata = fields.metadata;
  }
  get memoryKeys() {
  return [this.memoryKey];
@@ -95,13 +102,16 @@ class VectorStoreRetrieverMemory extends memory_1.BaseMemory {
  * @returns A Promise that resolves to void.
  */
  async saveContext(inputValues, outputValues) {
+ const metadata = typeof this.metadata === "function"
+ ? this.metadata(inputValues, outputValues)
+ : this.metadata;
  const text = Object.entries(inputValues)
  .filter(([k]) => k !== this.memoryKey)
  .concat(Object.entries(outputValues))
  .map(([k, v]) => `${k}: ${v}`)
  .join("\n");
  await this.vectorStoreRetriever.addDocuments([
- new documents_1.Document({ pageContent: text }),
+ new documents_1.Document({ pageContent: text, metadata }),
  ]);
  }
  }

package/dist/memory/vector_store.d.ts CHANGED
@@ -1,5 +1,7 @@
  import type { VectorStoreRetrieverInterface } from "@langchain/core/vectorstores";
  import { BaseMemory, InputValues, MemoryVariables, OutputValues } from "@langchain/core/memory";
+ type Metadata = Record<string, unknown>;
+ type MetadataFunction = (inputValues?: InputValues, outputValues?: OutputValues) => Metadata;
  /**
  * Interface for the parameters required to initialize a
  * VectorStoreRetrieverMemory instance.
@@ -10,6 +12,10 @@ export interface VectorStoreRetrieverMemoryParams {
  outputKey?: string;
  memoryKey?: string;
  returnDocs?: boolean;
+ /**
+ * Metadata to be added to the document when saving context.
+ */
+ metadata?: Metadata | MetadataFunction;
  }
  /**
  * Class for managing long-term memory in Large Language Model (LLM)
@@ -47,6 +53,7 @@ export declare class VectorStoreRetrieverMemory extends BaseMemory implements Ve
  inputKey?: string;
  memoryKey: string;
  returnDocs: boolean;
+ metadata?: Metadata | MetadataFunction;
  constructor(fields: VectorStoreRetrieverMemoryParams);
  get memoryKeys(): string[];
  /**
@@ -67,3 +74,4 @@ export declare class VectorStoreRetrieverMemory extends BaseMemory implements Ve
  */
  saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;
  }
+ export {};

package/dist/memory/vector_store.js CHANGED
@@ -59,10 +59,17 @@ export class VectorStoreRetrieverMemory extends BaseMemory {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "metadata", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  this.vectorStoreRetriever = fields.vectorStoreRetriever;
  this.inputKey = fields.inputKey;
  this.memoryKey = fields.memoryKey ?? "memory";
  this.returnDocs = fields.returnDocs ?? false;
+ this.metadata = fields.metadata;
  }
  get memoryKeys() {
  return [this.memoryKey];
@@ -92,13 +99,16 @@ export class VectorStoreRetrieverMemory extends BaseMemory {
  * @returns A Promise that resolves to void.
  */
  async saveContext(inputValues, outputValues) {
+ const metadata = typeof this.metadata === "function"
+ ? this.metadata(inputValues, outputValues)
+ : this.metadata;
  const text = Object.entries(inputValues)
  .filter(([k]) => k !== this.memoryKey)
  .concat(Object.entries(outputValues))
  .map(([k, v]) => `${k}: ${v}`)
  .join("\n");
  await this.vectorStoreRetriever.addDocuments([
- new Document({ pageContent: text }),
+ new Document({ pageContent: text, metadata }),
  ]);
  }
  }
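
A brief sketch (the embeddings choice, retriever setup, and metadata values are assumptions) of the new `metadata` option shown in the memory diffs above, which accepts either a static object or a function of the values passed to saveContext.

```typescript
// Sketch only: store/embeddings choice and the metadata values are hypothetical.
import { VectorStoreRetrieverMemory } from "langchain/memory";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";

const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());

const memory = new VectorStoreRetrieverMemory({
  vectorStoreRetriever: vectorStore.asRetriever(1),
  memoryKey: "history",
  // A plain object also works; a function receives the saved input/output values.
  metadata: (inputValues, outputValues) => ({
    userId: "user-123",
    savedAt: Date.now(),
    hasOutput: outputValues !== undefined,
  }),
});

// Each saved turn becomes a Document whose metadata comes from the option above.
await memory.saveContext({ input: "My favorite color is teal." }, { output: "Noted!" });
```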

package/dist/retrievers/self_query/index.cjs CHANGED
@@ -14,7 +14,7 @@ Object.defineProperty(exports, "BasicTranslator", { enumerable: true, get: funct
  * implements the SelfQueryRetrieverArgs interface.
  * @example
  * ```typescript
- * const selfQueryRetriever = await SelfQueryRetriever.fromLLM({
+ * const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  * llm: new ChatOpenAI(),
  * vectorStore: await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()),
  * documentContents: "Brief summary of a movie",

package/dist/retrievers/self_query/index.d.ts CHANGED
@@ -30,7 +30,7 @@ export interface SelfQueryRetrieverArgs<T extends VectorStore> extends BaseRetri
  * implements the SelfQueryRetrieverArgs interface.
  * @example
  * ```typescript
- * const selfQueryRetriever = await SelfQueryRetriever.fromLLM({
+ * const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  * llm: new ChatOpenAI(),
  * vectorStore: await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()),
  * documentContents: "Brief summary of a movie",

package/dist/retrievers/self_query/index.js CHANGED
@@ -9,7 +9,7 @@ export { BaseTranslator, BasicTranslator, FunctionalTranslator };
  * implements the SelfQueryRetrieverArgs interface.
  * @example
  * ```typescript
- * const selfQueryRetriever = await SelfQueryRetriever.fromLLM({
+ * const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  * llm: new ChatOpenAI(),
  * vectorStore: await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()),
  * documentContents: "Brief summary of a movie",

package/dist/retrievers/self_query/pinecone.cjs CHANGED
@@ -11,7 +11,7 @@ const base_js_1 = require("./base.cjs");
  * queries and compare results.
  * @example
  * ```typescript
- * const selfQueryRetriever = await SelfQueryRetriever.fromLLM({
+ * const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  * llm: new ChatOpenAI(),
  * vectorStore: new PineconeStore(),
  * documentContents: "Brief summary of a movie",

package/dist/retrievers/self_query/pinecone.d.ts CHANGED
@@ -8,7 +8,7 @@ import { BasicTranslator } from "./base.js";
  * queries and compare results.
  * @example
  * ```typescript
- * const selfQueryRetriever = await SelfQueryRetriever.fromLLM({
+ * const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  * llm: new ChatOpenAI(),
  * vectorStore: new PineconeStore(),
  * documentContents: "Brief summary of a movie",

package/dist/retrievers/self_query/pinecone.js CHANGED
@@ -8,7 +8,7 @@ import { BasicTranslator } from "./base.js";
  * queries and compare results.
  * @example
  * ```typescript
- * const selfQueryRetriever = await SelfQueryRetriever.fromLLM({
+ * const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  * llm: new ChatOpenAI(),
  * vectorStore: new PineconeStore(),
  * documentContents: "Brief summary of a movie",

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langchain",
- "version": "0.1.32",
+ "version": "0.1.34",
  "description": "Typescript bindings for langchain",
  "type": "module",
  "engines": {
@@ -1190,7 +1190,7 @@
  "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/langchain/",
  "scripts": {
  "build": "yarn run build:deps && yarn clean && yarn build:esm && yarn build:cjs && yarn build:scripts",
- "build:deps": "yarn run turbo:command build --filter=@langchain/anthropic --filter=@langchain/openai --filter=@langchain/community --concurrency=1",
+ "build:deps": "yarn run turbo:command build --filter=@langchain/openai --filter=@langchain/community --concurrency=1",
  "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rimraf dist/tests dist/**/tests",
  "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && yarn move-cjs-to-dist && rimraf dist-cjs",
  "build:watch": "yarn create-entrypoints && tsc --outDir dist/ --watch",
@@ -1512,9 +1512,9 @@
  },
  "dependencies": {
  "@anthropic-ai/sdk": "^0.9.1",
- "@langchain/community": "~0.0.41",
- "@langchain/core": "~0.1.44",
- "@langchain/openai": "~0.0.26",
+ "@langchain/community": "~0.0.47",
+ "@langchain/core": "~0.1.56",
+ "@langchain/openai": "~0.0.28",
  "binary-extensions": "^2.2.0",
  "js-tiktoken": "^1.0.7",
  "js-yaml": "^4.1.0",