langchain 0.0.186 → 0.0.188

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/dist/callbacks/handlers/llmonitor.cjs +31 -17
  2. package/dist/callbacks/handlers/llmonitor.js +31 -17
  3. package/dist/chat_models/bedrock/web.cjs +5 -3
  4. package/dist/chat_models/bedrock/web.js +5 -3
  5. package/dist/embeddings/cohere.cjs +18 -9
  6. package/dist/embeddings/cohere.d.ts +13 -1
  7. package/dist/embeddings/cohere.js +18 -9
  8. package/dist/experimental/chat_models/ollama_functions.cjs +140 -0
  9. package/dist/experimental/chat_models/ollama_functions.d.ts +76 -0
  10. package/dist/experimental/chat_models/ollama_functions.js +136 -0
  11. package/dist/llms/bedrock/web.cjs +5 -3
  12. package/dist/llms/bedrock/web.js +5 -3
  13. package/dist/llms/cohere.cjs +9 -7
  14. package/dist/llms/cohere.d.ts +1 -1
  15. package/dist/llms/cohere.js +9 -7
  16. package/dist/load/import_map.cjs +3 -1
  17. package/dist/load/import_map.d.ts +1 -0
  18. package/dist/load/import_map.js +1 -0
  19. package/dist/memory/buffer_token_memory.cjs +92 -0
  20. package/dist/memory/buffer_token_memory.d.ts +41 -0
  21. package/dist/memory/buffer_token_memory.js +88 -0
  22. package/dist/memory/index.cjs +3 -1
  23. package/dist/memory/index.d.ts +1 -0
  24. package/dist/memory/index.js +1 -0
  25. package/dist/output_parsers/http_response.cjs +82 -0
  26. package/dist/output_parsers/http_response.d.ts +28 -0
  27. package/dist/output_parsers/http_response.js +78 -0
  28. package/dist/output_parsers/index.cjs +3 -1
  29. package/dist/output_parsers/index.d.ts +1 -0
  30. package/dist/output_parsers/index.js +1 -0
  31. package/dist/output_parsers/openai_functions.cjs +4 -6
  32. package/dist/output_parsers/openai_functions.d.ts +1 -1
  33. package/dist/output_parsers/openai_functions.js +4 -6
  34. package/dist/prompts/base.cjs +1 -1
  35. package/dist/prompts/base.js +1 -1
  36. package/dist/schema/index.cjs +2 -2
  37. package/dist/schema/index.d.ts +2 -2
  38. package/dist/schema/index.js +2 -2
  39. package/dist/schema/output_parser.d.ts +2 -2
  40. package/dist/util/bedrock.cjs +8 -0
  41. package/dist/util/bedrock.js +8 -0
  42. package/dist/util/ollama.cjs +10 -12
  43. package/dist/util/ollama.js +10 -12
  44. package/dist/util/openapi.cjs +5 -2
  45. package/dist/util/openapi.js +5 -2
  46. package/experimental/chat_models/ollama_functions.cjs +1 -0
  47. package/experimental/chat_models/ollama_functions.d.ts +1 -0
  48. package/experimental/chat_models/ollama_functions.js +1 -0
  49. package/package.json +16 -6
package/dist/experimental/chat_models/ollama_functions.js (new file)
@@ -0,0 +1,136 @@
+import { BaseChatModel } from "../../chat_models/base.js";
+import { AIMessage, SystemMessage, } from "../../schema/index.js";
+import { ChatOllama } from "../../chat_models/ollama.js";
+import { PromptTemplate } from "../../prompts/prompt.js";
+const TOOL_SYSTEM_PROMPT =
+/* #__PURE__ */
+PromptTemplate.fromTemplate(`You have access to the following tools:
+
+{tools}
+
+To use a tool, respond with a JSON object with the following structure:
+{{
+  "tool": <name of the called tool>,
+  "tool_input": <parameters for the tool matching the above JSON schema>
+}}`);
+export class OllamaFunctions extends BaseChatModel {
+    static lc_name() {
+        return "OllamaFunctions";
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "llm", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "toolSystemPrompt", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: TOOL_SYSTEM_PROMPT
+        });
+        Object.defineProperty(this, "defaultResponseFunction", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: {
+                name: "__conversational_response",
+                description: "Respond conversationally if no other tools should be called for a given query.",
+                parameters: {
+                    type: "object",
+                    properties: {
+                        response: {
+                            type: "string",
+                            description: "Conversational response to the user.",
+                        },
+                    },
+                    required: ["response"],
+                },
+            }
+        });
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "experimental", "chat_models"]
+        });
+        this.llm = fields?.llm ?? new ChatOllama({ ...fields, format: "json" });
+        this.toolSystemPrompt = fields?.toolSystemPrompt ?? this.toolSystemPrompt;
+    }
+    invocationParams() {
+        return this.llm.invocationParams();
+    }
+    /** @ignore */
+    _identifyingParams() {
+        return this.llm._identifyingParams();
+    }
+    async _generate(messages, options, runManager) {
+        let functions = options.functions ?? [];
+        if (options.function_call !== undefined) {
+            functions = functions.filter((fn) => fn.name === options.function_call?.name);
+            if (!functions.length) {
+                throw new Error(`If "function_call" is specified, you must also pass a matching function in "functions".`);
+            }
+        }
+        else if (functions.length === 0) {
+            functions.push(this.defaultResponseFunction);
+        }
+        const defaultContent = await this.toolSystemPrompt.format({
+            tools: JSON.stringify(functions, null, 2),
+        });
+        const systemMessage = new SystemMessage({ content: defaultContent });
+        const chatResult = await this.llm._generate([systemMessage, ...messages], options, runManager);
+        const chatGenerationContent = chatResult.generations[0].message.content;
+        if (typeof chatGenerationContent !== "string") {
+            throw new Error("OllamaFunctions does not support non-string output.");
+        }
+        let parsedChatResult;
+        try {
+            parsedChatResult = JSON.parse(chatGenerationContent);
+        }
+        catch (e) {
+            throw new Error(`"${this.llm.model}" did not respond with valid JSON. Please try again.`);
+        }
+        const calledToolName = parsedChatResult.tool;
+        const calledToolArguments = parsedChatResult.tool_input;
+        const calledTool = functions.find((fn) => fn.name === calledToolName);
+        if (calledTool === undefined) {
+            throw new Error(`Failed to parse a function call from ${this.llm.model} output: ${chatGenerationContent}`);
+        }
+        if (calledTool.name === this.defaultResponseFunction.name) {
+            return {
+                generations: [
+                    {
+                        message: new AIMessage({
+                            content: calledToolArguments.response,
+                        }),
+                        text: calledToolArguments.response,
+                    },
+                ],
+            };
+        }
+        const responseMessageWithFunctions = new AIMessage({
+            content: "",
+            additional_kwargs: {
+                function_call: {
+                    name: calledToolName,
+                    arguments: calledToolArguments
+                        ? JSON.stringify(calledToolArguments)
+                        : "",
+                },
+            },
+        });
+        return {
+            generations: [{ message: responseMessageWithFunctions, text: "" }],
+        };
+    }
+    _llmType() {
+        return "ollama_functions";
+    }
+    /** @ignore */
+    _combineLLMOutput() {
+        return [];
+    }
+}
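
For context, a minimal usage sketch of the new experimental OllamaFunctions chat model exposed at langchain/experimental/chat_models/ollama_functions. The model name, the function schema, and a locally running Ollama server are assumptions, not part of the diff; function calls surface in additional_kwargs.function_call, mirroring the OpenAI functions format produced by _generate above.

```typescript
import { OllamaFunctions } from "langchain/experimental/chat_models/ollama_functions";
import { HumanMessage } from "langchain/schema";

// Assumes a local Ollama server; "mistral" is an illustrative model name.
const model = new OllamaFunctions({ temperature: 0.1, model: "mistral" }).bind({
  functions: [
    {
      name: "get_current_weather",
      description: "Get the current weather in a given location",
      parameters: {
        type: "object",
        properties: {
          location: { type: "string", description: "The city and state, e.g. San Francisco, CA" },
        },
        required: ["location"],
      },
    },
  ],
});

const response = await model.invoke([
  new HumanMessage("What is the weather like in Boston?"),
]);
// The selected tool and its JSON arguments, as extracted by OllamaFunctions.
console.log(response.additional_kwargs.function_call);
```
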
@@ -117,7 +117,7 @@ class Bedrock extends base_js_1.LLM {
             value: true
         });
         this.model = fields?.model ?? this.model;
-        const allowedModels = ["ai21", "anthropic", "amazon", "cohere"];
+        const allowedModels = ["ai21", "anthropic", "amazon", "cohere", "meta"];
         if (!allowedModels.includes(this.model.split(".")[0])) {
             throw new Error(`Unknown model: '${this.model}', only these are supported: ${allowedModels}`);
         }
@@ -223,7 +223,7 @@ class Bedrock extends base_js_1.LLM {
     }
     async *_streamResponseChunks(prompt, options, runManager) {
         const provider = this.model.split(".")[0];
-        const bedrockMethod = provider === "anthropic" || provider === "cohere"
+        const bedrockMethod = provider === "anthropic" || provider === "cohere" || provider === "meta"
             ? "invoke-with-response-stream"
             : "invoke";
         const service = "bedrock-runtime";
@@ -237,7 +237,9 @@ class Bedrock extends base_js_1.LLM {
         if (response.status < 200 || response.status >= 300) {
             throw Error(`Failed to access underlying url '${endpointHost}': got ${response.status} ${response.statusText}: ${await response.text()}`);
         }
-        if (provider === "anthropic" || provider === "cohere") {
+        if (provider === "anthropic" ||
+            provider === "cohere" ||
+            provider === "meta") {
            const reader = response.body?.getReader();
            const decoder = new TextDecoder();
            for await (const chunk of this._readChunks(reader)) {
@@ -114,7 +114,7 @@ export class Bedrock extends LLM {
             value: true
         });
         this.model = fields?.model ?? this.model;
-        const allowedModels = ["ai21", "anthropic", "amazon", "cohere"];
+        const allowedModels = ["ai21", "anthropic", "amazon", "cohere", "meta"];
         if (!allowedModels.includes(this.model.split(".")[0])) {
             throw new Error(`Unknown model: '${this.model}', only these are supported: ${allowedModels}`);
         }
@@ -220,7 +220,7 @@ export class Bedrock extends LLM {
     }
     async *_streamResponseChunks(prompt, options, runManager) {
         const provider = this.model.split(".")[0];
-        const bedrockMethod = provider === "anthropic" || provider === "cohere"
+        const bedrockMethod = provider === "anthropic" || provider === "cohere" || provider === "meta"
             ? "invoke-with-response-stream"
             : "invoke";
         const service = "bedrock-runtime";
@@ -234,7 +234,9 @@ export class Bedrock extends LLM {
         if (response.status < 200 || response.status >= 300) {
             throw Error(`Failed to access underlying url '${endpointHost}': got ${response.status} ${response.statusText}: ${await response.text()}`);
         }
-        if (provider === "anthropic" || provider === "cohere") {
+        if (provider === "anthropic" ||
+            provider === "cohere" ||
+            provider === "meta") {
            const reader = response.body?.getReader();
            const decoder = new TextDecoder();
            for await (const chunk of this._readChunks(reader)) {
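
These hunks add "meta" to the providers the Bedrock LLM accepts and route it through the invoke-with-response-stream endpoint, so Meta (Llama 2) models on Bedrock can also stream. A hedged sketch against the web entry point; the model id, region, and credential environment variable names are illustrative and not taken from the diff:

```typescript
import { Bedrock } from "langchain/llms/bedrock/web";

// Illustrative model id and region; the web entry point expects explicit credentials.
const model = new Bedrock({
  model: "meta.llama2-13b-chat-v1",
  region: "us-east-1",
  credentials: {
    accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
    secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
  },
});

// "meta" now streams chunk-by-chunk instead of returning one blob.
const stream = await model.stream("Tell me a short joke.");
for await (const chunk of stream) {
  process.stdout.write(chunk);
}
```
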
@@ -67,18 +67,20 @@ class Cohere extends base_js_1.LLM {
     }
     /** @ignore */
     async _call(prompt, options) {
-        const { cohere } = await Cohere.imports();
-        cohere.init(this.apiKey);
+        const { CohereClient } = await Cohere.imports();
+        const cohere = new CohereClient({
+            token: this.apiKey,
+        });
         // Hit the `generate` endpoint on the `large` model
         const generateResponse = await this.caller.callWithOptions({ signal: options.signal }, cohere.generate.bind(cohere), {
             prompt,
             model: this.model,
-            max_tokens: this.maxTokens,
+            maxTokens: this.maxTokens,
             temperature: this.temperature,
-            end_sequences: options.stop,
+            endSequences: options.stop,
         });
         try {
-            return generateResponse.body.generations[0].text;
+            return generateResponse.generations[0].text;
         }
         catch {
             console.log(generateResponse);
@@ -88,8 +90,8 @@ class Cohere extends base_js_1.LLM {
     /** @ignore */
     static async imports() {
         try {
-            const { default: cohere } = await import("cohere-ai");
-            return { cohere };
+            const { CohereClient } = await import("cohere-ai");
+            return { CohereClient };
         }
         catch (e) {
             throw new Error("Please install cohere-ai as a dependency with, e.g. `yarn add cohere-ai`");
@@ -36,6 +36,6 @@ export declare class Cohere extends LLM implements CohereInput {
     _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
     /** @ignore */
     static imports(): Promise<{
-        cohere: typeof import("cohere-ai");
+        CohereClient: typeof import("cohere-ai").CohereClient;
     }>;
 }
@@ -64,18 +64,20 @@ export class Cohere extends LLM {
     }
     /** @ignore */
     async _call(prompt, options) {
-        const { cohere } = await Cohere.imports();
-        cohere.init(this.apiKey);
+        const { CohereClient } = await Cohere.imports();
+        const cohere = new CohereClient({
+            token: this.apiKey,
+        });
         // Hit the `generate` endpoint on the `large` model
         const generateResponse = await this.caller.callWithOptions({ signal: options.signal }, cohere.generate.bind(cohere), {
             prompt,
             model: this.model,
-            max_tokens: this.maxTokens,
+            maxTokens: this.maxTokens,
             temperature: this.temperature,
-            end_sequences: options.stop,
+            endSequences: options.stop,
         });
         try {
-            return generateResponse.body.generations[0].text;
+            return generateResponse.generations[0].text;
         }
         catch {
             console.log(generateResponse);
@@ -85,8 +87,8 @@ export class Cohere extends LLM {
     /** @ignore */
     static async imports() {
         try {
-            const { default: cohere } = await import("cohere-ai");
-            return { cohere };
+            const { CohereClient } = await import("cohere-ai");
+            return { CohereClient };
         }
         catch (e) {
             throw new Error("Please install cohere-ai as a dependency with, e.g. `yarn add cohere-ai`");
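
The Cohere LLM wrapper now targets the v7 cohere-ai SDK: it constructs a CohereClient with a token instead of calling cohere.init(), passes camelCase request fields (maxTokens, endSequences), and reads the flattened response (no more .body). Usage through LangChain is unchanged; a brief sketch, assuming cohere-ai v7+ is installed and COHERE_API_KEY is set (or apiKey is passed explicitly):

```typescript
import { Cohere } from "langchain/llms/cohere";

// "command" is an illustrative Cohere model name.
const model = new Cohere({ model: "command", maxTokens: 100, temperature: 0.7 });
const text = await model.call("Suggest a name for a small coffee shop.");
console.log(text);
```
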
@@ -25,7 +25,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__voyage = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains__combine_documents__reduce = exports.chains = exports.tools__render = exports.tools = exports.base_language = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad__openai_tools = exports.agents__format_scratchpad = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.runnables__remote = exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = void 0;
+ exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__ollama_functions = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = void 0;
+ exports.runnables__remote = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -123,6 +124,7 @@ exports.experimental__babyagi = __importStar(require("../experimental/babyagi/in
 exports.experimental__generative_agents = __importStar(require("../experimental/generative_agents/index.cjs"));
 exports.experimental__plan_and_execute = __importStar(require("../experimental/plan_and_execute/index.cjs"));
 exports.experimental__chat_models__bittensor = __importStar(require("../experimental/chat_models/bittensor.cjs"));
+exports.experimental__chat_models__ollama_functions = __importStar(require("../experimental/chat_models/ollama_functions.cjs"));
 exports.experimental__chains__violation_of_expectations = __importStar(require("../experimental/chains/violation_of_expectations/index.cjs"));
 exports.evaluation = __importStar(require("../evaluation/index.cjs"));
 exports.runnables__remote = __importStar(require("../runnables/remote.cjs"));
@@ -95,6 +95,7 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
 export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
 export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
 export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+export * as experimental__chat_models__ollama_functions from "../experimental/chat_models/ollama_functions.js";
 export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
 export * as evaluation from "../evaluation/index.js";
 export * as runnables__remote from "../runnables/remote.js";
@@ -96,6 +96,7 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
 export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
 export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
 export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+export * as experimental__chat_models__ollama_functions from "../experimental/chat_models/ollama_functions.js";
 export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
 export * as evaluation from "../evaluation/index.js";
 export * as runnables__remote from "../runnables/remote.js";
package/dist/memory/buffer_token_memory.cjs (new file)
@@ -0,0 +1,92 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ConversationTokenBufferMemory = void 0;
+const base_js_1 = require("./base.cjs");
+const chat_memory_js_1 = require("./chat_memory.cjs");
+/**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+class ConversationTokenBufferMemory extends chat_memory_js_1.BaseChatMemory {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "humanPrefix", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "Human"
+        });
+        Object.defineProperty(this, "aiPrefix", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "AI"
+        });
+        Object.defineProperty(this, "memoryKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "history"
+        });
+        Object.defineProperty(this, "maxTokenLimit", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 2000
+        }); // Default max token limit of 2000 which can be overridden
+        Object.defineProperty(this, "llm", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.llm = fields.llm;
+        this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix;
+        this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix;
+        this.memoryKey = fields?.memoryKey ?? this.memoryKey;
+        this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit;
+    }
+    get memoryKeys() {
+        return [this.memoryKey];
+    }
+    /**
+     * Loads the memory variables. It takes an `InputValues` object as a
+     * parameter and returns a `Promise` that resolves with a
+     * `MemoryVariables` object.
+     * @param _values `InputValues` object.
+     * @returns A `Promise` that resolves with a `MemoryVariables` object.
+     */
+    async loadMemoryVariables(_values) {
+        const messages = await this.chatHistory.getMessages();
+        if (this.returnMessages) {
+            const result = {
+                [this.memoryKey]: messages,
+            };
+            return result;
+        }
+        const result = {
+            [this.memoryKey]: (0, base_js_1.getBufferString)(messages, this.humanPrefix, this.aiPrefix),
+        };
+        return result;
+    }
+    /**
+     * Saves the context from this conversation to buffer. If the amount
+     * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+     * prune it.
+     */
+    async saveContext(inputValues, outputValues) {
+        await super.saveContext(inputValues, outputValues);
+        // Prune buffer if it exceeds the max token limit set for this instance.
+        const buffer = await this.chatHistory.getMessages();
+        let currBufferLength = await this.llm.getNumTokens((0, base_js_1.getBufferString)(buffer, this.humanPrefix, this.aiPrefix));
+        if (currBufferLength > this.maxTokenLimit) {
+            const prunedMemory = [];
+            while (currBufferLength > this.maxTokenLimit) {
+                prunedMemory.push(buffer.shift());
+                currBufferLength = await this.llm.getNumTokens((0, base_js_1.getBufferString)(buffer, this.humanPrefix, this.aiPrefix));
+            }
+        }
+    }
+}
+exports.ConversationTokenBufferMemory = ConversationTokenBufferMemory;
package/dist/memory/buffer_token_memory.d.ts (new file)
@@ -0,0 +1,41 @@
+import { InputValues, MemoryVariables, OutputValues } from "./base.js";
+import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js";
+import { BaseLanguageModel } from "../base_language/index.js";
+/**
+ * Interface for the input parameters of the `BufferTokenMemory` class.
+ */
+export interface ConversationTokenBufferMemoryInput extends BaseChatMemoryInput {
+    humanPrefix?: string;
+    aiPrefix?: string;
+    llm: BaseLanguageModel;
+    memoryKey?: string;
+    maxTokenLimit?: number;
+}
+/**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+export declare class ConversationTokenBufferMemory extends BaseChatMemory implements ConversationTokenBufferMemoryInput {
+    humanPrefix: string;
+    aiPrefix: string;
+    memoryKey: string;
+    maxTokenLimit: number;
+    llm: BaseLanguageModel;
+    constructor(fields: ConversationTokenBufferMemoryInput);
+    get memoryKeys(): string[];
+    /**
+     * Loads the memory variables. It takes an `InputValues` object as a
+     * parameter and returns a `Promise` that resolves with a
+     * `MemoryVariables` object.
+     * @param _values `InputValues` object.
+     * @returns A `Promise` that resolves with a `MemoryVariables` object.
+     */
+    loadMemoryVariables(_values: InputValues): Promise<MemoryVariables>;
+    /**
+     * Saves the context from this conversation to buffer. If the amount
+     * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+     * prune it.
+     */
+    saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;
+}
package/dist/memory/buffer_token_memory.js (new file)
@@ -0,0 +1,88 @@
+import { getBufferString, } from "./base.js";
+import { BaseChatMemory } from "./chat_memory.js";
+/**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+export class ConversationTokenBufferMemory extends BaseChatMemory {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "humanPrefix", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "Human"
+        });
+        Object.defineProperty(this, "aiPrefix", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "AI"
+        });
+        Object.defineProperty(this, "memoryKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "history"
+        });
+        Object.defineProperty(this, "maxTokenLimit", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 2000
+        }); // Default max token limit of 2000 which can be overridden
+        Object.defineProperty(this, "llm", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.llm = fields.llm;
+        this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix;
+        this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix;
+        this.memoryKey = fields?.memoryKey ?? this.memoryKey;
+        this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit;
+    }
+    get memoryKeys() {
+        return [this.memoryKey];
+    }
+    /**
+     * Loads the memory variables. It takes an `InputValues` object as a
+     * parameter and returns a `Promise` that resolves with a
+     * `MemoryVariables` object.
+     * @param _values `InputValues` object.
+     * @returns A `Promise` that resolves with a `MemoryVariables` object.
+     */
+    async loadMemoryVariables(_values) {
+        const messages = await this.chatHistory.getMessages();
+        if (this.returnMessages) {
+            const result = {
+                [this.memoryKey]: messages,
+            };
+            return result;
+        }
+        const result = {
+            [this.memoryKey]: getBufferString(messages, this.humanPrefix, this.aiPrefix),
+        };
+        return result;
+    }
+    /**
+     * Saves the context from this conversation to buffer. If the amount
+     * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+     * prune it.
+     */
+    async saveContext(inputValues, outputValues) {
+        await super.saveContext(inputValues, outputValues);
+        // Prune buffer if it exceeds the max token limit set for this instance.
+        const buffer = await this.chatHistory.getMessages();
+        let currBufferLength = await this.llm.getNumTokens(getBufferString(buffer, this.humanPrefix, this.aiPrefix));
+        if (currBufferLength > this.maxTokenLimit) {
+            const prunedMemory = [];
+            while (currBufferLength > this.maxTokenLimit) {
+                prunedMemory.push(buffer.shift());
+                currBufferLength = await this.llm.getNumTokens(getBufferString(buffer, this.humanPrefix, this.aiPrefix));
+            }
+        }
+    }
+}
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ConversationSummaryBufferMemory = exports.CombinedMemory = exports.ENTITY_MEMORY_CONVERSATION_TEMPLATE = exports.EntityMemory = exports.VectorStoreRetrieverMemory = exports.MotorheadMemory = exports.ChatMessageHistory = exports.BaseChatMemory = exports.BufferWindowMemory = exports.BaseConversationSummaryMemory = exports.ConversationSummaryMemory = exports.getBufferString = exports.getOutputValue = exports.getInputValue = exports.BaseMemory = exports.BufferMemory = void 0;
+exports.ConversationTokenBufferMemory = exports.ConversationSummaryBufferMemory = exports.CombinedMemory = exports.ENTITY_MEMORY_CONVERSATION_TEMPLATE = exports.EntityMemory = exports.VectorStoreRetrieverMemory = exports.MotorheadMemory = exports.ChatMessageHistory = exports.BaseChatMemory = exports.BufferWindowMemory = exports.BaseConversationSummaryMemory = exports.ConversationSummaryMemory = exports.getBufferString = exports.getOutputValue = exports.getInputValue = exports.BaseMemory = exports.BufferMemory = void 0;
 var buffer_memory_js_1 = require("./buffer_memory.cjs");
 Object.defineProperty(exports, "BufferMemory", { enumerable: true, get: function () { return buffer_memory_js_1.BufferMemory; } });
 var base_js_1 = require("./base.cjs");
@@ -29,3 +29,5 @@ var combined_memory_js_1 = require("./combined_memory.cjs");
 Object.defineProperty(exports, "CombinedMemory", { enumerable: true, get: function () { return combined_memory_js_1.CombinedMemory; } });
 var summary_buffer_js_1 = require("./summary_buffer.cjs");
 Object.defineProperty(exports, "ConversationSummaryBufferMemory", { enumerable: true, get: function () { return summary_buffer_js_1.ConversationSummaryBufferMemory; } });
+var buffer_token_memory_js_1 = require("./buffer_token_memory.cjs");
+Object.defineProperty(exports, "ConversationTokenBufferMemory", { enumerable: true, get: function () { return buffer_token_memory_js_1.ConversationTokenBufferMemory; } });
@@ -10,3 +10,4 @@ export { EntityMemory } from "./entity_memory.js";
 export { ENTITY_MEMORY_CONVERSATION_TEMPLATE } from "./prompt.js";
 export { type CombinedMemoryInput, CombinedMemory } from "./combined_memory.js";
 export { ConversationSummaryBufferMemory, type ConversationSummaryBufferMemoryInput, } from "./summary_buffer.js";
+export { ConversationTokenBufferMemory, type ConversationTokenBufferMemoryInput, } from "./buffer_token_memory.js";
@@ -10,3 +10,4 @@ export { EntityMemory } from "./entity_memory.js";
 export { ENTITY_MEMORY_CONVERSATION_TEMPLATE } from "./prompt.js";
 export { CombinedMemory } from "./combined_memory.js";
 export { ConversationSummaryBufferMemory, } from "./summary_buffer.js";
+export { ConversationTokenBufferMemory, } from "./buffer_token_memory.js";
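
A short sketch of the new ConversationTokenBufferMemory, which these index hunks re-export from langchain/memory. It needs an llm for token counting and prunes the oldest messages once the rendered buffer exceeds maxTokenLimit; the OpenAI model and the tiny limit below are illustrative, and an OPENAI_API_KEY is assumed.

```typescript
import { OpenAI } from "langchain/llms/openai";
import { ConversationTokenBufferMemory } from "langchain/memory";

// A very small limit makes the pruning behavior easy to observe.
const memory = new ConversationTokenBufferMemory({
  llm: new OpenAI({ temperature: 0 }),
  maxTokenLimit: 10,
});

await memory.saveContext({ input: "hi" }, { output: "what's up" });
await memory.saveContext({ input: "not much, you?" }, { output: "not much" });

// Older turns beyond the token limit have been pruned from the buffer.
console.log(await memory.loadMemoryVariables({}));
```
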
package/dist/output_parsers/http_response.cjs (new file)
@@ -0,0 +1,82 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.HttpResponseOutputParser = void 0;
+const output_parser_js_1 = require("../schema/output_parser.cjs");
+/**
+ * OutputParser that formats chunks emitted from an LLM for different HTTP content types.
+ */
+class HttpResponseOutputParser extends output_parser_js_1.BaseTransformOutputParser {
+    static lc_name() {
+        return "HttpResponseOutputParser";
+    }
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "output_parser"]
+        });
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "outputParser", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: new output_parser_js_1.StringOutputParser()
+        });
+        Object.defineProperty(this, "contentType", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "text/plain"
+        });
+        this.outputParser = fields?.outputParser ?? this.outputParser;
+        this.contentType = fields?.contentType ?? this.contentType;
+    }
+    async *_transform(inputGenerator) {
+        for await (const chunk of this.outputParser._transform(inputGenerator)) {
+            if (typeof chunk === "string") {
+                yield this.parse(chunk);
+            }
+            else {
+                yield this.parse(JSON.stringify(chunk));
+            }
+        }
+        if (this.contentType === "text/event-stream") {
+            const encoder = new TextEncoder();
+            yield encoder.encode(`event: end\n\n`);
+        }
+    }
+    /**
+     * Parses a string output from an LLM call. This method is meant to be
+     * implemented by subclasses to define how a string output from an LLM
+     * should be parsed.
+     * @param text The string output from an LLM call.
+     * @param callbacks Optional callbacks.
+     * @returns A promise of the parsed output.
+     */
+    async parse(text) {
+        const chunk = await this.outputParser.parse(text);
+        let parsedChunk;
+        if (typeof chunk === "string") {
+            parsedChunk = chunk;
+        }
+        else {
+            parsedChunk = JSON.stringify(chunk);
+        }
+        const encoder = new TextEncoder();
+        if (this.contentType === "text/event-stream") {
+            return encoder.encode(`event: data\ndata: ${parsedChunk}\n\n`);
+        }
+        return encoder.encode(parsedChunk);
+    }
+    getFormatInstructions() {
+        return "";
+    }
+}
+exports.HttpResponseOutputParser = HttpResponseOutputParser;
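
A sketch of piping a chat model into the new HttpResponseOutputParser to produce a byte stream suitable for an HTTP response body; in text/event-stream mode it also emits a final `event: end` frame, as implemented above. The ChatOpenAI model is illustrative and an OPENAI_API_KEY is assumed.

```typescript
import { ChatOpenAI } from "langchain/chat_models/openai";
import { HttpResponseOutputParser } from "langchain/output_parsers";

const parser = new HttpResponseOutputParser({ contentType: "text/event-stream" });
const model = new ChatOpenAI({ temperature: 0 });

// Each chunk is a Uint8Array of the form "event: data\ndata: ...\n\n".
const stream = await model.pipe(parser).stream("Hello there!");

// e.g. in a fetch-style handler:
// return new Response(stream, { headers: { "Content-Type": "text/event-stream" } });
for await (const chunk of stream) {
  console.log(new TextDecoder().decode(chunk));
}
```
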