langchain 0.0.185 → 0.0.187

This diff reflects the changes between publicly released versions of the package as published to their public registry, and is provided for informational purposes only.
Files changed (60)
  1. package/dist/agents/openai/index.cjs +2 -1
  2. package/dist/agents/openai/index.js +2 -1
  3. package/dist/callbacks/handlers/llmonitor.cjs +31 -17
  4. package/dist/callbacks/handlers/llmonitor.js +31 -17
  5. package/dist/chains/combine_docs_chain.cjs +1 -1
  6. package/dist/chains/combine_docs_chain.js +1 -1
  7. package/dist/chains/llm_chain.cjs +52 -7
  8. package/dist/chains/llm_chain.d.ts +20 -12
  9. package/dist/chains/llm_chain.js +53 -8
  10. package/dist/chat_models/ollama.cjs +8 -0
  11. package/dist/chat_models/ollama.d.ts +3 -0
  12. package/dist/chat_models/ollama.js +8 -0
  13. package/dist/chat_models/openai.cjs +3 -0
  14. package/dist/chat_models/openai.js +3 -0
  15. package/dist/document_loaders/fs/unstructured.d.ts +1 -5
  16. package/dist/embeddings/ollama.d.ts +1 -1
  17. package/dist/experimental/chat_models/ollama_functions.cjs +140 -0
  18. package/dist/experimental/chat_models/ollama_functions.d.ts +76 -0
  19. package/dist/experimental/chat_models/ollama_functions.js +136 -0
  20. package/dist/llms/ollama.cjs +8 -0
  21. package/dist/llms/ollama.d.ts +3 -0
  22. package/dist/llms/ollama.js +8 -0
  23. package/dist/llms/openai.cjs +1 -1
  24. package/dist/llms/openai.js +1 -1
  25. package/dist/load/import_map.cjs +3 -1
  26. package/dist/load/import_map.d.ts +1 -0
  27. package/dist/load/import_map.js +1 -0
  28. package/dist/memory/buffer_token_memory.cjs +92 -0
  29. package/dist/memory/buffer_token_memory.d.ts +41 -0
  30. package/dist/memory/buffer_token_memory.js +88 -0
  31. package/dist/memory/index.cjs +3 -1
  32. package/dist/memory/index.d.ts +1 -0
  33. package/dist/memory/index.js +1 -0
  34. package/dist/output_parsers/index.cjs +3 -1
  35. package/dist/output_parsers/index.d.ts +1 -0
  36. package/dist/output_parsers/index.js +1 -0
  37. package/dist/output_parsers/openai_functions.cjs +3 -3
  38. package/dist/output_parsers/openai_functions.js +3 -3
  39. package/dist/output_parsers/openai_tools.cjs +53 -0
  40. package/dist/output_parsers/openai_tools.d.ts +22 -0
  41. package/dist/output_parsers/openai_tools.js +49 -0
  42. package/dist/prompts/base.cjs +1 -1
  43. package/dist/prompts/base.d.ts +2 -1
  44. package/dist/prompts/base.js +1 -1
  45. package/dist/prompts/chat.cjs +1 -1
  46. package/dist/prompts/chat.js +1 -1
  47. package/dist/schema/index.cjs +2 -2
  48. package/dist/schema/index.d.ts +4 -6
  49. package/dist/schema/index.js +2 -2
  50. package/dist/schema/runnable/base.d.ts +2 -2
  51. package/dist/util/ollama.cjs +10 -12
  52. package/dist/util/ollama.d.ts +3 -0
  53. package/dist/util/ollama.js +10 -12
  54. package/dist/util/types.cjs +5 -0
  55. package/dist/util/types.d.ts +4 -0
  56. package/dist/util/types.js +4 -0
  57. package/experimental/chat_models/ollama_functions.cjs +1 -0
  58. package/experimental/chat_models/ollama_functions.d.ts +1 -0
  59. package/experimental/chat_models/ollama_functions.js +1 -0
  60. package/package.json +13 -4
package/dist/experimental/chat_models/ollama_functions.cjs
@@ -0,0 +1,140 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OllamaFunctions = void 0;
+ const base_js_1 = require("../../chat_models/base.cjs");
+ const index_js_1 = require("../../schema/index.cjs");
+ const ollama_js_1 = require("../../chat_models/ollama.cjs");
+ const prompt_js_1 = require("../../prompts/prompt.cjs");
+ const TOOL_SYSTEM_PROMPT =
+ /* #__PURE__ */
+ prompt_js_1.PromptTemplate.fromTemplate(`You have access to the following tools:
+
+ {tools}
+
+ To use a tool, respond with a JSON object with the following structure:
+ {{
+ "tool": <name of the called tool>,
+ "tool_input": <parameters for the tool matching the above JSON schema>
+ }}`);
+ class OllamaFunctions extends base_js_1.BaseChatModel {
+ static lc_name() {
+ return "OllamaFunctions";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "llm", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "toolSystemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: TOOL_SYSTEM_PROMPT
+ });
+ Object.defineProperty(this, "defaultResponseFunction", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: {
+ name: "__conversational_response",
+ description: "Respond conversationally if no other tools should be called for a given query.",
+ parameters: {
+ type: "object",
+ properties: {
+ response: {
+ type: "string",
+ description: "Conversational response to the user.",
+ },
+ },
+ required: ["response"],
+ },
+ }
+ });
+ Object.defineProperty(this, "lc_namespace", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: ["langchain", "experimental", "chat_models"]
+ });
+ this.llm = fields?.llm ?? new ollama_js_1.ChatOllama({ ...fields, format: "json" });
+ this.toolSystemPrompt = fields?.toolSystemPrompt ?? this.toolSystemPrompt;
+ }
+ invocationParams() {
+ return this.llm.invocationParams();
+ }
+ /** @ignore */
+ _identifyingParams() {
+ return this.llm._identifyingParams();
+ }
+ async _generate(messages, options, runManager) {
+ let functions = options.functions ?? [];
+ if (options.function_call !== undefined) {
+ functions = functions.filter((fn) => fn.name === options.function_call?.name);
+ if (!functions.length) {
+ throw new Error(`If "function_call" is specified, you must also pass a matching function in "functions".`);
+ }
+ }
+ else if (functions.length === 0) {
+ functions.push(this.defaultResponseFunction);
+ }
+ const defaultContent = await TOOL_SYSTEM_PROMPT.format({
+ tools: JSON.stringify(functions, null, 2),
+ });
+ const systemMessage = new index_js_1.SystemMessage({ content: defaultContent });
+ const chatResult = await this.llm._generate([systemMessage, ...messages], options, runManager);
+ const chatGenerationContent = chatResult.generations[0].message.content;
+ if (typeof chatGenerationContent !== "string") {
+ throw new Error("OllamaFunctions does not support non-string output.");
+ }
+ let parsedChatResult;
+ try {
+ parsedChatResult = JSON.parse(chatGenerationContent);
+ }
+ catch (e) {
+ throw new Error(`"${this.llm.model}" did not respond with valid JSON. Please try again.`);
+ }
+ const calledToolName = parsedChatResult.tool;
+ const calledToolArguments = parsedChatResult.tool_input;
+ const calledTool = functions.find((fn) => fn.name === calledToolName);
+ if (calledTool === undefined) {
+ throw new Error(`Failed to parse a function call from ${this.llm.model} output: ${chatGenerationContent}`);
+ }
+ if (calledTool.name === this.defaultResponseFunction.name) {
+ return {
+ generations: [
+ {
+ message: new index_js_1.AIMessage({
+ content: calledToolArguments.response,
+ }),
+ text: calledToolArguments.response,
+ },
+ ],
+ };
+ }
+ const responseMessageWithFunctions = new index_js_1.AIMessage({
+ content: "",
+ additional_kwargs: {
+ function_call: {
+ name: calledToolName,
+ arguments: calledToolArguments
+ ? JSON.stringify(calledToolArguments)
+ : "",
+ },
+ },
+ });
+ return {
+ generations: [{ message: responseMessageWithFunctions, text: "" }],
+ };
+ }
+ _llmType() {
+ return "ollama_functions";
+ }
+ /** @ignore */
+ _combineLLMOutput() {
+ return [];
+ }
+ }
+ exports.OllamaFunctions = OllamaFunctions;
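For orientation, a minimal usage sketch of the new OllamaFunctions wrapper (not part of the diff; the model name, tool schema, and the .bind()/.invoke() call pattern are illustrative assumptions based on the class above, which reads "functions" and "function_call" from the call options and mirrors OpenAI-style function calls in additional_kwargs):

import { OllamaFunctions } from "langchain/experimental/chat_models/ollama_functions";
import { HumanMessage } from "langchain/schema";

// Hypothetical example: "mistral" and the weather tool schema are placeholders.
const model = new OllamaFunctions({ model: "mistral", temperature: 0.1 }).bind({
  functions: [
    {
      name: "get_current_weather",
      description: "Get the current weather for a location.",
      parameters: {
        type: "object",
        properties: {
          location: { type: "string", description: "City name, e.g. Paris" },
        },
        required: ["location"],
      },
    },
  ],
});

const response = await model.invoke([
  new HumanMessage("What is the weather like in Paris?"),
]);
// When a tool is selected, the call is surfaced in additional_kwargs.function_call;
// otherwise the default "__conversational_response" path returns a plain AIMessage.
console.log(response.additional_kwargs.function_call);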
package/dist/experimental/chat_models/ollama_functions.d.ts
@@ -0,0 +1,76 @@
+ import { BaseChatModel, BaseChatModelParams } from "../../chat_models/base.js";
+ import { CallbackManagerForLLMRun } from "../../callbacks/manager.js";
+ import { BaseMessage, ChatResult } from "../../schema/index.js";
+ import { ChatOllama } from "../../chat_models/ollama.js";
+ import { OllamaInput } from "../../util/ollama.js";
+ import { BaseFunctionCallOptions } from "../../base_language/index.js";
+ import type { BasePromptTemplate } from "../../prompts/base.js";
+ export interface ChatOllamaFunctionsCallOptions extends BaseFunctionCallOptions {
+ }
+ export type OllamaFunctionsInput = Partial<OllamaInput> & BaseChatModelParams & {
+ llm?: ChatOllama;
+ toolSystemPrompt?: BasePromptTemplate;
+ };
+ export declare class OllamaFunctions extends BaseChatModel<ChatOllamaFunctionsCallOptions> {
+ llm: ChatOllama;
+ toolSystemPrompt: BasePromptTemplate;
+ protected defaultResponseFunction: {
+ name: string;
+ description: string;
+ parameters: {
+ type: string;
+ properties: {
+ response: {
+ type: string;
+ description: string;
+ };
+ };
+ required: string[];
+ };
+ };
+ lc_namespace: string[];
+ static lc_name(): string;
+ constructor(fields?: OllamaFunctionsInput);
+ invocationParams(): {
+ model: string;
+ format: import("../../util/types.js").StringWithAutocomplete<"json"> | undefined;
+ options: {
+ embedding_only: boolean | undefined;
+ f16_kv: boolean | undefined;
+ frequency_penalty: number | undefined;
+ logits_all: boolean | undefined;
+ low_vram: boolean | undefined;
+ main_gpu: number | undefined;
+ mirostat: number | undefined;
+ mirostat_eta: number | undefined;
+ mirostat_tau: number | undefined;
+ num_batch: number | undefined;
+ num_ctx: number | undefined;
+ num_gpu: number | undefined;
+ num_gqa: number | undefined;
+ num_keep: number | undefined;
+ num_thread: number | undefined;
+ penalize_newline: boolean | undefined;
+ presence_penalty: number | undefined;
+ repeat_last_n: number | undefined;
+ repeat_penalty: number | undefined;
+ rope_frequency_base: number | undefined;
+ rope_frequency_scale: number | undefined;
+ temperature: number | undefined;
+ stop: string[] | undefined;
+ tfs_z: number | undefined;
+ top_k: number | undefined;
+ top_p: number | undefined;
+ typical_p: number | undefined;
+ use_mlock: boolean | undefined;
+ use_mmap: boolean | undefined;
+ vocab_only: boolean | undefined;
+ };
+ };
+ /** @ignore */
+ _identifyingParams(): Record<string, any>;
+ _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
+ _llmType(): string;
+ /** @ignore */
+ _combineLLMOutput(): never[];
+ }
package/dist/experimental/chat_models/ollama_functions.js
@@ -0,0 +1,136 @@
+ import { BaseChatModel } from "../../chat_models/base.js";
+ import { AIMessage, SystemMessage, } from "../../schema/index.js";
+ import { ChatOllama } from "../../chat_models/ollama.js";
+ import { PromptTemplate } from "../../prompts/prompt.js";
+ const TOOL_SYSTEM_PROMPT =
+ /* #__PURE__ */
+ PromptTemplate.fromTemplate(`You have access to the following tools:
+
+ {tools}
+
+ To use a tool, respond with a JSON object with the following structure:
+ {{
+ "tool": <name of the called tool>,
+ "tool_input": <parameters for the tool matching the above JSON schema>
+ }}`);
+ export class OllamaFunctions extends BaseChatModel {
+ static lc_name() {
+ return "OllamaFunctions";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "llm", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "toolSystemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: TOOL_SYSTEM_PROMPT
+ });
+ Object.defineProperty(this, "defaultResponseFunction", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: {
+ name: "__conversational_response",
+ description: "Respond conversationally if no other tools should be called for a given query.",
+ parameters: {
+ type: "object",
+ properties: {
+ response: {
+ type: "string",
+ description: "Conversational response to the user.",
+ },
+ },
+ required: ["response"],
+ },
+ }
+ });
+ Object.defineProperty(this, "lc_namespace", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: ["langchain", "experimental", "chat_models"]
+ });
+ this.llm = fields?.llm ?? new ChatOllama({ ...fields, format: "json" });
+ this.toolSystemPrompt = fields?.toolSystemPrompt ?? this.toolSystemPrompt;
+ }
+ invocationParams() {
+ return this.llm.invocationParams();
+ }
+ /** @ignore */
+ _identifyingParams() {
+ return this.llm._identifyingParams();
+ }
+ async _generate(messages, options, runManager) {
+ let functions = options.functions ?? [];
+ if (options.function_call !== undefined) {
+ functions = functions.filter((fn) => fn.name === options.function_call?.name);
+ if (!functions.length) {
+ throw new Error(`If "function_call" is specified, you must also pass a matching function in "functions".`);
+ }
+ }
+ else if (functions.length === 0) {
+ functions.push(this.defaultResponseFunction);
+ }
+ const defaultContent = await TOOL_SYSTEM_PROMPT.format({
+ tools: JSON.stringify(functions, null, 2),
+ });
+ const systemMessage = new SystemMessage({ content: defaultContent });
+ const chatResult = await this.llm._generate([systemMessage, ...messages], options, runManager);
+ const chatGenerationContent = chatResult.generations[0].message.content;
+ if (typeof chatGenerationContent !== "string") {
+ throw new Error("OllamaFunctions does not support non-string output.");
+ }
+ let parsedChatResult;
+ try {
+ parsedChatResult = JSON.parse(chatGenerationContent);
+ }
+ catch (e) {
+ throw new Error(`"${this.llm.model}" did not respond with valid JSON. Please try again.`);
+ }
+ const calledToolName = parsedChatResult.tool;
+ const calledToolArguments = parsedChatResult.tool_input;
+ const calledTool = functions.find((fn) => fn.name === calledToolName);
+ if (calledTool === undefined) {
+ throw new Error(`Failed to parse a function call from ${this.llm.model} output: ${chatGenerationContent}`);
+ }
+ if (calledTool.name === this.defaultResponseFunction.name) {
+ return {
+ generations: [
+ {
+ message: new AIMessage({
+ content: calledToolArguments.response,
+ }),
+ text: calledToolArguments.response,
+ },
+ ],
+ };
+ }
+ const responseMessageWithFunctions = new AIMessage({
+ content: "",
+ additional_kwargs: {
+ function_call: {
+ name: calledToolName,
+ arguments: calledToolArguments
+ ? JSON.stringify(calledToolArguments)
+ : "",
+ },
+ },
+ });
+ return {
+ generations: [{ message: responseMessageWithFunctions, text: "" }],
+ };
+ }
+ _llmType() {
+ return "ollama_functions";
+ }
+ /** @ignore */
+ _combineLLMOutput() {
+ return [];
+ }
+ }
package/dist/llms/ollama.cjs
@@ -212,6 +212,12 @@ class Ollama extends base_js_1.LLM {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "format", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  this.model = fields.model ?? this.model;
  this.baseUrl = fields.baseUrl?.endsWith("/")
  ? fields.baseUrl.slice(0, -1)
@@ -246,6 +252,7 @@ class Ollama extends base_js_1.LLM {
  this.useMLock = fields.useMLock;
  this.useMMap = fields.useMMap;
  this.vocabOnly = fields.vocabOnly;
+ this.format = fields.format;
  }
  _llmType() {
  return "ollama";
@@ -253,6 +260,7 @@ class Ollama extends base_js_1.LLM {
  invocationParams(options) {
  return {
  model: this.model,
+ format: this.format,
  options: {
  embedding_only: this.embeddingOnly,
  f16_kv: this.f16KV,
package/dist/llms/ollama.d.ts
@@ -2,6 +2,7 @@ import { LLM, BaseLLMParams } from "./base.js";
  import { OllamaInput, OllamaCallOptions } from "../util/ollama.js";
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { GenerationChunk } from "../schema/index.js";
+ import type { StringWithAutocomplete } from "../util/types.js";
  /**
  * Class that represents the Ollama language model. It extends the base
  * LLM class and implements the OllamaInput interface.
@@ -41,10 +42,12 @@ export declare class Ollama extends LLM<OllamaCallOptions> implements OllamaInpu
  useMLock?: boolean;
  useMMap?: boolean;
  vocabOnly?: boolean;
+ format?: StringWithAutocomplete<"json">;
  constructor(fields: OllamaInput & BaseLLMParams);
  _llmType(): string;
  invocationParams(options?: this["ParsedCallOptions"]): {
  model: string;
+ format: StringWithAutocomplete<"json"> | undefined;
  options: {
  embedding_only: boolean | undefined;
  f16_kv: boolean | undefined;
package/dist/llms/ollama.js
@@ -209,6 +209,12 @@ export class Ollama extends LLM {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "format", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  this.model = fields.model ?? this.model;
  this.baseUrl = fields.baseUrl?.endsWith("/")
  ? fields.baseUrl.slice(0, -1)
@@ -243,6 +249,7 @@ export class Ollama extends LLM {
  this.useMLock = fields.useMLock;
  this.useMMap = fields.useMMap;
  this.vocabOnly = fields.vocabOnly;
+ this.format = fields.format;
  }
  _llmType() {
  return "ollama";
@@ -250,6 +257,7 @@ export class Ollama extends LLM {
  invocationParams(options) {
  return {
  model: this.model,
+ format: this.format,
  options: {
  embedding_only: this.embeddingOnly,
  f16_kv: this.f16KV,
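The new format field is forwarded alongside model in invocationParams, so Ollama's JSON output mode can be requested at construction time. A rough sketch (the base URL, model name, and prompt are placeholders, not taken from the diff):

import { Ollama } from "langchain/llms/ollama";

// Assumes a locally running Ollama server with a pulled model; names are illustrative.
const llm = new Ollama({
  baseUrl: "http://localhost:11434",
  model: "llama2",
  format: "json", // sent to the Ollama API as the top-level "format" parameter
});

const output = await llm.invoke("List three primary colors as a JSON array.");
console.log(JSON.parse(output));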
package/dist/llms/openai.cjs
@@ -121,7 +121,7 @@ class OpenAI extends base_js_1.BaseLLM {
  enumerable: true,
  configurable: true,
  writable: true,
- value: "text-davinci-003"
+ value: "gpt-3.5-turbo-instruct"
  });
  Object.defineProperty(this, "modelKwargs", {
  enumerable: true,
package/dist/llms/openai.js
@@ -118,7 +118,7 @@ export class OpenAI extends BaseLLM {
  enumerable: true,
  configurable: true,
  writable: true,
- value: "text-davinci-003"
+ value: "gpt-3.5-turbo-instruct"
  });
  Object.defineProperty(this, "modelKwargs", {
  enumerable: true,
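In practice this means an OpenAI completion LLM constructed without an explicit modelName now targets gpt-3.5-turbo-instruct rather than the deprecated text-davinci-003. A quick illustrative sketch (not part of the diff):

import { OpenAI } from "langchain/llms/openai";

// As of this release, omitting modelName defaults to "gpt-3.5-turbo-instruct".
const llm = new OpenAI({ temperature: 0 });

// Pinning a specific completion model still works as before.
const pinned = new OpenAI({ modelName: "gpt-3.5-turbo-instruct", temperature: 0 });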
package/dist/load/import_map.cjs
@@ -25,7 +25,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__voyage = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains__combine_documents__reduce = exports.chains = exports.tools__render = exports.tools = exports.base_language = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad__openai_tools = exports.agents__format_scratchpad = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.runnables__remote = exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = void 0;
+ exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__ollama_functions = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = void 0;
+ exports.runnables__remote = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -123,6 +124,7 @@ exports.experimental__babyagi = __importStar(require("../experimental/babyagi/in
  exports.experimental__generative_agents = __importStar(require("../experimental/generative_agents/index.cjs"));
  exports.experimental__plan_and_execute = __importStar(require("../experimental/plan_and_execute/index.cjs"));
  exports.experimental__chat_models__bittensor = __importStar(require("../experimental/chat_models/bittensor.cjs"));
+ exports.experimental__chat_models__ollama_functions = __importStar(require("../experimental/chat_models/ollama_functions.cjs"));
  exports.experimental__chains__violation_of_expectations = __importStar(require("../experimental/chains/violation_of_expectations/index.cjs"));
  exports.evaluation = __importStar(require("../evaluation/index.cjs"));
  exports.runnables__remote = __importStar(require("../runnables/remote.cjs"));
package/dist/load/import_map.d.ts
@@ -95,6 +95,7 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
  export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+ export * as experimental__chat_models__ollama_functions from "../experimental/chat_models/ollama_functions.js";
  export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
  export * as evaluation from "../evaluation/index.js";
  export * as runnables__remote from "../runnables/remote.js";
package/dist/load/import_map.js
@@ -96,6 +96,7 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
  export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+ export * as experimental__chat_models__ollama_functions from "../experimental/chat_models/ollama_functions.js";
  export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
  export * as evaluation from "../evaluation/index.js";
  export * as runnables__remote from "../runnables/remote.js";
package/dist/memory/buffer_token_memory.cjs
@@ -0,0 +1,92 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ConversationTokenBufferMemory = void 0;
+ const base_js_1 = require("./base.cjs");
+ const chat_memory_js_1 = require("./chat_memory.cjs");
+ /**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+ class ConversationTokenBufferMemory extends chat_memory_js_1.BaseChatMemory {
+ constructor(fields) {
+ super(fields);
+ Object.defineProperty(this, "humanPrefix", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "Human"
+ });
+ Object.defineProperty(this, "aiPrefix", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "AI"
+ });
+ Object.defineProperty(this, "memoryKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "history"
+ });
+ Object.defineProperty(this, "maxTokenLimit", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 2000
+ }); // Default max token limit of 2000 which can be overridden
+ Object.defineProperty(this, "llm", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.llm = fields.llm;
+ this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix;
+ this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix;
+ this.memoryKey = fields?.memoryKey ?? this.memoryKey;
+ this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit;
+ }
+ get memoryKeys() {
+ return [this.memoryKey];
+ }
+ /**
+ * Loads the memory variables. It takes an `InputValues` object as a
+ * parameter and returns a `Promise` that resolves with a
+ * `MemoryVariables` object.
+ * @param _values `InputValues` object.
+ * @returns A `Promise` that resolves with a `MemoryVariables` object.
+ */
+ async loadMemoryVariables(_values) {
+ const messages = await this.chatHistory.getMessages();
+ if (this.returnMessages) {
+ const result = {
+ [this.memoryKey]: messages,
+ };
+ return result;
+ }
+ const result = {
+ [this.memoryKey]: (0, base_js_1.getBufferString)(messages, this.humanPrefix, this.aiPrefix),
+ };
+ return result;
+ }
+ /**
+ * Saves the context from this conversation to buffer. If the amount
+ * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+ * prune it.
+ */
+ async saveContext(inputValues, outputValues) {
+ await super.saveContext(inputValues, outputValues);
+ // Prune buffer if it exceeds the max token limit set for this instance.
+ const buffer = await this.chatHistory.getMessages();
+ let currBufferLength = await this.llm.getNumTokens((0, base_js_1.getBufferString)(buffer, this.humanPrefix, this.aiPrefix));
+ if (currBufferLength > this.maxTokenLimit) {
+ const prunedMemory = [];
+ while (currBufferLength > this.maxTokenLimit) {
+ prunedMemory.push(buffer.shift());
+ currBufferLength = await this.llm.getNumTokens((0, base_js_1.getBufferString)(buffer, this.humanPrefix, this.aiPrefix));
+ }
+ }
+ }
+ }
+ exports.ConversationTokenBufferMemory = ConversationTokenBufferMemory;
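A minimal usage sketch for the new ConversationTokenBufferMemory (assuming it is re-exported from langchain/memory per the memory/index entries in the file list; the model choice and token limit below are illustrative):

import { ConversationTokenBufferMemory } from "langchain/memory";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { ConversationChain } from "langchain/chains";

// Keep only as much recent history as fits within ~500 tokens,
// counted with the supplied model's getNumTokens().
const model = new ChatOpenAI({ temperature: 0 });
const memory = new ConversationTokenBufferMemory({ llm: model, maxTokenLimit: 500 });

const chain = new ConversationChain({ llm: model, memory });
await chain.call({ input: "Hi, my name is Ada." });
await chain.call({ input: "What did I say my name was?" });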
package/dist/memory/buffer_token_memory.d.ts
@@ -0,0 +1,41 @@
+ import { InputValues, MemoryVariables, OutputValues } from "./base.js";
+ import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js";
+ import { BaseLanguageModel } from "../base_language/index.js";
+ /**
+ * Interface for the input parameters of the `BufferTokenMemory` class.
+ */
+ export interface ConversationTokenBufferMemoryInput extends BaseChatMemoryInput {
+ humanPrefix?: string;
+ aiPrefix?: string;
+ llm: BaseLanguageModel;
+ memoryKey?: string;
+ maxTokenLimit?: number;
+ }
+ /**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+ export declare class ConversationTokenBufferMemory extends BaseChatMemory implements ConversationTokenBufferMemoryInput {
+ humanPrefix: string;
+ aiPrefix: string;
+ memoryKey: string;
+ maxTokenLimit: number;
+ llm: BaseLanguageModel;
+ constructor(fields: ConversationTokenBufferMemoryInput);
+ get memoryKeys(): string[];
+ /**
+ * Loads the memory variables. It takes an `InputValues` object as a
+ * parameter and returns a `Promise` that resolves with a
+ * `MemoryVariables` object.
+ * @param _values `InputValues` object.
+ * @returns A `Promise` that resolves with a `MemoryVariables` object.
+ */
+ loadMemoryVariables(_values: InputValues): Promise<MemoryVariables>;
+ /**
+ * Saves the context from this conversation to buffer. If the amount
+ * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+ * prune it.
+ */
+ saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;
+ }