langchain 0.0.186 → 0.0.187

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,8 +19,19 @@ const parseRole = (id) => {
  return "ai";
  if (roleHint.includes("Function"))
  return "function";
+ if (roleHint.includes("Tool"))
+ return "tool";
  return "ai";
  };
+ const PARAMS_TO_CAPTURE = [
+ "stop",
+ "stop_sequences",
+ "function_call",
+ "functions",
+ "tools",
+ "tool_choice",
+ "response_format",
+ ];
  const convertToLLMonitorMessages = (input) => {
  const parseMessage = (raw) => {
  if (typeof raw === "string")
@@ -35,11 +46,10 @@ const convertToLLMonitorMessages = (input) => {
  const role = parseRole(message.id);
  const obj = message.kwargs;
  const text = message.text ?? obj.content;
- const functionCall = obj.additional_kwargs?.function_call;
  return {
  role,
  text,
- functionCall,
+ ...(obj.additional_kwargs ?? {}),
  };
  }
  catch (e) {
@@ -83,6 +93,21 @@ const parseOutput = (rawOutput) => {
  return result;
  return rawOutput;
  };
+ const parseExtraAndName = (llm, extraParams, metadata) => {
+ const params = {
+ ...(extraParams?.invocation_params ?? {}),
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ // @ts-ignore this is a valid property
+ ...(llm?.kwargs ?? {}),
+ ...(metadata || {}),
+ };
+ const { model, model_name, modelName, model_id, userId, userProps, ...rest } = params;
+ const name = model || modelName || model_name || model_id || llm.id.at(-1);
+ // Filter rest to only include params we want to capture
+ const extra = Object.fromEntries(Object.entries(rest).filter(([key]) => PARAMS_TO_CAPTURE.includes(key) ||
+ ["string", "number", "boolean"].includes(typeof rest[key])));
+ return { name, extra, userId, userProps };
+ };
  class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
  constructor(fields = {}) {
  super(fields);
@@ -109,18 +134,13 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
  }
  }
  async handleLLMStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata) {
- const params = {
- ...(extraParams?.invocation_params || {}),
- ...(metadata || {}),
- };
- const { model, model_name, modelName, userId, userProps, ...rest } = params;
- const name = model || modelName || model_name || llm.id.at(-1);
+ const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: (0, exports.convertToLLMonitorMessages)(prompts),
- extra: rest,
+ extra,
  userId,
  userProps,
  tags,
@@ -128,19 +148,13 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
  });
  }
  async handleChatModelStart(llm, messages, runId, parentRunId, extraParams, tags, metadata) {
- const params = {
- ...(extraParams?.invocation_params || {}),
- ...(metadata || {}),
- };
- // Expand them so they're excluded from the "extra" field
- const { model, model_name, modelName, userId, userProps, ...rest } = params;
- const name = model || modelName || model_name || llm.id.at(-1);
+ const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: (0, exports.convertToLLMonitorMessages)(messages),
- extra: rest,
+ extra,
  userId,
  userProps,
  tags,
@@ -13,8 +13,19 @@ const parseRole = (id) => {
  return "ai";
  if (roleHint.includes("Function"))
  return "function";
+ if (roleHint.includes("Tool"))
+ return "tool";
  return "ai";
  };
+ const PARAMS_TO_CAPTURE = [
+ "stop",
+ "stop_sequences",
+ "function_call",
+ "functions",
+ "tools",
+ "tool_choice",
+ "response_format",
+ ];
  export const convertToLLMonitorMessages = (input) => {
  const parseMessage = (raw) => {
  if (typeof raw === "string")
@@ -29,11 +40,10 @@ export const convertToLLMonitorMessages = (input) => {
  const role = parseRole(message.id);
  const obj = message.kwargs;
  const text = message.text ?? obj.content;
- const functionCall = obj.additional_kwargs?.function_call;
  return {
  role,
  text,
- functionCall,
+ ...(obj.additional_kwargs ?? {}),
  };
  }
  catch (e) {
@@ -76,6 +86,21 @@ const parseOutput = (rawOutput) => {
  return result;
  return rawOutput;
  };
+ const parseExtraAndName = (llm, extraParams, metadata) => {
+ const params = {
+ ...(extraParams?.invocation_params ?? {}),
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ // @ts-ignore this is a valid property
+ ...(llm?.kwargs ?? {}),
+ ...(metadata || {}),
+ };
+ const { model, model_name, modelName, model_id, userId, userProps, ...rest } = params;
+ const name = model || modelName || model_name || model_id || llm.id.at(-1);
+ // Filter rest to only include params we want to capture
+ const extra = Object.fromEntries(Object.entries(rest).filter(([key]) => PARAMS_TO_CAPTURE.includes(key) ||
+ ["string", "number", "boolean"].includes(typeof rest[key])));
+ return { name, extra, userId, userProps };
+ };
  export class LLMonitorHandler extends BaseCallbackHandler {
  constructor(fields = {}) {
  super(fields);
@@ -102,18 +127,13 @@ export class LLMonitorHandler extends BaseCallbackHandler {
  }
  }
  async handleLLMStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata) {
- const params = {
- ...(extraParams?.invocation_params || {}),
- ...(metadata || {}),
- };
- const { model, model_name, modelName, userId, userProps, ...rest } = params;
- const name = model || modelName || model_name || llm.id.at(-1);
+ const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: convertToLLMonitorMessages(prompts),
- extra: rest,
+ extra,
  userId,
  userProps,
  tags,
@@ -121,19 +141,13 @@ export class LLMonitorHandler extends BaseCallbackHandler {
  });
  }
  async handleChatModelStart(llm, messages, runId, parentRunId, extraParams, tags, metadata) {
- const params = {
- ...(extraParams?.invocation_params || {}),
- ...(metadata || {}),
- };
- // Expand them so they're excluded from the "extra" field
- const { model, model_name, modelName, userId, userProps, ...rest } = params;
- const name = model || modelName || model_name || llm.id.at(-1);
+ const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: convertToLLMonitorMessages(messages),
- extra: rest,
+ extra,
  userId,
  userProps,
  tags,
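
For readers skimming the LLMonitor handler hunks above: the new parseExtraAndName helper merges invocation params, constructor kwargs, and run metadata, resolves the reported model name from whichever alias is present (model, modelName, model_name, model_id, or the class id), and keeps only allow-listed or primitive-valued entries in the reported "extra" field. A minimal standalone sketch of that filtering step, with made-up sample values that are not taken from the package:

  const PARAMS_TO_CAPTURE = ["stop", "functions", "tools", "tool_choice", "response_format"]; // abbreviated allow-list
  const rest = { temperature: 0.2, functions: [{ name: "search" }], callbacks: [{}] };
  const extra = Object.fromEntries(Object.entries(rest).filter(([key]) =>
    PARAMS_TO_CAPTURE.includes(key) || ["string", "number", "boolean"].includes(typeof rest[key])));
  // extra keeps temperature (a number) and functions (allow-listed); callbacks is dropped (object-valued, not allow-listed)
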
@@ -0,0 +1,140 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OllamaFunctions = void 0;
+ const base_js_1 = require("../../chat_models/base.cjs");
+ const index_js_1 = require("../../schema/index.cjs");
+ const ollama_js_1 = require("../../chat_models/ollama.cjs");
+ const prompt_js_1 = require("../../prompts/prompt.cjs");
+ const TOOL_SYSTEM_PROMPT =
+ /* #__PURE__ */
+ prompt_js_1.PromptTemplate.fromTemplate(`You have access to the following tools:
+
+ {tools}
+
+ To use a tool, respond with a JSON object with the following structure:
+ {{
+ "tool": <name of the called tool>,
+ "tool_input": <parameters for the tool matching the above JSON schema>
+ }}`);
+ class OllamaFunctions extends base_js_1.BaseChatModel {
+ static lc_name() {
+ return "OllamaFunctions";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "llm", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "toolSystemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: TOOL_SYSTEM_PROMPT
+ });
+ Object.defineProperty(this, "defaultResponseFunction", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: {
+ name: "__conversational_response",
+ description: "Respond conversationally if no other tools should be called for a given query.",
+ parameters: {
+ type: "object",
+ properties: {
+ response: {
+ type: "string",
+ description: "Conversational response to the user.",
+ },
+ },
+ required: ["response"],
+ },
+ }
+ });
+ Object.defineProperty(this, "lc_namespace", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: ["langchain", "experimental", "chat_models"]
+ });
+ this.llm = fields?.llm ?? new ollama_js_1.ChatOllama({ ...fields, format: "json" });
+ this.toolSystemPrompt = fields?.toolSystemPrompt ?? this.toolSystemPrompt;
+ }
+ invocationParams() {
+ return this.llm.invocationParams();
+ }
+ /** @ignore */
+ _identifyingParams() {
+ return this.llm._identifyingParams();
+ }
+ async _generate(messages, options, runManager) {
+ let functions = options.functions ?? [];
+ if (options.function_call !== undefined) {
+ functions = functions.filter((fn) => fn.name === options.function_call?.name);
+ if (!functions.length) {
+ throw new Error(`If "function_call" is specified, you must also pass a matching function in "functions".`);
+ }
+ }
+ else if (functions.length === 0) {
+ functions.push(this.defaultResponseFunction);
+ }
+ const defaultContent = await TOOL_SYSTEM_PROMPT.format({
+ tools: JSON.stringify(functions, null, 2),
+ });
+ const systemMessage = new index_js_1.SystemMessage({ content: defaultContent });
+ const chatResult = await this.llm._generate([systemMessage, ...messages], options, runManager);
+ const chatGenerationContent = chatResult.generations[0].message.content;
+ if (typeof chatGenerationContent !== "string") {
+ throw new Error("OllamaFunctions does not support non-string output.");
+ }
+ let parsedChatResult;
+ try {
+ parsedChatResult = JSON.parse(chatGenerationContent);
+ }
+ catch (e) {
+ throw new Error(`"${this.llm.model}" did not respond with valid JSON. Please try again.`);
+ }
+ const calledToolName = parsedChatResult.tool;
+ const calledToolArguments = parsedChatResult.tool_input;
+ const calledTool = functions.find((fn) => fn.name === calledToolName);
+ if (calledTool === undefined) {
+ throw new Error(`Failed to parse a function call from ${this.llm.model} output: ${chatGenerationContent}`);
+ }
+ if (calledTool.name === this.defaultResponseFunction.name) {
+ return {
+ generations: [
+ {
+ message: new index_js_1.AIMessage({
+ content: calledToolArguments.response,
+ }),
+ text: calledToolArguments.response,
+ },
+ ],
+ };
+ }
+ const responseMessageWithFunctions = new index_js_1.AIMessage({
+ content: "",
+ additional_kwargs: {
+ function_call: {
+ name: calledToolName,
+ arguments: calledToolArguments
+ ? JSON.stringify(calledToolArguments)
+ : "",
+ },
+ },
+ });
+ return {
+ generations: [{ message: responseMessageWithFunctions, text: "" }],
+ };
+ }
+ _llmType() {
+ return "ollama_functions";
+ }
+ /** @ignore */
+ _combineLLMOutput() {
+ return [];
+ }
+ }
+ exports.OllamaFunctions = OllamaFunctions;
@@ -0,0 +1,76 @@
+ import { BaseChatModel, BaseChatModelParams } from "../../chat_models/base.js";
+ import { CallbackManagerForLLMRun } from "../../callbacks/manager.js";
+ import { BaseMessage, ChatResult } from "../../schema/index.js";
+ import { ChatOllama } from "../../chat_models/ollama.js";
+ import { OllamaInput } from "../../util/ollama.js";
+ import { BaseFunctionCallOptions } from "../../base_language/index.js";
+ import type { BasePromptTemplate } from "../../prompts/base.js";
+ export interface ChatOllamaFunctionsCallOptions extends BaseFunctionCallOptions {
+ }
+ export type OllamaFunctionsInput = Partial<OllamaInput> & BaseChatModelParams & {
+ llm?: ChatOllama;
+ toolSystemPrompt?: BasePromptTemplate;
+ };
+ export declare class OllamaFunctions extends BaseChatModel<ChatOllamaFunctionsCallOptions> {
+ llm: ChatOllama;
+ toolSystemPrompt: BasePromptTemplate;
+ protected defaultResponseFunction: {
+ name: string;
+ description: string;
+ parameters: {
+ type: string;
+ properties: {
+ response: {
+ type: string;
+ description: string;
+ };
+ };
+ required: string[];
+ };
+ };
+ lc_namespace: string[];
+ static lc_name(): string;
+ constructor(fields?: OllamaFunctionsInput);
+ invocationParams(): {
+ model: string;
+ format: import("../../util/types.js").StringWithAutocomplete<"json"> | undefined;
+ options: {
+ embedding_only: boolean | undefined;
+ f16_kv: boolean | undefined;
+ frequency_penalty: number | undefined;
+ logits_all: boolean | undefined;
+ low_vram: boolean | undefined;
+ main_gpu: number | undefined;
+ mirostat: number | undefined;
+ mirostat_eta: number | undefined;
+ mirostat_tau: number | undefined;
+ num_batch: number | undefined;
+ num_ctx: number | undefined;
+ num_gpu: number | undefined;
+ num_gqa: number | undefined;
+ num_keep: number | undefined;
+ num_thread: number | undefined;
+ penalize_newline: boolean | undefined;
+ presence_penalty: number | undefined;
+ repeat_last_n: number | undefined;
+ repeat_penalty: number | undefined;
+ rope_frequency_base: number | undefined;
+ rope_frequency_scale: number | undefined;
+ temperature: number | undefined;
+ stop: string[] | undefined;
+ tfs_z: number | undefined;
+ top_k: number | undefined;
+ top_p: number | undefined;
+ typical_p: number | undefined;
+ use_mlock: boolean | undefined;
+ use_mmap: boolean | undefined;
+ vocab_only: boolean | undefined;
+ };
+ };
+ /** @ignore */
+ _identifyingParams(): Record<string, any>;
+ _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
+ _llmType(): string;
+ /** @ignore */
+ _combineLLMOutput(): never[];
+ }
@@ -0,0 +1,136 @@
+ import { BaseChatModel } from "../../chat_models/base.js";
+ import { AIMessage, SystemMessage, } from "../../schema/index.js";
+ import { ChatOllama } from "../../chat_models/ollama.js";
+ import { PromptTemplate } from "../../prompts/prompt.js";
+ const TOOL_SYSTEM_PROMPT =
+ /* #__PURE__ */
+ PromptTemplate.fromTemplate(`You have access to the following tools:
+
+ {tools}
+
+ To use a tool, respond with a JSON object with the following structure:
+ {{
+ "tool": <name of the called tool>,
+ "tool_input": <parameters for the tool matching the above JSON schema>
+ }}`);
+ export class OllamaFunctions extends BaseChatModel {
+ static lc_name() {
+ return "OllamaFunctions";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "llm", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "toolSystemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: TOOL_SYSTEM_PROMPT
+ });
+ Object.defineProperty(this, "defaultResponseFunction", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: {
+ name: "__conversational_response",
+ description: "Respond conversationally if no other tools should be called for a given query.",
+ parameters: {
+ type: "object",
+ properties: {
+ response: {
+ type: "string",
+ description: "Conversational response to the user.",
+ },
+ },
+ required: ["response"],
+ },
+ }
+ });
+ Object.defineProperty(this, "lc_namespace", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: ["langchain", "experimental", "chat_models"]
+ });
+ this.llm = fields?.llm ?? new ChatOllama({ ...fields, format: "json" });
+ this.toolSystemPrompt = fields?.toolSystemPrompt ?? this.toolSystemPrompt;
+ }
+ invocationParams() {
+ return this.llm.invocationParams();
+ }
+ /** @ignore */
+ _identifyingParams() {
+ return this.llm._identifyingParams();
+ }
+ async _generate(messages, options, runManager) {
+ let functions = options.functions ?? [];
+ if (options.function_call !== undefined) {
+ functions = functions.filter((fn) => fn.name === options.function_call?.name);
+ if (!functions.length) {
+ throw new Error(`If "function_call" is specified, you must also pass a matching function in "functions".`);
+ }
+ }
+ else if (functions.length === 0) {
+ functions.push(this.defaultResponseFunction);
+ }
+ const defaultContent = await TOOL_SYSTEM_PROMPT.format({
+ tools: JSON.stringify(functions, null, 2),
+ });
+ const systemMessage = new SystemMessage({ content: defaultContent });
+ const chatResult = await this.llm._generate([systemMessage, ...messages], options, runManager);
+ const chatGenerationContent = chatResult.generations[0].message.content;
+ if (typeof chatGenerationContent !== "string") {
+ throw new Error("OllamaFunctions does not support non-string output.");
+ }
+ let parsedChatResult;
+ try {
+ parsedChatResult = JSON.parse(chatGenerationContent);
+ }
+ catch (e) {
+ throw new Error(`"${this.llm.model}" did not respond with valid JSON. Please try again.`);
+ }
+ const calledToolName = parsedChatResult.tool;
+ const calledToolArguments = parsedChatResult.tool_input;
+ const calledTool = functions.find((fn) => fn.name === calledToolName);
+ if (calledTool === undefined) {
+ throw new Error(`Failed to parse a function call from ${this.llm.model} output: ${chatGenerationContent}`);
+ }
+ if (calledTool.name === this.defaultResponseFunction.name) {
+ return {
+ generations: [
+ {
+ message: new AIMessage({
+ content: calledToolArguments.response,
+ }),
+ text: calledToolArguments.response,
+ },
+ ],
+ };
+ }
+ const responseMessageWithFunctions = new AIMessage({
+ content: "",
+ additional_kwargs: {
+ function_call: {
+ name: calledToolName,
+ arguments: calledToolArguments
+ ? JSON.stringify(calledToolArguments)
+ : "",
+ },
+ },
+ });
+ return {
+ generations: [{ message: responseMessageWithFunctions, text: "" }],
+ };
+ }
+ _llmType() {
+ return "ollama_functions";
+ }
+ /** @ignore */
+ _combineLLMOutput() {
+ return [];
+ }
+ }
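
The three files above add the experimental OllamaFunctions chat model, which emulates OpenAI-style function calling on a local Ollama model: the tool list is injected through TOOL_SYSTEM_PROMPT, the model is forced into JSON mode, and the parsed reply is mapped onto additional_kwargs.function_call. A hedged usage sketch; the entrypoint matches the package.json "exports" entry added later in this diff, while the model name and the weather schema are illustrative placeholders:

  import { OllamaFunctions } from "langchain/experimental/chat_models/ollama_functions";
  import { HumanMessage } from "langchain/schema";

  const model = new OllamaFunctions({ model: "mistral", temperature: 0.1 }).bind({
    functions: [{
      name: "get_current_weather",
      description: "Get the current weather for a city.",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    }],
  });

  // Inside an async context: the call comes back on additional_kwargs, as with the OpenAI handlers.
  const response = await model.invoke([new HumanMessage("What is the weather in Paris?")]);
  console.log(response.additional_kwargs.function_call);
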
@@ -25,7 +25,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__voyage = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains__combine_documents__reduce = exports.chains = exports.tools__render = exports.tools = exports.base_language = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad__openai_tools = exports.agents__format_scratchpad = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.runnables__remote = exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = void 0;
+ exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__ollama_functions = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = void 0;
+ exports.runnables__remote = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -123,6 +124,7 @@ exports.experimental__babyagi = __importStar(require("../experimental/babyagi/in
  exports.experimental__generative_agents = __importStar(require("../experimental/generative_agents/index.cjs"));
  exports.experimental__plan_and_execute = __importStar(require("../experimental/plan_and_execute/index.cjs"));
  exports.experimental__chat_models__bittensor = __importStar(require("../experimental/chat_models/bittensor.cjs"));
+ exports.experimental__chat_models__ollama_functions = __importStar(require("../experimental/chat_models/ollama_functions.cjs"));
  exports.experimental__chains__violation_of_expectations = __importStar(require("../experimental/chains/violation_of_expectations/index.cjs"));
  exports.evaluation = __importStar(require("../evaluation/index.cjs"));
  exports.runnables__remote = __importStar(require("../runnables/remote.cjs"));
@@ -95,6 +95,7 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
  export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+ export * as experimental__chat_models__ollama_functions from "../experimental/chat_models/ollama_functions.js";
  export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
  export * as evaluation from "../evaluation/index.js";
  export * as runnables__remote from "../runnables/remote.js";
@@ -96,6 +96,7 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
  export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+ export * as experimental__chat_models__ollama_functions from "../experimental/chat_models/ollama_functions.js";
  export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
  export * as evaluation from "../evaluation/index.js";
  export * as runnables__remote from "../runnables/remote.js";
@@ -0,0 +1,92 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ConversationTokenBufferMemory = void 0;
+ const base_js_1 = require("./base.cjs");
+ const chat_memory_js_1 = require("./chat_memory.cjs");
+ /**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+ class ConversationTokenBufferMemory extends chat_memory_js_1.BaseChatMemory {
+ constructor(fields) {
+ super(fields);
+ Object.defineProperty(this, "humanPrefix", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "Human"
+ });
+ Object.defineProperty(this, "aiPrefix", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "AI"
+ });
+ Object.defineProperty(this, "memoryKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "history"
+ });
+ Object.defineProperty(this, "maxTokenLimit", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 2000
+ }); // Default max token limit of 2000 which can be overridden
+ Object.defineProperty(this, "llm", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.llm = fields.llm;
+ this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix;
+ this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix;
+ this.memoryKey = fields?.memoryKey ?? this.memoryKey;
+ this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit;
+ }
+ get memoryKeys() {
+ return [this.memoryKey];
+ }
+ /**
+ * Loads the memory variables. It takes an `InputValues` object as a
+ * parameter and returns a `Promise` that resolves with a
+ * `MemoryVariables` object.
+ * @param _values `InputValues` object.
+ * @returns A `Promise` that resolves with a `MemoryVariables` object.
+ */
+ async loadMemoryVariables(_values) {
+ const messages = await this.chatHistory.getMessages();
+ if (this.returnMessages) {
+ const result = {
+ [this.memoryKey]: messages,
+ };
+ return result;
+ }
+ const result = {
+ [this.memoryKey]: (0, base_js_1.getBufferString)(messages, this.humanPrefix, this.aiPrefix),
+ };
+ return result;
+ }
+ /**
+ * Saves the context from this conversation to buffer. If the amount
+ * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+ * prune it.
+ */
+ async saveContext(inputValues, outputValues) {
+ await super.saveContext(inputValues, outputValues);
+ // Prune buffer if it exceeds the max token limit set for this instance.
+ const buffer = await this.chatHistory.getMessages();
+ let currBufferLength = await this.llm.getNumTokens((0, base_js_1.getBufferString)(buffer, this.humanPrefix, this.aiPrefix));
+ if (currBufferLength > this.maxTokenLimit) {
+ const prunedMemory = [];
+ while (currBufferLength > this.maxTokenLimit) {
+ prunedMemory.push(buffer.shift());
+ currBufferLength = await this.llm.getNumTokens((0, base_js_1.getBufferString)(buffer, this.humanPrefix, this.aiPrefix));
+ }
+ }
+ }
+ }
+ exports.ConversationTokenBufferMemory = ConversationTokenBufferMemory;
@@ -0,0 +1,41 @@
+ import { InputValues, MemoryVariables, OutputValues } from "./base.js";
+ import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js";
+ import { BaseLanguageModel } from "../base_language/index.js";
+ /**
+ * Interface for the input parameters of the `BufferTokenMemory` class.
+ */
+ export interface ConversationTokenBufferMemoryInput extends BaseChatMemoryInput {
+ humanPrefix?: string;
+ aiPrefix?: string;
+ llm: BaseLanguageModel;
+ memoryKey?: string;
+ maxTokenLimit?: number;
+ }
+ /**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+ export declare class ConversationTokenBufferMemory extends BaseChatMemory implements ConversationTokenBufferMemoryInput {
+ humanPrefix: string;
+ aiPrefix: string;
+ memoryKey: string;
+ maxTokenLimit: number;
+ llm: BaseLanguageModel;
+ constructor(fields: ConversationTokenBufferMemoryInput);
+ get memoryKeys(): string[];
+ /**
+ * Loads the memory variables. It takes an `InputValues` object as a
+ * parameter and returns a `Promise` that resolves with a
+ * `MemoryVariables` object.
+ * @param _values `InputValues` object.
+ * @returns A `Promise` that resolves with a `MemoryVariables` object.
+ */
+ loadMemoryVariables(_values: InputValues): Promise<MemoryVariables>;
+ /**
+ * Saves the context from this conversation to buffer. If the amount
+ * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+ * prune it.
+ */
+ saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;
+ }
@@ -0,0 +1,88 @@
+ import { getBufferString, } from "./base.js";
+ import { BaseChatMemory } from "./chat_memory.js";
+ /**
+ * Class that represents a conversation chat memory with a token buffer.
+ * It extends the `BaseChatMemory` class and implements the
+ * `ConversationTokenBufferMemoryInput` interface.
+ */
+ export class ConversationTokenBufferMemory extends BaseChatMemory {
+ constructor(fields) {
+ super(fields);
+ Object.defineProperty(this, "humanPrefix", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "Human"
+ });
+ Object.defineProperty(this, "aiPrefix", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "AI"
+ });
+ Object.defineProperty(this, "memoryKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "history"
+ });
+ Object.defineProperty(this, "maxTokenLimit", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 2000
+ }); // Default max token limit of 2000 which can be overridden
+ Object.defineProperty(this, "llm", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.llm = fields.llm;
+ this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix;
+ this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix;
+ this.memoryKey = fields?.memoryKey ?? this.memoryKey;
+ this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit;
+ }
+ get memoryKeys() {
+ return [this.memoryKey];
+ }
+ /**
+ * Loads the memory variables. It takes an `InputValues` object as a
+ * parameter and returns a `Promise` that resolves with a
+ * `MemoryVariables` object.
+ * @param _values `InputValues` object.
+ * @returns A `Promise` that resolves with a `MemoryVariables` object.
+ */
+ async loadMemoryVariables(_values) {
+ const messages = await this.chatHistory.getMessages();
+ if (this.returnMessages) {
+ const result = {
+ [this.memoryKey]: messages,
+ };
+ return result;
+ }
+ const result = {
+ [this.memoryKey]: getBufferString(messages, this.humanPrefix, this.aiPrefix),
+ };
+ return result;
+ }
+ /**
+ * Saves the context from this conversation to buffer. If the amount
+ * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT,
+ * prune it.
+ */
+ async saveContext(inputValues, outputValues) {
+ await super.saveContext(inputValues, outputValues);
+ // Prune buffer if it exceeds the max token limit set for this instance.
+ const buffer = await this.chatHistory.getMessages();
+ let currBufferLength = await this.llm.getNumTokens(getBufferString(buffer, this.humanPrefix, this.aiPrefix));
+ if (currBufferLength > this.maxTokenLimit) {
+ const prunedMemory = [];
+ while (currBufferLength > this.maxTokenLimit) {
+ prunedMemory.push(buffer.shift());
+ currBufferLength = await this.llm.getNumTokens(getBufferString(buffer, this.humanPrefix, this.aiPrefix));
+ }
+ }
+ }
+ }
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.ConversationSummaryBufferMemory = exports.CombinedMemory = exports.ENTITY_MEMORY_CONVERSATION_TEMPLATE = exports.EntityMemory = exports.VectorStoreRetrieverMemory = exports.MotorheadMemory = exports.ChatMessageHistory = exports.BaseChatMemory = exports.BufferWindowMemory = exports.BaseConversationSummaryMemory = exports.ConversationSummaryMemory = exports.getBufferString = exports.getOutputValue = exports.getInputValue = exports.BaseMemory = exports.BufferMemory = void 0;
+ exports.ConversationTokenBufferMemory = exports.ConversationSummaryBufferMemory = exports.CombinedMemory = exports.ENTITY_MEMORY_CONVERSATION_TEMPLATE = exports.EntityMemory = exports.VectorStoreRetrieverMemory = exports.MotorheadMemory = exports.ChatMessageHistory = exports.BaseChatMemory = exports.BufferWindowMemory = exports.BaseConversationSummaryMemory = exports.ConversationSummaryMemory = exports.getBufferString = exports.getOutputValue = exports.getInputValue = exports.BaseMemory = exports.BufferMemory = void 0;
  var buffer_memory_js_1 = require("./buffer_memory.cjs");
  Object.defineProperty(exports, "BufferMemory", { enumerable: true, get: function () { return buffer_memory_js_1.BufferMemory; } });
  var base_js_1 = require("./base.cjs");
@@ -29,3 +29,5 @@ var combined_memory_js_1 = require("./combined_memory.cjs");
  Object.defineProperty(exports, "CombinedMemory", { enumerable: true, get: function () { return combined_memory_js_1.CombinedMemory; } });
  var summary_buffer_js_1 = require("./summary_buffer.cjs");
  Object.defineProperty(exports, "ConversationSummaryBufferMemory", { enumerable: true, get: function () { return summary_buffer_js_1.ConversationSummaryBufferMemory; } });
+ var buffer_token_memory_js_1 = require("./buffer_token_memory.cjs");
+ Object.defineProperty(exports, "ConversationTokenBufferMemory", { enumerable: true, get: function () { return buffer_token_memory_js_1.ConversationTokenBufferMemory; } });
@@ -10,3 +10,4 @@ export { EntityMemory } from "./entity_memory.js";
  export { ENTITY_MEMORY_CONVERSATION_TEMPLATE } from "./prompt.js";
  export { type CombinedMemoryInput, CombinedMemory } from "./combined_memory.js";
  export { ConversationSummaryBufferMemory, type ConversationSummaryBufferMemoryInput, } from "./summary_buffer.js";
+ export { ConversationTokenBufferMemory, type ConversationTokenBufferMemoryInput, } from "./buffer_token_memory.js";
@@ -10,3 +10,4 @@ export { EntityMemory } from "./entity_memory.js";
  export { ENTITY_MEMORY_CONVERSATION_TEMPLATE } from "./prompt.js";
  export { CombinedMemory } from "./combined_memory.js";
  export { ConversationSummaryBufferMemory, } from "./summary_buffer.js";
+ export { ConversationTokenBufferMemory, } from "./buffer_token_memory.js";
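
The buffer_token_memory files and the index re-exports above make ConversationTokenBufferMemory available from the memory entrypoint: it behaves like a buffer memory whose history is pruned from the front once llm.getNumTokens reports the buffer exceeding maxTokenLimit. A hedged usage sketch; the ChatOpenAI model and the sample turns are illustrative, while the option names come from the diff:

  import { ConversationTokenBufferMemory } from "langchain/memory";
  import { ChatOpenAI } from "langchain/chat_models/openai";

  // llm is required because pruning relies on llm.getNumTokens().
  const memory = new ConversationTokenBufferMemory({
    llm: new ChatOpenAI(),
    maxTokenLimit: 50,
  });

  await memory.saveContext({ input: "My name is Ada." }, { output: "Nice to meet you, Ada." });
  const { history } = await memory.loadMemoryVariables({});
  // history is a "Human: ... / AI: ..." string under the default memoryKey, trimmed from the oldest turns first.
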
@@ -12,7 +12,7 @@ const index_js_2 = require("../schema/runnable/index.cjs");
  */
  class StringPromptValue extends index_js_1.BasePromptValue {
  constructor(value) {
- super(...arguments);
+ super({ value });
  Object.defineProperty(this, "lc_namespace", {
  enumerable: true,
  configurable: true,
@@ -9,7 +9,7 @@ import { Runnable } from "../schema/runnable/index.js";
  */
  export class StringPromptValue extends BasePromptValue {
  constructor(value) {
- super(...arguments);
+ super({ value });
  Object.defineProperty(this, "lc_namespace", {
  enumerable: true,
  configurable: true,
@@ -396,8 +396,8 @@ class ToolMessage extends BaseMessage {
  }
  exports.ToolMessage = ToolMessage;
  /**
- * Represents a chunk of a function message, which can be concatenated
- * with other function message chunks.
+ * Represents a chunk of a tool message, which can be concatenated
+ * with other tool message chunks.
  */
  class ToolMessageChunk extends BaseMessageChunk {
  constructor(fields) {
@@ -232,8 +232,8 @@ export declare class ToolMessage extends BaseMessage {
  _getType(): MessageType;
  }
  /**
- * Represents a chunk of a function message, which can be concatenated
- * with other function message chunks.
+ * Represents a chunk of a tool message, which can be concatenated
+ * with other tool message chunks.
  */
  export declare class ToolMessageChunk extends BaseMessageChunk {
  tool_call_id: string;
@@ -381,8 +381,8 @@ export class ToolMessage extends BaseMessage {
  }
  }
  /**
- * Represents a chunk of a function message, which can be concatenated
- * with other function message chunks.
+ * Represents a chunk of a tool message, which can be concatenated
+ * with other tool message chunks.
  */
  export class ToolMessageChunk extends BaseMessageChunk {
  constructor(fields) {
@@ -29,20 +29,18 @@ async function* createOllamaStream(baseUrl, params, options) {
  }
  const stream = stream_js_1.IterableReadableStream.fromReadableStream(response.body);
  const decoder = new TextDecoder();
+ let extra = "";
  for await (const chunk of stream) {
- try {
- if (chunk !== undefined) {
- const lines = decoder
- .decode(chunk)
- .split("\n")
- .filter((v) => v.length);
- for (const line of lines) {
- yield JSON.parse(line);
- }
+ const decoded = extra + decoder.decode(chunk);
+ const lines = decoded.split("\n");
+ extra = lines.pop() || "";
+ for (const line of lines) {
+ try {
+ yield JSON.parse(line);
+ }
+ catch (e) {
+ console.warn(`Received a non-JSON parseable chunk: ${line}`);
  }
- }
- catch (e) {
- console.warn(`Received a non-JSON parseable chunk: ${decoder.decode(chunk)}`);
  }
  }
  }
@@ -26,20 +26,18 @@ export async function* createOllamaStream(baseUrl, params, options) {
  }
  const stream = IterableReadableStream.fromReadableStream(response.body);
  const decoder = new TextDecoder();
+ let extra = "";
  for await (const chunk of stream) {
- try {
- if (chunk !== undefined) {
- const lines = decoder
- .decode(chunk)
- .split("\n")
- .filter((v) => v.length);
- for (const line of lines) {
- yield JSON.parse(line);
- }
+ const decoded = extra + decoder.decode(chunk);
+ const lines = decoded.split("\n");
+ extra = lines.pop() || "";
+ for (const line of lines) {
+ try {
+ yield JSON.parse(line);
+ }
+ catch (e) {
+ console.warn(`Received a non-JSON parseable chunk: ${line}`);
  }
- }
- catch (e) {
- console.warn(`Received a non-JSON parseable chunk: ${decoder.decode(chunk)}`);
  }
  }
  }
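
The two createOllamaStream hunks above fix a streaming bug: Ollama emits newline-delimited JSON, and a network chunk can end in the middle of an object, so parsing each chunk in isolation could warn and drop data. The new code keeps the trailing partial line in extra and prepends it to the next chunk, so JSON.parse only ever sees complete lines. A standalone sketch of the same pattern, not the package's code, assuming an ES module context for the top-level for await:

  async function* parseNdjson(chunks: AsyncIterable<string>) {
    let extra = "";
    for await (const chunk of chunks) {
      const lines = (extra + chunk).split("\n");
      extra = lines.pop() || ""; // the last element may be an incomplete line
      for (const line of lines) {
        if (line.trim()) yield JSON.parse(line);
      }
    }
  }

  // {"a":1} arrives split across two chunks but still parses exactly once.
  async function* demo() { yield '{"a":'; yield '1}\n{"b":2}\n'; }
  for await (const obj of parseNdjson(demo())) console.log(obj);
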
@@ -0,0 +1 @@
+ module.exports = require('../../dist/experimental/chat_models/ollama_functions.cjs');
@@ -0,0 +1 @@
+ export * from '../../dist/experimental/chat_models/ollama_functions.js'
@@ -0,0 +1 @@
+ export * from '../../dist/experimental/chat_models/ollama_functions.js'
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langchain",
- "version": "0.0.186",
+ "version": "0.0.187",
  "description": "Typescript bindings for langchain",
  "type": "module",
  "engines": {
@@ -778,6 +778,9 @@
  "experimental/chat_models/bittensor.cjs",
  "experimental/chat_models/bittensor.js",
  "experimental/chat_models/bittensor.d.ts",
+ "experimental/chat_models/ollama_functions.cjs",
+ "experimental/chat_models/ollama_functions.js",
+ "experimental/chat_models/ollama_functions.d.ts",
  "experimental/llms/bittensor.cjs",
  "experimental/llms/bittensor.js",
  "experimental/llms/bittensor.d.ts",
@@ -806,7 +809,8 @@
  "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rimraf dist/tests dist/**/tests",
  "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rimraf dist-cjs",
  "build:watch": "node scripts/create-entrypoints.js && tsc --outDir dist/ --watch",
- "build:scripts": "node scripts/create-entrypoints.js && node scripts/check-tree-shaking.js && node scripts/generate-docs-llm-compatibility-table",
+ "build:scripts": "node scripts/create-entrypoints.js && node scripts/check-tree-shaking.js && yarn conditional:api_refs && node scripts/generate-docs-llm-compatibility-table",
+ "conditional:api_refs": "bash scripts/build-api-refs.sh",
  "lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint src && dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
  "lint:fix": "yarn lint --fix",
  "precommit": "lint-staged",
@@ -923,7 +927,7 @@
  "jest": "^29.5.0",
  "jest-environment-node": "^29.6.4",
  "jsdom": "^22.1.0",
- "llmonitor": "^0.5.8",
+ "llmonitor": "^0.5.9",
  "lodash": "^4.17.21",
  "mammoth": "^1.5.1",
  "ml-matrix": "^6.10.4",
@@ -1029,7 +1033,7 @@
  "ignore": "^5.2.0",
  "ioredis": "^5.3.2",
  "jsdom": "*",
- "llmonitor": "^0.5.8",
+ "llmonitor": "^0.5.9",
  "lodash": "^4.17.21",
  "mammoth": "*",
  "mongodb": "^5.2.0",
@@ -2674,6 +2678,11 @@
  "import": "./experimental/chat_models/bittensor.js",
  "require": "./experimental/chat_models/bittensor.cjs"
  },
+ "./experimental/chat_models/ollama_functions": {
+ "types": "./experimental/chat_models/ollama_functions.d.ts",
+ "import": "./experimental/chat_models/ollama_functions.js",
+ "require": "./experimental/chat_models/ollama_functions.cjs"
+ },
  "./experimental/llms/bittensor": {
  "types": "./experimental/llms/bittensor.d.ts",
  "import": "./experimental/llms/bittensor.js",