langchain 0.0.185 → 0.0.187

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/dist/agents/openai/index.cjs +2 -1
  2. package/dist/agents/openai/index.js +2 -1
  3. package/dist/callbacks/handlers/llmonitor.cjs +31 -17
  4. package/dist/callbacks/handlers/llmonitor.js +31 -17
  5. package/dist/chains/combine_docs_chain.cjs +1 -1
  6. package/dist/chains/combine_docs_chain.js +1 -1
  7. package/dist/chains/llm_chain.cjs +52 -7
  8. package/dist/chains/llm_chain.d.ts +20 -12
  9. package/dist/chains/llm_chain.js +53 -8
  10. package/dist/chat_models/ollama.cjs +8 -0
  11. package/dist/chat_models/ollama.d.ts +3 -0
  12. package/dist/chat_models/ollama.js +8 -0
  13. package/dist/chat_models/openai.cjs +3 -0
  14. package/dist/chat_models/openai.js +3 -0
  15. package/dist/document_loaders/fs/unstructured.d.ts +1 -5
  16. package/dist/embeddings/ollama.d.ts +1 -1
  17. package/dist/experimental/chat_models/ollama_functions.cjs +140 -0
  18. package/dist/experimental/chat_models/ollama_functions.d.ts +76 -0
  19. package/dist/experimental/chat_models/ollama_functions.js +136 -0
  20. package/dist/llms/ollama.cjs +8 -0
  21. package/dist/llms/ollama.d.ts +3 -0
  22. package/dist/llms/ollama.js +8 -0
  23. package/dist/llms/openai.cjs +1 -1
  24. package/dist/llms/openai.js +1 -1
  25. package/dist/load/import_map.cjs +3 -1
  26. package/dist/load/import_map.d.ts +1 -0
  27. package/dist/load/import_map.js +1 -0
  28. package/dist/memory/buffer_token_memory.cjs +92 -0
  29. package/dist/memory/buffer_token_memory.d.ts +41 -0
  30. package/dist/memory/buffer_token_memory.js +88 -0
  31. package/dist/memory/index.cjs +3 -1
  32. package/dist/memory/index.d.ts +1 -0
  33. package/dist/memory/index.js +1 -0
  34. package/dist/output_parsers/index.cjs +3 -1
  35. package/dist/output_parsers/index.d.ts +1 -0
  36. package/dist/output_parsers/index.js +1 -0
  37. package/dist/output_parsers/openai_functions.cjs +3 -3
  38. package/dist/output_parsers/openai_functions.js +3 -3
  39. package/dist/output_parsers/openai_tools.cjs +53 -0
  40. package/dist/output_parsers/openai_tools.d.ts +22 -0
  41. package/dist/output_parsers/openai_tools.js +49 -0
  42. package/dist/prompts/base.cjs +1 -1
  43. package/dist/prompts/base.d.ts +2 -1
  44. package/dist/prompts/base.js +1 -1
  45. package/dist/prompts/chat.cjs +1 -1
  46. package/dist/prompts/chat.js +1 -1
  47. package/dist/schema/index.cjs +2 -2
  48. package/dist/schema/index.d.ts +4 -6
  49. package/dist/schema/index.js +2 -2
  50. package/dist/schema/runnable/base.d.ts +2 -2
  51. package/dist/util/ollama.cjs +10 -12
  52. package/dist/util/ollama.d.ts +3 -0
  53. package/dist/util/ollama.js +10 -12
  54. package/dist/util/types.cjs +5 -0
  55. package/dist/util/types.d.ts +4 -0
  56. package/dist/util/types.js +4 -0
  57. package/experimental/chat_models/ollama_functions.cjs +1 -0
  58. package/experimental/chat_models/ollama_functions.d.ts +1 -0
  59. package/experimental/chat_models/ollama_functions.js +1 -0
  60. package/package.json +13 -4

package/dist/agents/openai/index.cjs
@@ -143,7 +143,8 @@ class OpenAIAgent extends agent_js_1.Agent {
         const valuesForLLM = {
             functions: this.tools.map(convert_to_openai_js_1.formatToOpenAIFunction),
         };
-        for (const key of this.llmChain.llm.callKeys) {
+        const callKeys = "callKeys" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];
+        for (const key of callKeys) {
             if (key in inputs) {
                 valuesForLLM[key] = inputs[key];
                 delete valuesForPrompt[key];

package/dist/agents/openai/index.js
@@ -139,7 +139,8 @@ export class OpenAIAgent extends Agent {
         const valuesForLLM = {
             functions: this.tools.map(formatToOpenAIFunction),
         };
-        for (const key of this.llmChain.llm.callKeys) {
+        const callKeys = "callKeys" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];
+        for (const key of callKeys) {
             if (key in inputs) {
                 valuesForLLM[key] = inputs[key];
                 delete valuesForPrompt[key];
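
Both builds of the agent get the same guard: `callKeys` only exists on `BaseLanguageModel` subclasses, so probing for it first lets `llmChain.llm` be a plain Runnable (such as a bound model) without throwing. A minimal sketch of the narrowing pattern, with hypothetical stand-in types:

```ts
// The `in` check narrows the union, so `.callKeys` is safe to read;
// a bare Runnable simply yields no call keys.
type WithCallKeys = { callKeys: string[] };
type BareRunnable = { invoke: (input: unknown) => Promise<unknown> }; // hypothetical stand-in

function getCallKeys(llm: WithCallKeys | BareRunnable): string[] {
    return "callKeys" in llm ? llm.callKeys : [];
}
```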

package/dist/callbacks/handlers/llmonitor.cjs
@@ -19,8 +19,19 @@ const parseRole = (id) => {
         return "ai";
     if (roleHint.includes("Function"))
         return "function";
+    if (roleHint.includes("Tool"))
+        return "tool";
     return "ai";
 };
+const PARAMS_TO_CAPTURE = [
+    "stop",
+    "stop_sequences",
+    "function_call",
+    "functions",
+    "tools",
+    "tool_choice",
+    "response_format",
+];
 const convertToLLMonitorMessages = (input) => {
     const parseMessage = (raw) => {
         if (typeof raw === "string")
@@ -35,11 +46,10 @@ const convertToLLMonitorMessages = (input) => {
             const role = parseRole(message.id);
             const obj = message.kwargs;
             const text = message.text ?? obj.content;
-            const functionCall = obj.additional_kwargs?.function_call;
             return {
                 role,
                 text,
-                functionCall,
+                ...(obj.additional_kwargs ?? {}),
             };
         }
         catch (e) {
@@ -83,6 +93,21 @@ const parseOutput = (rawOutput) => {
         return result;
     return rawOutput;
 };
+const parseExtraAndName = (llm, extraParams, metadata) => {
+    const params = {
+        ...(extraParams?.invocation_params ?? {}),
+        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+        // @ts-ignore this is a valid property
+        ...(llm?.kwargs ?? {}),
+        ...(metadata || {}),
+    };
+    const { model, model_name, modelName, model_id, userId, userProps, ...rest } = params;
+    const name = model || modelName || model_name || model_id || llm.id.at(-1);
+    // Filter rest to only include params we want to capture
+    const extra = Object.fromEntries(Object.entries(rest).filter(([key]) => PARAMS_TO_CAPTURE.includes(key) ||
+        ["string", "number", "boolean"].includes(typeof rest[key])));
+    return { name, extra, userId, userProps };
+};
 class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
     constructor(fields = {}) {
         super(fields);
@@ -109,18 +134,13 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
         }
     }
     async handleLLMStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata) {
-        const params = {
-            ...(extraParams?.invocation_params || {}),
-            ...(metadata || {}),
-        };
-        const { model, model_name, modelName, userId, userProps, ...rest } = params;
-        const name = model || modelName || model_name || llm.id.at(-1);
+        const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: (0, exports.convertToLLMonitorMessages)(prompts),
-            extra: rest,
+            extra,
             userId,
             userProps,
             tags,
@@ -128,19 +148,13 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
         });
     }
    async handleChatModelStart(llm, messages, runId, parentRunId, extraParams, tags, metadata) {
-        const params = {
-            ...(extraParams?.invocation_params || {}),
-            ...(metadata || {}),
-        };
-        // Expand them so they're excluded from the "extra" field
-        const { model, model_name, modelName, userId, userProps, ...rest } = params;
-        const name = model || modelName || model_name || llm.id.at(-1);
+        const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: (0, exports.convertToLLMonitorMessages)(messages),
-            extra: rest,
+            extra,
             userId,
             userProps,
             tags,

package/dist/callbacks/handlers/llmonitor.js
@@ -13,8 +13,19 @@ const parseRole = (id) => {
         return "ai";
     if (roleHint.includes("Function"))
         return "function";
+    if (roleHint.includes("Tool"))
+        return "tool";
     return "ai";
 };
+const PARAMS_TO_CAPTURE = [
+    "stop",
+    "stop_sequences",
+    "function_call",
+    "functions",
+    "tools",
+    "tool_choice",
+    "response_format",
+];
 export const convertToLLMonitorMessages = (input) => {
     const parseMessage = (raw) => {
         if (typeof raw === "string")
@@ -29,11 +40,10 @@ export const convertToLLMonitorMessages = (input) => {
             const role = parseRole(message.id);
             const obj = message.kwargs;
             const text = message.text ?? obj.content;
-            const functionCall = obj.additional_kwargs?.function_call;
             return {
                 role,
                 text,
-                functionCall,
+                ...(obj.additional_kwargs ?? {}),
             };
         }
         catch (e) {
@@ -76,6 +86,21 @@ const parseOutput = (rawOutput) => {
         return result;
     return rawOutput;
 };
+const parseExtraAndName = (llm, extraParams, metadata) => {
+    const params = {
+        ...(extraParams?.invocation_params ?? {}),
+        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+        // @ts-ignore this is a valid property
+        ...(llm?.kwargs ?? {}),
+        ...(metadata || {}),
+    };
+    const { model, model_name, modelName, model_id, userId, userProps, ...rest } = params;
+    const name = model || modelName || model_name || model_id || llm.id.at(-1);
+    // Filter rest to only include params we want to capture
+    const extra = Object.fromEntries(Object.entries(rest).filter(([key]) => PARAMS_TO_CAPTURE.includes(key) ||
+        ["string", "number", "boolean"].includes(typeof rest[key])));
+    return { name, extra, userId, userProps };
+};
 export class LLMonitorHandler extends BaseCallbackHandler {
     constructor(fields = {}) {
         super(fields);
@@ -102,18 +127,13 @@ export class LLMonitorHandler extends BaseCallbackHandler {
         }
     }
     async handleLLMStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata) {
-        const params = {
-            ...(extraParams?.invocation_params || {}),
-            ...(metadata || {}),
-        };
-        const { model, model_name, modelName, userId, userProps, ...rest } = params;
-        const name = model || modelName || model_name || llm.id.at(-1);
+        const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: convertToLLMonitorMessages(prompts),
-            extra: rest,
+            extra,
             userId,
             userProps,
             tags,
@@ -121,19 +141,13 @@ export class LLMonitorHandler extends BaseCallbackHandler {
         });
     }
     async handleChatModelStart(llm, messages, runId, parentRunId, extraParams, tags, metadata) {
-        const params = {
-            ...(extraParams?.invocation_params || {}),
-            ...(metadata || {}),
-        };
-        // Expand them so they're excluded from the "extra" field
-        const { model, model_name, modelName, userId, userProps, ...rest } = params;
-        const name = model || modelName || model_name || llm.id.at(-1);
+        const { name, extra, userId, userProps } = parseExtraAndName(llm, extraParams, metadata);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: convertToLLMonitorMessages(messages),
-            extra: rest,
+            extra,
             userId,
             userProps,
             tags,
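
Both llmonitor builds share the same refactor: the name/extra extraction duplicated across `handleLLMStart` and `handleChatModelStart` moves into `parseExtraAndName`, which additionally merges `llm.kwargs`, recognizes `model_id` as a name source, and filters what gets reported. The filter keeps primitives plus an allow-list of structured params. A standalone sketch of that rule, with made-up values:

```ts
const PARAMS_TO_CAPTURE = [
    "stop", "stop_sequences", "function_call", "functions",
    "tools", "tool_choice", "response_format",
];
const rest: Record<string, unknown> = {
    temperature: 0.7,            // primitive -> kept
    tools: [{ name: "search" }], // object, but allow-listed -> kept
    client: { retries: 3 },      // object, not allow-listed -> dropped
};
const extra = Object.fromEntries(Object.entries(rest).filter(([key]) =>
    PARAMS_TO_CAPTURE.includes(key) ||
    ["string", "number", "boolean"].includes(typeof rest[key])));
// extra => { temperature: 0.7, tools: [ { name: 'search' } ] }
```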

package/dist/chains/combine_docs_chain.cjs
@@ -177,7 +177,7 @@ class MapReduceDocumentsChain extends base_js_1.BaseChain {
             [this.combineDocumentChain.inputKey]: currentDocs,
             ...rest,
         }));
-        const length = await this.combineDocumentChain.llmChain.llm.getNumTokens(formatted);
+        const length = await this.combineDocumentChain.llmChain._getNumTokens(formatted);
         const withinTokenLimit = length < this.maxTokens;
         // If we can skip the map step, and we're within the token limit, we don't
         // need to run the map step, so just break out of the loop.

package/dist/chains/combine_docs_chain.js
@@ -173,7 +173,7 @@ export class MapReduceDocumentsChain extends BaseChain {
             [this.combineDocumentChain.inputKey]: currentDocs,
             ...rest,
         }));
-        const length = await this.combineDocumentChain.llmChain.llm.getNumTokens(formatted);
+        const length = await this.combineDocumentChain.llmChain._getNumTokens(formatted);
         const withinTokenLimit = length < this.maxTokens;
         // If we can skip the map step, and we're within the token limit, we don't
         // need to run the map step, so just break out of the loop.
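
In both builds, `MapReduceDocumentsChain` stops reaching through `llmChain.llm.getNumTokens`, which would fail when `llm` is a Runnable wrapper rather than a `BaseLanguageModel`; it now defers to the chain's new `_getNumTokens` helper, which unwraps to the underlying model first (see the `llm_chain` hunks below).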

package/dist/chains/llm_chain.cjs
@@ -5,6 +5,29 @@ const base_js_1 = require("./base.cjs");
 const base_js_2 = require("../prompts/base.cjs");
 const index_js_1 = require("../base_language/index.cjs");
 const noop_js_1 = require("../output_parsers/noop.cjs");
+const base_js_3 = require("../schema/runnable/base.cjs");
+function isBaseLanguageModel(llmLike) {
+    return typeof llmLike._llmType === "function";
+}
+function _getLanguageModel(llmLike) {
+    if (isBaseLanguageModel(llmLike)) {
+        return llmLike;
+    }
+    else if ("bound" in llmLike && base_js_3.Runnable.isRunnable(llmLike.bound)) {
+        return _getLanguageModel(llmLike.bound);
+    }
+    else if ("runnable" in llmLike &&
+        "fallbacks" in llmLike &&
+        base_js_3.Runnable.isRunnable(llmLike.runnable)) {
+        return _getLanguageModel(llmLike.runnable);
+    }
+    else if ("default" in llmLike && base_js_3.Runnable.isRunnable(llmLike.default)) {
+        return _getLanguageModel(llmLike.default);
+    }
+    else {
+        throw new Error("Unable to extract BaseLanguageModel from llmLike object.");
+    }
+}
 /**
  * Chain to run queries against LLMs.
  *
@@ -79,10 +102,15 @@ class LLMChain extends base_js_1.BaseChain {
             this.outputParser = this.prompt.outputParser;
         }
     }
+    getCallKeys() {
+        const callKeys = "callKeys" in this.llm ? this.llm.callKeys : [];
+        return callKeys;
+    }
     /** @ignore */
     _selectMemoryInputs(values) {
         const valuesForMemory = super._selectMemoryInputs(values);
-        for (const key of this.llm.callKeys) {
+        const callKeys = this.getCallKeys();
+        for (const key of callKeys) {
             if (key in values) {
                 delete valuesForMemory[key];
             }
@@ -114,16 +142,29 @@ class LLMChain extends base_js_1.BaseChain {
         const valuesForLLM = {
             ...this.llmKwargs,
         };
-        for (const key of this.llm.callKeys) {
+        const callKeys = this.getCallKeys();
+        for (const key of callKeys) {
             if (key in values) {
-                valuesForLLM[key] = values[key];
-                delete valuesForPrompt[key];
+                if (valuesForLLM) {
+                    valuesForLLM[key] =
+                        values[key];
+                    delete valuesForPrompt[key];
+                }
             }
         }
         const promptValue = await this.prompt.formatPromptValue(valuesForPrompt);
-        const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+        if ("generatePrompt" in this.llm) {
+            const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+            return {
+                [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+            };
+        }
+        const modelWithParser = this.outputParser
+            ? this.llm.pipe(this.outputParser)
+            : this.llm;
+        const response = await modelWithParser.invoke(promptValue, runManager?.getChild());
         return {
-            [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+            [this.outputKey]: response,
         };
     }
     /**
@@ -160,11 +201,15 @@ class LLMChain extends base_js_1.BaseChain {
     }
     /** @deprecated */
     serialize() {
+        const serialize = "serialize" in this.llm ? this.llm.serialize() : undefined;
         return {
             _type: `${this._chainType()}_chain`,
-            llm: this.llm.serialize(),
+            llm: serialize,
             prompt: this.prompt.serialize(),
         };
     }
+    _getNumTokens(text) {
+        return _getLanguageModel(this.llm).getNumTokens(text);
+    }
 }
 exports.LLMChain = LLMChain;
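
The `_getLanguageModel` helper walks wrapper Runnables recursively: `bound` matches a `RunnableBinding`, the `runnable`/`fallbacks` pair matches a `RunnableWithFallbacks`, and `default` appears to cover configurable wrappers, so `_getNumTokens` can always reach a concrete `BaseLanguageModel`. `_call` takes the parallel approach at invocation time: anything exposing `generatePrompt` keeps the old path, while a bare Runnable is piped into the output parser (if any) and invoked directly.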

package/dist/chains/llm_chain.d.ts
@@ -1,21 +1,26 @@
 import { BaseChain, ChainInputs } from "./base.js";
 import { BasePromptTemplate } from "../prompts/base.js";
-import { BaseLanguageModel } from "../base_language/index.js";
-import { ChainValues, Generation, BasePromptValue } from "../schema/index.js";
+import { BaseLanguageModel, BaseLanguageModelInput } from "../base_language/index.js";
+import { ChainValues, Generation, BasePromptValue, BaseMessage } from "../schema/index.js";
 import { BaseLLMOutputParser } from "../schema/output_parser.js";
 import { SerializedLLMChain } from "./serde.js";
 import { CallbackManager } from "../callbacks/index.js";
 import { BaseCallbackConfig, CallbackManagerForChainRun, Callbacks } from "../callbacks/manager.js";
+import { Runnable } from "../schema/runnable/base.js";
+type LLMType = BaseLanguageModel | Runnable<BaseLanguageModelInput, string> | Runnable<BaseLanguageModelInput, BaseMessage>;
+type CallOptionsIfAvailable<T> = T extends {
+    CallOptions: infer CO;
+} ? CO : any;
 /**
  * Interface for the input parameters of the LLMChain class.
  */
-export interface LLMChainInput<T extends string | object = string, L extends BaseLanguageModel = BaseLanguageModel> extends ChainInputs {
+export interface LLMChainInput<T extends string | object = string, Model extends LLMType = LLMType> extends ChainInputs {
     /** Prompt object to use */
     prompt: BasePromptTemplate;
     /** LLM Wrapper to use */
-    llm: L;
+    llm: Model;
     /** Kwargs to pass to LLM */
-    llmKwargs?: this["llm"]["CallOptions"];
+    llmKwargs?: CallOptionsIfAvailable<Model>;
     /** OutputParser to use */
     outputParser?: BaseLLMOutputParser<T>;
     /** Key to use for output, defaults to `text` */
@@ -34,17 +39,18 @@ export interface LLMChainInput<T extends string | object = string, L extends Bas
  * const llm = new LLMChain({ llm: new OpenAI(), prompt });
  * ```
  */
-export declare class LLMChain<T extends string | object = string, L extends BaseLanguageModel = BaseLanguageModel> extends BaseChain implements LLMChainInput<T> {
+export declare class LLMChain<T extends string | object = string, Model extends LLMType = LLMType> extends BaseChain implements LLMChainInput<T> {
     static lc_name(): string;
     lc_serializable: boolean;
     prompt: BasePromptTemplate;
-    llm: L;
-    llmKwargs?: this["llm"]["CallOptions"];
+    llm: Model;
+    llmKwargs?: CallOptionsIfAvailable<Model>;
     outputKey: string;
     outputParser?: BaseLLMOutputParser<T>;
     get inputKeys(): string[];
     get outputKeys(): string[];
-    constructor(fields: LLMChainInput<T, L>);
+    constructor(fields: LLMChainInput<T, Model>);
+    private getCallKeys;
     /** @ignore */
     _selectMemoryInputs(values: ChainValues): ChainValues;
     /** @ignore */
@@ -54,9 +60,9 @@ export declare class LLMChain<T extends string | object = string, L extends Bas
      *
     * Wraps _call and handles memory.
      */
-    call(values: ChainValues & this["llm"]["CallOptions"], config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;
+    call(values: ChainValues & CallOptionsIfAvailable<Model>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;
     /** @ignore */
-    _call(values: ChainValues & this["llm"]["CallOptions"], runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
+    _call(values: ChainValues & CallOptionsIfAvailable<Model>, runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
     /**
      * Format prompt with values and pass to LLM
     *
@@ -69,9 +75,11 @@ export declare class LLMChain<T extends string | object = string, L extends Bas
     * llm.predict({ adjective: "funny" })
     * ```
      */
-    predict(values: ChainValues & this["llm"]["CallOptions"], callbackManager?: CallbackManager): Promise<T>;
+    predict(values: ChainValues & CallOptionsIfAvailable<Model>, callbackManager?: CallbackManager): Promise<T>;
     _chainType(): "llm";
     static deserialize(data: SerializedLLMChain): Promise<LLMChain<string, BaseLanguageModel<any, import("../base_language/index.js").BaseLanguageModelCallOptions>>>;
     /** @deprecated */
     serialize(): SerializedLLMChain;
+    _getNumTokens(text: string): Promise<number>;
 }
+export {};

package/dist/chains/llm_chain.js
@@ -1,7 +1,30 @@
 import { BaseChain } from "./base.js";
 import { BasePromptTemplate } from "../prompts/base.js";
-import { BaseLanguageModel } from "../base_language/index.js";
+import { BaseLanguageModel, } from "../base_language/index.js";
 import { NoOpOutputParser } from "../output_parsers/noop.js";
+import { Runnable } from "../schema/runnable/base.js";
+function isBaseLanguageModel(llmLike) {
+    return typeof llmLike._llmType === "function";
+}
+function _getLanguageModel(llmLike) {
+    if (isBaseLanguageModel(llmLike)) {
+        return llmLike;
+    }
+    else if ("bound" in llmLike && Runnable.isRunnable(llmLike.bound)) {
+        return _getLanguageModel(llmLike.bound);
+    }
+    else if ("runnable" in llmLike &&
+        "fallbacks" in llmLike &&
+        Runnable.isRunnable(llmLike.runnable)) {
+        return _getLanguageModel(llmLike.runnable);
+    }
+    else if ("default" in llmLike && Runnable.isRunnable(llmLike.default)) {
+        return _getLanguageModel(llmLike.default);
+    }
+    else {
+        throw new Error("Unable to extract BaseLanguageModel from llmLike object.");
+    }
+}
 /**
  * Chain to run queries against LLMs.
  *
@@ -76,10 +99,15 @@ export class LLMChain extends BaseChain {
             this.outputParser = this.prompt.outputParser;
         }
     }
+    getCallKeys() {
+        const callKeys = "callKeys" in this.llm ? this.llm.callKeys : [];
+        return callKeys;
+    }
     /** @ignore */
     _selectMemoryInputs(values) {
         const valuesForMemory = super._selectMemoryInputs(values);
-        for (const key of this.llm.callKeys) {
+        const callKeys = this.getCallKeys();
+        for (const key of callKeys) {
             if (key in values) {
                 delete valuesForMemory[key];
             }
@@ -111,16 +139,29 @@ export class LLMChain extends BaseChain {
         const valuesForLLM = {
             ...this.llmKwargs,
         };
-        for (const key of this.llm.callKeys) {
+        const callKeys = this.getCallKeys();
+        for (const key of callKeys) {
             if (key in values) {
-                valuesForLLM[key] = values[key];
-                delete valuesForPrompt[key];
+                if (valuesForLLM) {
+                    valuesForLLM[key] =
+                        values[key];
+                    delete valuesForPrompt[key];
+                }
             }
         }
         const promptValue = await this.prompt.formatPromptValue(valuesForPrompt);
-        const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+        if ("generatePrompt" in this.llm) {
+            const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+            return {
+                [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+            };
+        }
+        const modelWithParser = this.outputParser
+            ? this.llm.pipe(this.outputParser)
+            : this.llm;
+        const response = await modelWithParser.invoke(promptValue, runManager?.getChild());
         return {
-            [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+            [this.outputKey]: response,
         };
     }
     /**
@@ -157,10 +198,14 @@ export class LLMChain extends BaseChain {
     }
     /** @deprecated */
     serialize() {
+        const serialize = "serialize" in this.llm ? this.llm.serialize() : undefined;
         return {
             _type: `${this._chainType()}_chain`,
-            llm: this.llm.serialize(),
+            llm: serialize,
             prompt: this.prompt.serialize(),
         };
     }
+    _getNumTokens(text) {
+        return _getLanguageModel(this.llm).getNumTokens(text);
+    }
 }
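
Taken together with the `.d.ts` changes above, these hunks let an `LLMChain` be constructed around a bound model instead of a raw one. A minimal usage sketch, assuming an OPENAI_API_KEY in the environment and the entrypoints as published in this release:

```ts
import { LLMChain } from "langchain/chains";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { PromptTemplate } from "langchain/prompts";

const prompt = PromptTemplate.fromTemplate("Tell me a joke about {topic}");
// .bind() returns a RunnableBinding, not a BaseLanguageModel, so the chain
// routes through the new pipe()/invoke() branch instead of generatePrompt().
const llm = new ChatOpenAI({ temperature: 0 }).bind({ stop: ["\n\n"] });
const chain = new LLMChain({ llm, prompt });
const res = await chain.call({ topic: "bears" });
console.log(res.text);
```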

package/dist/chat_models/ollama.cjs
@@ -213,6 +213,12 @@ class ChatOllama extends base_js_1.SimpleChatModel {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "format", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.model = fields.model ?? this.model;
         this.baseUrl = fields.baseUrl?.endsWith("/")
             ? fields.baseUrl.slice(0, -1)
@@ -247,6 +253,7 @@ class ChatOllama extends base_js_1.SimpleChatModel {
         this.useMLock = fields.useMLock;
         this.useMMap = fields.useMMap;
         this.vocabOnly = fields.vocabOnly;
+        this.format = fields.format;
     }
     _llmType() {
         return "ollama";
@@ -260,6 +267,7 @@ class ChatOllama extends base_js_1.SimpleChatModel {
     invocationParams(options) {
         return {
             model: this.model,
+            format: this.format,
             options: {
                 embedding_only: this.embeddingOnly,
                 f16_kv: this.f16KV,

package/dist/chat_models/ollama.d.ts
@@ -3,6 +3,7 @@ import { BaseLanguageModelCallOptions } from "../base_language/index.js";
 import { OllamaInput } from "../util/ollama.js";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
 import { BaseMessage, ChatGenerationChunk } from "../schema/index.js";
+import type { StringWithAutocomplete } from "../util/types.js";
 /**
  * An interface defining the options for an Ollama API call. It extends
  * the BaseLanguageModelCallOptions interface.
@@ -49,6 +50,7 @@ export declare class ChatOllama extends SimpleChatModel<OllamaCallOptions> imple
     useMLock?: boolean;
     useMMap?: boolean;
     vocabOnly?: boolean;
+    format?: StringWithAutocomplete<"json">;
     constructor(fields: OllamaInput & BaseChatModelParams);
     _llmType(): string;
     /**
@@ -59,6 +61,7 @@ export declare class ChatOllama extends SimpleChatModel<OllamaCallOptions> imple
      */
     invocationParams(options?: this["ParsedCallOptions"]): {
         model: string;
+        format: StringWithAutocomplete<"json"> | undefined;
         options: {
             embedding_only: boolean | undefined;
             f16_kv: boolean | undefined;

package/dist/chat_models/ollama.js
@@ -210,6 +210,12 @@ export class ChatOllama extends SimpleChatModel {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "format", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.model = fields.model ?? this.model;
         this.baseUrl = fields.baseUrl?.endsWith("/")
             ? fields.baseUrl.slice(0, -1)
@@ -244,6 +250,7 @@ export class ChatOllama extends SimpleChatModel {
         this.useMLock = fields.useMLock;
         this.useMMap = fields.useMMap;
         this.vocabOnly = fields.vocabOnly;
+        this.format = fields.format;
     }
     _llmType() {
         return "ollama";
@@ -257,6 +264,7 @@ export class ChatOllama extends SimpleChatModel {
     invocationParams(options) {
         return {
             model: this.model,
+            format: this.format,
             options: {
                 embedding_only: this.embeddingOnly,
                 f16_kv: this.f16KV,
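
All three Ollama chat-model files plumb the new `format` field through the constructor, the class, and `invocationParams`, mirroring the Ollama API's `format` request parameter; the `StringWithAutocomplete<"json">` type suggests `"json"` (the only value the server currently documents) while still admitting any string. A minimal sketch, assuming a local Ollama server with a pulled model:

```ts
import { ChatOllama } from "langchain/chat_models/ollama";

const model = new ChatOllama({
    baseUrl: "http://localhost:11434", // Ollama's default address
    model: "llama2",
    format: "json", // forwarded to the API via invocationParams()
});
```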

package/dist/chat_models/openai.cjs
@@ -421,6 +421,9 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
                 continue;
             }
             const { delta } = choice;
+            if (!delta) {
+                continue;
+            }
             const chunk = _convertDeltaToMessageChunk(delta, defaultRole);
             defaultRole = delta.role ?? defaultRole;
             const newTokenIndices = {

package/dist/chat_models/openai.js
@@ -418,6 +418,9 @@ export class ChatOpenAI extends BaseChatModel {
                 continue;
             }
             const { delta } = choice;
+            if (!delta) {
+                continue;
+            }
             const chunk = _convertDeltaToMessageChunk(delta, defaultRole);
             defaultRole = delta.role ?? defaultRole;
             const newTokenIndices = {
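
The added `if (!delta) continue;` in both builds skips streamed choices that arrive without a `delta` payload, which some OpenAI-compatible endpoints and proxies appear to emit; previously such a chunk would throw inside `_convertDeltaToMessageChunk` and on the following `delta.role` read.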

package/dist/document_loaders/fs/unstructured.d.ts
@@ -5,6 +5,7 @@ import type { readFile as ReadFileT } from "node:fs/promises";
 import { DirectoryLoader, UnknownHandling } from "./directory.js";
 import { Document } from "../../document.js";
 import { BaseDocumentLoader } from "../base.js";
+import type { StringWithAutocomplete } from "../../util/types.js";
 /**
  * Represents an element returned by the Unstructured API. It has
  * properties for the element type, text content, and metadata.
@@ -40,11 +41,6 @@ export type SkipInferTableTypes = "txt" | "text" | "pdf" | "docx" | "doc" | "jpg
  * Set the chunking_strategy to chunk text into larger or smaller elements. Defaults to None with optional arg of by_title
  */
 type ChunkingStrategy = "None" | "by_title";
-/**
- * Represents a string value with autocomplete suggestions. It is used for
- * the `strategy` property in the UnstructuredLoaderOptions.
- */
-type StringWithAutocomplete<T> = T | (string & Record<never, never>);
 export type UnstructuredLoaderOptions = {
     apiKey?: string;
     apiUrl?: string;
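
`StringWithAutocomplete` is removed here because it now lives in the shared `util/types` module (files 54-56 in the list above), from which this loader and the Ollama classes both import it. Its definition, reproduced from the deleted lines:

```ts
// T stays a distinct literal type, so editors still suggest it, while the
// `string & Record<never, never>` arm admits any other string without
// collapsing the union to plain `string`.
type StringWithAutocomplete<T> = T | (string & Record<never, never>);

const a: StringWithAutocomplete<"json"> = "json";   // suggested literal
const b: StringWithAutocomplete<"json"> = "custom"; // still allowed
```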

package/dist/embeddings/ollama.d.ts
@@ -1,6 +1,6 @@
 import { OllamaInput, OllamaRequestParams } from "../util/ollama.js";
 import { Embeddings, EmbeddingsParams } from "./base.js";
-type CamelCasedRequestOptions = Omit<OllamaInput, "baseUrl" | "model">;
+type CamelCasedRequestOptions = Omit<OllamaInput, "baseUrl" | "model" | "format">;
 /**
  * Interface for OllamaEmbeddings parameters. Extends EmbeddingsParams and
  * defines additional parameters specific to the OllamaEmbeddings class.